aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-events62
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power_resources_D013
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power_resources_D114
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power_resources_D214
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power_resources_D3hot14
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power_state20
-rw-r--r--Documentation/ABI/testing/sysfs-devices-real_power_state23
-rw-r--r--Documentation/ABI/testing/sysfs-devices-resource_in_use12
-rw-r--r--Documentation/ABI/testing/sysfs-platform-ts550047
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt37
-rw-r--r--Documentation/acpi/enumeration.txt4
-rw-r--r--Documentation/acpi/scan_handlers.txt77
-rw-r--r--Documentation/atomic_ops.txt2
-rw-r--r--Documentation/cgroups/00-INDEX2
-rw-r--r--Documentation/cgroups/memcg_test.txt3
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt6
-rw-r--r--Documentation/cpu-freq/user-guide.txt8
-rw-r--r--Documentation/device-mapper/dm-raid.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-aic.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/kirkwood.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/psci.txt55
-rw-r--r--Documentation/devicetree/bindings/clock/prima2-clock.txt73
-rw-r--r--Documentation/devicetree/bindings/drm/exynos/g2d.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/ina209.txt18
-rw-r--r--Documentation/devicetree/bindings/i2c/max6697.txt64
-rw-r--r--Documentation/devicetree/bindings/input/imx-keypad.txt53
-rw-r--r--Documentation/devicetree/bindings/input/lpc32xx-key.txt9
-rw-r--r--Documentation/devicetree/bindings/input/matrix-keymap.txt8
-rw-r--r--Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt22
-rw-r--r--Documentation/devicetree/bindings/input/omap-keypad.txt13
-rw-r--r--Documentation/devicetree/bindings/input/tca8418_keypad.txt6
-rw-r--r--Documentation/devicetree/bindings/leds/leds-ns2.txt (renamed from Documentation/devicetree/bindings/gpio/leds-ns2.txt)0
-rwxr-xr-xDocumentation/devicetree/bindings/mfd/tps6507x.txt91
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/dma-engine.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/samsung-sdhci.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt60
-rw-r--r--Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt120
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt140
-rw-r--r--Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt13
-rw-r--r--Documentation/devicetree/bindings/power_supply/restart-poweroff.txt8
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/srio.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/anatop-regulator.txt8
-rw-r--r--Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt152
-rw-r--r--Documentation/devicetree/bindings/regulator/tps51632-regulator.txt27
-rw-r--r--Documentation/devicetree/bindings/regulator/tps62360-regulator.txt4
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt12
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/samsung-wdt.txt2
-rw-r--r--Documentation/filesystems/f2fs.txt18
-rw-r--r--[-rwxr-xr-x]Documentation/hid/hid-sensor.txt0
-rw-r--r--Documentation/hwmon/coretemp9
-rw-r--r--Documentation/hwmon/ina20993
-rw-r--r--Documentation/hwmon/it8716
-rw-r--r--Documentation/hwmon/jc423
-rw-r--r--Documentation/hwmon/lm7390
-rw-r--r--Documentation/hwmon/max3444016
-rw-r--r--Documentation/hwmon/max669758
-rw-r--r--Documentation/hwmon/sysfs-interface8
-rw-r--r--Documentation/hwmon/zl610026
-rw-r--r--Documentation/kbuild/makefiles.txt23
-rw-r--r--Documentation/kernel-parameters.txt18
-rw-r--r--Documentation/memory-barriers.txt1
-rw-r--r--Documentation/pinctrl.txt18
-rw-r--r--Documentation/power/freezing-of-tasks.txt5
-rw-r--r--Documentation/power/runtime_pm.txt4
-rw-r--r--Documentation/trace/events-power.txt27
-rw-r--r--Documentation/trace/ftrace.txt83
-rw-r--r--Documentation/virtual/kvm/api.txt99
-rw-r--r--Documentation/x86/boot.txt28
-rw-r--r--Documentation/x86/zero-page.txt4
-rw-r--r--MAINTAINERS75
-rw-r--r--Makefile7
-rw-r--r--arch/Kconfig12
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/kernel/osf_sys.c6
-rw-r--r--arch/arm/Kconfig18
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi14
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi21
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78460.dtsi21
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi60
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi12
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi10
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts14
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts8
-rw-r--r--arch/arm/boot/dts/highbank.dts10
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2-common.dtsi16
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi2
-rw-r--r--arch/arm/boot/dts/kizbox.dts2
-rw-r--r--arch/arm/boot/dts/prima2.dtsi31
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi30
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi23
-rw-r--r--arch/arm/boot/dts/sunxi.dtsi6
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts2
-rw-r--r--arch/arm/common/gic.c25
-rw-r--r--arch/arm/configs/at91_dt_defconfig3
-rw-r--r--arch/arm/crypto/aes-armv4.S64
-rw-r--r--arch/arm/crypto/sha1-armv4-large.S24
-rw-r--r--arch/arm/include/asm/assembler.h10
-rw-r--r--arch/arm/include/asm/cputype.h33
-rw-r--r--arch/arm/include/asm/cti.h10
-rw-r--r--arch/arm/include/asm/hardware/coresight.h6
-rw-r--r--arch/arm/include/asm/hw_breakpoint.h3
-rw-r--r--arch/arm/include/asm/idmap.h1
-rw-r--r--arch/arm/include/asm/kvm_arm.h214
-rw-r--r--arch/arm/include/asm/kvm_asm.h82
-rw-r--r--arch/arm/include/asm/kvm_coproc.h47
-rw-r--r--arch/arm/include/asm/kvm_emulate.h72
-rw-r--r--arch/arm/include/asm/kvm_host.h161
-rw-r--r--arch/arm/include/asm/kvm_mmio.h56
-rw-r--r--arch/arm/include/asm/kvm_mmu.h50
-rw-r--r--arch/arm/include/asm/kvm_psci.h23
-rw-r--r--arch/arm/include/asm/mach/pci.h1
-rw-r--r--arch/arm/include/asm/memory.h10
-rw-r--r--arch/arm/include/asm/opcodes-sec.h24
-rw-r--r--arch/arm/include/asm/opcodes.h1
-rw-r--r--arch/arm/include/asm/outercache.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h5
-rw-r--r--arch/arm/include/asm/pgtable-3level.h18
-rw-r--r--arch/arm/include/asm/pgtable.h7
-rw-r--r--arch/arm/include/asm/psci.h36
-rw-r--r--arch/arm/include/asm/smp_scu.h8
-rw-r--r--arch/arm/include/asm/spinlock.h16
-rw-r--r--arch/arm/include/asm/virt.h4
-rw-r--r--arch/arm/include/uapi/asm/kvm.h164
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/asm-offsets.c25
-rw-r--r--arch/arm/kernel/bios32.c9
-rw-r--r--arch/arm/kernel/debug.S2
-rw-r--r--arch/arm/kernel/head.S5
-rw-r--r--arch/arm/kernel/hw_breakpoint.c61
-rw-r--r--arch/arm/kernel/hyp-stub.S18
-rw-r--r--arch/arm/kernel/perf_event.c16
-rw-r--r--arch/arm/kernel/perf_event_cpu.c51
-rw-r--r--arch/arm/kernel/perf_event_v6.c4
-rw-r--r--arch/arm/kernel/perf_event_v7.c18
-rw-r--r--arch/arm/kernel/perf_event_xscale.c2
-rw-r--r--arch/arm/kernel/process.c13
-rw-r--r--arch/arm/kernel/psci.c211
-rw-r--r--arch/arm/kernel/sched_clock.c4
-rw-r--r--arch/arm/kernel/smp.c31
-rw-r--r--arch/arm/kernel/smp_scu.c2
-rw-r--r--arch/arm/kernel/smp_twd.c53
-rw-r--r--arch/arm/kernel/vmlinux.lds.S6
-rw-r--r--arch/arm/kvm/Kconfig56
-rw-r--r--arch/arm/kvm/Makefile21
-rw-r--r--arch/arm/kvm/arm.c1015
-rw-r--r--arch/arm/kvm/coproc.c1046
-rw-r--r--arch/arm/kvm/coproc.h153
-rw-r--r--arch/arm/kvm/coproc_a15.c162
-rw-r--r--arch/arm/kvm/emulate.c373
-rw-r--r--arch/arm/kvm/guest.c222
-rw-r--r--arch/arm/kvm/init.S114
-rw-r--r--arch/arm/kvm/interrupts.S478
-rw-r--r--arch/arm/kvm/interrupts_head.S441
-rw-r--r--arch/arm/kvm/mmio.c153
-rw-r--r--arch/arm/kvm/mmu.c787
-rw-r--r--arch/arm/kvm/psci.c108
-rw-r--r--arch/arm/kvm/reset.c74
-rw-r--r--arch/arm/kvm/trace.h235
-rw-r--r--arch/arm/mach-at91/setup.c2
-rw-r--r--arch/arm/mach-davinci/cpuidle.c84
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-exynos/include/mach/cpufreq.h19
-rw-r--r--arch/arm/mach-highbank/Kconfig4
-rw-r--r--arch/arm/mach-highbank/core.h1
-rw-r--r--arch/arm/mach-highbank/highbank.c6
-rw-r--r--arch/arm/mach-highbank/sysregs.h4
-rw-r--r--arch/arm/mach-imx/Kconfig1
-rw-r--r--arch/arm/mach-imx/clk-imx25.c6
-rw-r--r--arch/arm/mach-imx/clk-imx27.c6
-rw-r--r--arch/arm/mach-imx/clk-imx31.c6
-rw-r--r--arch/arm/mach-imx/clk-imx35.c6
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c6
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c3
-rw-r--r--arch/arm/mach-imx/common.h1
-rw-r--r--arch/arm/mach-imx/devices/devices-common.h1
-rw-r--r--arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c15
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-fb.c2
-rw-r--r--arch/arm/mach-imx/hotplug.c10
-rw-r--r--arch/arm/mach-imx/iram_alloc.c3
-rw-r--r--arch/arm/mach-imx/platsmp.c1
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c1
-rw-r--r--arch/arm/mach-integrator/pci_v3.c14
-rw-r--r--arch/arm/mach-kirkwood/board-ns2.c38
-rw-r--r--arch/arm/mach-mvebu/Makefile2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c6
-rw-r--r--arch/arm/mach-omap2/cclock2420_data.c2
-rw-r--r--arch/arm/mach-omap2/cclock2430_data.c2
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c13
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/drm.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c6
-rw-r--r--arch/arm/mach-omap2/pm34xx.c2
-rw-r--r--arch/arm/mach-omap2/timer.c8
-rw-r--r--arch/arm/mach-realview/include/mach/irqs-eb.h2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c2
-rw-r--r--arch/arm/mach-s3c64xx/pm.c2
-rw-r--r--arch/arm/mach-sunxi/Kconfig1
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c3
-rw-r--r--arch/arm/mach-ux500/Kconfig6
-rw-r--r--arch/arm/mach-ux500/board-mop500.c20
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c2
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-mop500.h10
-rw-r--r--arch/arm/mach-versatile/core.c15
-rw-r--r--arch/arm/mach-versatile/pci.c11
-rw-r--r--arch/arm/mm/Kconfig10
-rw-r--r--arch/arm/mm/Makefile2
-rw-r--r--arch/arm/mm/context.c3
-rw-r--r--arch/arm/mm/dma-mapping.c20
-rw-r--r--arch/arm/mm/idmap.c55
-rw-r--r--arch/arm/mm/ioremap.c135
-rw-r--r--arch/arm/mm/mm.h12
-rw-r--r--arch/arm/mm/mmu.c60
-rw-r--r--arch/arm/mm/proc-macros.S5
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7-2level.S2
-rw-r--r--arch/arm/mm/proc-v7-3level.S2
-rw-r--r--arch/arm/mm/vmregion.c205
-rw-r--r--arch/arm/mm/vmregion.h31
-rw-r--r--arch/arm/plat-versatile/headsmp.S2
-rw-r--r--arch/arm/vfp/entry.S6
-rw-r--r--arch/arm/vfp/vfphw.S4
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/kernel/process.c13
-rw-r--r--arch/avr32/include/asm/dma-mapping.h10
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/include/asm/dma-mapping.h10
-rw-r--r--arch/blackfin/kernel/process.c7
-rw-r--r--arch/c6x/include/asm/dma-mapping.h15
-rw-r--r--arch/cris/include/asm/dma-mapping.h10
-rw-r--r--arch/cris/kernel/process.c11
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/include/asm/dma-mapping.h15
-rw-r--r--arch/hexagon/Kconfig2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/hp/common/aml_nfw.c2
-rw-r--r--arch/ia64/include/asm/acpi.h4
-rw-r--r--arch/ia64/include/asm/cputime.h92
-rw-r--r--arch/ia64/include/asm/thread_info.h4
-rw-r--r--arch/ia64/include/asm/xen/minstate.h2
-rw-r--r--arch/ia64/kernel/asm-offsets.c2
-rw-r--r--arch/ia64/kernel/entry.S16
-rw-r--r--arch/ia64/kernel/fsys.S4
-rw-r--r--arch/ia64/kernel/head.S4
-rw-r--r--arch/ia64/kernel/ivt.S8
-rw-r--r--arch/ia64/kernel/minstate.h2
-rw-r--r--arch/ia64/kernel/process.c3
-rw-r--r--arch/ia64/kernel/ptrace.c27
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/time.c5
-rw-r--r--arch/m32r/kernel/process.c51
-rw-r--r--arch/m68k/include/asm/dma-mapping.h29
-rw-r--r--arch/m68k/include/asm/pgtable_no.h2
-rw-r--r--arch/m68k/include/asm/processor.h1
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/Makefile4
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/m68k/mm/init.c8
-rw-r--r--arch/microblaze/kernel/process.c3
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/bcm47xx/Kconfig3
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c9
-rw-r--r--arch/mips/include/asm/dsp.h2
-rw-r--r--arch/mips/include/asm/inst.h1
-rw-r--r--arch/mips/include/asm/mach-pnx833x/war.h2
-rw-r--r--arch/mips/include/asm/pgtable-64.h1
-rw-r--r--arch/mips/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/include/uapi/asm/break.h (renamed from arch/mips/include/asm/break.h)0
-rw-r--r--arch/mips/kernel/ftrace.c36
-rw-r--r--arch/mips/kernel/mcount.S7
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lib/delay.c2
-rw-r--r--arch/mips/mm/ioremap.c6
-rw-r--r--arch/mips/mm/mmap.c6
-rw-r--r--arch/mips/netlogic/xlr/setup.c5
-rw-r--r--arch/mips/pci/pci-ar71xx.c2
-rw-r--r--arch/mips/pci/pci-ar724x.c2
-rw-r--r--arch/mn10300/include/asm/dma-mapping.h15
-rw-r--r--arch/mn10300/kernel/process.c7
-rw-r--r--arch/openrisc/kernel/idle.c5
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/dma-mapping.h15
-rw-r--r--arch/parisc/kernel/entry.S18
-rw-r--r--arch/parisc/kernel/irq.c6
-rw-r--r--arch/parisc/kernel/ptrace.c2
-rw-r--r--arch/parisc/kernel/signal.c4
-rw-r--r--arch/parisc/math-emu/cnv_float.h11
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/configs/chroma_defconfig2
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig2
-rw-r--r--arch/powerpc/configs/pasemi_defconfig2
-rw-r--r--arch/powerpc/include/asm/cputime.h6
-rw-r--r--arch/powerpc/include/asm/lppaca.h2
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h26
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h4
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S17
-rw-r--r--arch/powerpc/kernel/kgdb.c5
-rw-r--r--arch/powerpc/kernel/time.c16
-rw-r--r--arch/powerpc/kvm/emulate.c2
-rw-r--r--arch/powerpc/mm/hash_low_64.S62
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c12
-rw-r--r--arch/powerpc/perf/power7-pmu.c80
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c1
-rw-r--r--arch/powerpc/platforms/pasemi/cpufreq.c7
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c6
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/pgtable.h12
-rw-r--r--arch/s390/kernel/time.c3
-rw-r--r--arch/s390/kernel/vtime.c6
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/kernel/idle.c12
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h14
-rw-r--r--arch/sparc/include/asm/processor_32.h1
-rw-r--r--arch/sparc/kernel/apc.c3
-rw-r--r--arch/sparc/kernel/leon_pmc.c5
-rw-r--r--arch/sparc/kernel/pmc.c3
-rw-r--r--arch/sparc/kernel/process_32.c7
-rw-r--r--arch/sparc/kernel/prom_common.c4
-rw-r--r--arch/sparc/kernel/sbus.c6
-rw-r--r--arch/sparc/mm/gup.c59
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/include/asm/io.h6
-rw-r--r--arch/tile/include/asm/irqflags.h32
-rw-r--r--arch/tile/include/uapi/arch/interrupts_32.h394
-rw-r--r--arch/tile/include/uapi/arch/interrupts_64.h346
-rw-r--r--arch/tile/kernel/intvec_64.S4
-rw-r--r--arch/tile/kernel/process.c2
-rw-r--r--arch/tile/kernel/reboot.c2
-rw-r--r--arch/tile/kernel/setup.c5
-rw-r--r--arch/tile/kernel/stack.c3
-rw-r--r--arch/tile/lib/cacheflush.c2
-rw-r--r--arch/tile/lib/cpumask.c2
-rw-r--r--arch/tile/lib/exports.c2
-rw-r--r--arch/tile/mm/homecache.c1
-rw-r--r--arch/unicore32/kernel/process.c5
-rw-r--r--arch/x86/Kconfig40
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/Makefile4
-rw-r--r--arch/x86/boot/compressed/eboot.c21
-rw-r--r--arch/x86/boot/compressed/head_32.S8
-rw-r--r--arch/x86/boot/compressed/head_64.S8
-rw-r--r--arch/x86/boot/compressed/misc.c2
-rw-r--r--arch/x86/boot/compressed/misc.h1
-rw-r--r--arch/x86/boot/header.S39
-rw-r--r--arch/x86/boot/setup.ld2
-rw-r--r--arch/x86/boot/tools/build.c81
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/ia32/ia32entry.S4
-rw-r--r--arch/x86/include/asm/acpi.h4
-rw-r--r--arch/x86/include/asm/amd_nb.h17
-rw-r--r--arch/x86/include/asm/bootparam_utils.h38
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/efi.h1
-rw-r--r--arch/x86/include/asm/ftrace.h1
-rw-r--r--arch/x86/include/asm/hpet.h5
-rw-r--r--arch/x86/include/asm/hw_irq.h13
-rw-r--r--arch/x86/include/asm/hypervisor.h13
-rw-r--r--arch/x86/include/asm/io_apic.h28
-rw-r--r--arch/x86/include/asm/irq_remapping.h40
-rw-r--r--arch/x86/include/asm/irq_vectors.h4
-rw-r--r--arch/x86/include/asm/kvm_para.h8
-rw-r--r--arch/x86/include/asm/linkage.h18
-rw-r--r--arch/x86/include/asm/mce.h84
-rw-r--r--arch/x86/include/asm/mshyperv.h4
-rw-r--r--arch/x86/include/asm/mwait.h3
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/perf_event.h13
-rw-r--r--arch/x86/include/asm/pgtable.h17
-rw-r--r--arch/x86/include/asm/pgtable_32.h7
-rw-r--r--arch/x86/include/asm/pgtable_64.h3
-rw-r--r--arch/x86/include/asm/processor.h20
-rw-r--r--arch/x86/include/asm/required-features.h8
-rw-r--r--arch/x86/include/asm/uv/uv.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h44
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1496
-rw-r--r--arch/x86/include/asm/x86_init.h27
-rw-r--r--arch/x86/include/asm/xor.h491
-rw-r--r--arch/x86/include/asm/xor_32.h309
-rw-r--r--arch/x86/include/asm/xor_64.h305
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h63
-rw-r--r--arch/x86/include/uapi/asm/mce.h87
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h5
-rw-r--r--arch/x86/kernel/Makefile3
-rw-r--r--arch/x86/kernel/apb_timer.c10
-rw-r--r--arch/x86/kernel/apic/apic.c28
-rw-r--r--arch/x86/kernel/apic/io_apic.c457
-rw-r--r--arch/x86/kernel/apic/ipi.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c21
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c206
-rw-r--r--arch/x86/kernel/apm_32.c68
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/bugs.c27
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c7
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c9
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c14
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c54
-rw-r--r--arch/x86/kernel/cpu/perf_event.c21
-rw-r--r--arch/x86/kernel/cpu/perf_event.h25
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c322
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c2
-rw-r--r--arch/x86/kernel/cpu/proc.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c13
-rw-r--r--arch/x86/kernel/entry_32.S10
-rw-r--r--arch/x86/kernel/entry_64.S14
-rw-r--r--arch/x86/kernel/head32.c3
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_32.S102
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/kprobes/Makefile7
-rw-r--r--arch/x86/kernel/kprobes/common.h (renamed from arch/x86/kernel/kprobes-common.h)11
-rw-r--r--arch/x86/kernel/kprobes/core.c (renamed from arch/x86/kernel/kprobes.c)76
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c93
-rw-r--r--arch/x86/kernel/kprobes/opt.c (renamed from arch/x86/kernel/kprobes-opt.c)2
-rw-r--r--arch/x86/kernel/kvm.c1
-rw-r--r--arch/x86/kernel/msr.c3
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/process.c122
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/rtc.c1
-rw-r--r--arch/x86/kernel/setup.c28
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/step.c9
-rw-r--r--arch/x86/kernel/sys_x86_64.c2
-rw-r--r--arch/x86/kernel/tsc.c3
-rw-r--r--arch/x86/kernel/uprobes.c4
-rw-r--r--arch/x86/kernel/x86_init.c24
-rw-r--r--arch/x86/mm/fault.c8
-rw-r--r--arch/x86/mm/init_64.c7
-rw-r--r--arch/x86/mm/memtest.c10
-rw-r--r--arch/x86/mm/srat.c29
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/pci/mmconfig-shared.c3
-rw-r--r--arch/x86/platform/Makefile2
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c7
-rw-r--r--arch/x86/platform/efi/efi.c59
-rw-r--r--arch/x86/platform/efi/efi_64.c22
-rw-r--r--arch/x86/platform/goldfish/Makefile1
-rw-r--r--arch/x86/platform/goldfish/goldfish.c51
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c2
-rw-r--r--arch/x86/platform/sfi/sfi.c2
-rw-r--r--arch/x86/platform/ts5500/Makefile1
-rw-r--r--arch/x86/platform/ts5500/ts5500.c339
-rw-r--r--arch/x86/platform/uv/tlb_uv.c14
-rw-r--r--arch/x86/platform/uv/uv_time.c13
-rw-r--r--arch/x86/tools/insn_sanity.c10
-rw-r--r--arch/x86/tools/relocs.c6
-rw-r--r--arch/x86/um/fault.c2
-rw-r--r--arch/x86/vdso/vclock_gettime.c2
-rw-r--r--arch/x86/xen/enlighten.c78
-rw-r--r--arch/x86/xen/setup.c5
-rw-r--r--arch/x86/xen/smp.c7
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-asm_32.S14
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h15
-rw-r--r--block/blk-exec.c1
-rw-r--r--block/elevator.c35
-rw-r--r--block/genhd.c42
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_memhotplug.c82
-rw-r--r--drivers/acpi/acpi_pad.c3
-rw-r--r--drivers/acpi/acpi_platform.c72
-rw-r--r--drivers/acpi/acpica/Makefile8
-rw-r--r--drivers/acpi/acpica/accommon.h3
-rw-r--r--drivers/acpi/acpica/acdebug.h19
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h23
-rw-r--r--drivers/acpi/acpica/acglobal.h43
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h47
-rw-r--r--drivers/acpi/acpica/acmacros.h173
-rw-r--r--drivers/acpi/acpica/acnamesp.h16
-rw-r--r--drivers/acpi/acpica/acobject.h4
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h27
-rw-r--r--drivers/acpi/acpica/acpredef.h31
-rw-r--r--drivers/acpi/acpica/acresrc.h8
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h59
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h8
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c8
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c20
-rw-r--r--drivers/acpi/acpica/dsopcode.c16
-rw-r--r--drivers/acpi/acpica/dsutils.c12
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswload.c7
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c14
-rw-r--r--drivers/acpi/acpica/evgpeblk.c24
-rw-r--r--drivers/acpi/acpica/evgpeinit.c5
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c529
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c584
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c6
-rw-r--r--drivers/acpi/acpica/evxface.c36
-rw-r--r--drivers/acpi/acpica/evxfevnt.c7
-rw-r--r--drivers/acpi/acpica/evxfgpe.c11
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c22
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c21
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c3
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c5
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c10
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c6
-rw-r--r--drivers/acpi/acpica/exregion.c25
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c31
-rw-r--r--drivers/acpi/acpica/exstoren.c4
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c24
-rw-r--r--drivers/acpi/acpica/hwacpi.c13
-rw-r--r--drivers/acpi/acpica/hwesleep.c3
-rw-r--r--drivers/acpi/acpica/hwgpe.c10
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c8
-rw-r--r--drivers/acpi/acpica/hwsleep.c8
-rw-r--r--drivers/acpi/acpica/hwtimer.c9
-rw-r--r--drivers/acpi/acpica/hwvalid.c20
-rw-r--r--drivers/acpi/acpica/hwxface.c137
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c13
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c14
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c3
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c29
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c5
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c586
-rw-r--r--drivers/acpi/acpica/nsprepkg.c621
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c5
-rw-r--r--drivers/acpi/acpica/nssearch.c7
-rw-r--r--drivers/acpi/acpica/nsutils.c88
-rw-r--r--drivers/acpi/acpica/nswalk.c6
-rw-r--r--drivers/acpi/acpica/nsxfeval.c19
-rw-r--r--drivers/acpi/acpica/nsxfname.c20
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c9
-rw-r--r--drivers/acpi/acpica/psloop.c623
-rw-r--r--drivers/acpi/acpica/psobject.c647
-rw-r--r--drivers/acpi/acpica/psopcode.c174
-rw-r--r--drivers/acpi/acpica/psopinfo.c223
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c10
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c8
-rw-r--r--drivers/acpi/acpica/rscreate.c9
-rw-r--r--drivers/acpi/acpica/rsdump.c424
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c454
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c40
-rw-r--r--drivers/acpi/acpica/rslist.c9
-rw-r--r--drivers/acpi/acpica/rsmemory.c8
-rw-r--r--drivers/acpi/acpica/rsmisc.c76
-rw-r--r--drivers/acpi/acpica/rsserial.c10
-rw-r--r--drivers/acpi/acpica/rsutils.c14
-rw-r--r--drivers/acpi/acpica/rsxface.c107
-rw-r--r--drivers/acpi/acpica/tbfadt.c7
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c4
-rw-r--r--drivers/acpi/acpica/tbxface.c7
-rw-r--r--drivers/acpi/acpica/tbxfload.c4
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c6
-rw-r--r--drivers/acpi/acpica/utdebug.c120
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c70
-rw-r--r--drivers/acpi/acpica/uteval.c4
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c11
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c16
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c830
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c4
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c218
-rw-r--r--drivers/acpi/acpica/utresrc.c83
-rw-r--r--drivers/acpi/acpica/utstate.c42
-rw-r--r--drivers/acpi/acpica/utstring.c574
-rw-r--r--drivers/acpi/acpica/uttrack.c18
-rw-r--r--drivers/acpi/acpica/utxface.c6
-rw-r--r--drivers/acpi/acpica/utxferror.c6
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/apei-base.c3
-rw-r--r--drivers/acpi/apei/cper.c19
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bus.c270
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/container.c211
-rw-r--r--drivers/acpi/csrt.c159
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/device_pm.c359
-rw-r--r--drivers/acpi/dock.c44
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/glue.c85
-rw-r--r--drivers/acpi/hed.c2
-rw-r--r--drivers/acpi/internal.h31
-rw-r--r--drivers/acpi/numa.c8
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/pci_bind.c122
-rw-r--r--drivers/acpi/pci_link.c47
-rw-r--r--drivers/acpi/pci_root.c101
-rw-r--r--drivers/acpi/pci_slot.c7
-rw-r--r--drivers/acpi/power.c730
-rw-r--r--drivers/acpi/proc.c9
-rw-r--r--drivers/acpi/processor_driver.c62
-rw-r--r--drivers/acpi/processor_idle.c52
-rw-r--r--drivers/acpi/processor_perflib.c7
-rw-r--r--drivers/acpi/sbs.c6
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/scan.c976
-rw-r--r--drivers/acpi/sleep.c97
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/sysfs.c2
-rw-r--r--drivers/acpi/tables.c6
-rw-r--r--drivers/acpi/thermal.c10
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/ata/ahci.c101
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/libahci.c124
-rw-r--r--drivers/ata/libata-acpi.c18
-rw-r--r--drivers/ata/libata-core.c22
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/atm/iphase.h146
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/dd.c7
-rw-r--r--drivers/base/pinctrl.c69
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/opp.c19
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/base/regmap/Makefile2
-rw-r--r--drivers/base/regmap/internal.h22
-rw-r--r--drivers/base/regmap/regcache-flat.c72
-rw-r--r--drivers/base/regmap/regcache.c1
-rw-r--r--drivers/base/regmap/regmap-debugfs.c52
-rw-r--r--drivers/base/regmap/regmap-irq.c125
-rw-r--r--drivers/base/regmap/regmap-mmio.c79
-rw-r--r--drivers/base/regmap/regmap-spi.c54
-rw-r--r--drivers/base/regmap/regmap.c353
-rw-r--r--drivers/bcma/bcma_private.h5
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c2
-rw-r--r--drivers/bcma/driver_gpio.c5
-rw-r--r--drivers/bcma/main.c7
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_req.h1
-rw-r--r--drivers/block/drbd/drbd_state.c7
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c24
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c18
-rw-r--r--drivers/block/xen-blkfront.c10
-rw-r--r--drivers/bluetooth/ath3k.c10
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/virtio_console.c3
-rw-r--r--drivers/clk/Makefile11
-rw-r--r--drivers/clk/clk-divider.c6
-rw-r--r--drivers/clk/clk-fixed-factor.c5
-rw-r--r--drivers/clk/clk-fixed-rate.c3
-rw-r--r--drivers/clk/clk-highbank.c20
-rw-r--r--drivers/clk/clk-max77686.c37
-rw-r--r--drivers/clk/clk-prima2.c205
-rw-r--r--drivers/clk/clk-sunxi.c30
-rw-r--r--drivers/clk/clk-vt8500.c143
-rw-r--r--drivers/clk/clk-zynq.c14
-rw-r--r--drivers/clk/clk.c169
-rw-r--r--drivers/clk/mvebu/clk-cpu.c9
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.c1
-rw-r--r--drivers/clk/mxs/clk-imx23.c2
-rw-r--r--drivers/clk/mxs/clk-imx28.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c1
-rw-r--r--drivers/clk/versatile/clk-vexpress.c8
-rw-r--r--drivers/clk/x86/Makefile2
-rw-r--r--drivers/clk/x86/clk-lpss.c99
-rw-r--r--drivers/clk/x86/clk-lpss.h36
-rw-r--r--drivers/clk/x86/clk-lpt.c86
-rw-r--r--drivers/clocksource/sunxi_timer.c4
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/Kconfig.arm30
-rw-r--r--drivers/cpufreq/Kconfig.x8615
-rw-r--r--drivers/cpufreq/Makefile11
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c15
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c47
-rw-r--r--drivers/cpufreq/cpufreq.c460
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c19
-rw-r--r--drivers/cpufreq/cpufreq_governor.c131
-rw-r--r--drivers/cpufreq/cpufreq_governor.h6
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c70
-rw-r--r--drivers/cpufreq/cpufreq_stats.c49
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c2
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c4
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c188
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c152
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c388
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c178
-rw-r--r--drivers/cpufreq/freq_table.c15
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c120
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c336
-rw-r--r--drivers/cpufreq/intel_pstate.c823
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c259
-rw-r--r--drivers/cpufreq/maple-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c7
-rw-r--r--drivers/cpufreq/powernow-k8.c46
-rw-r--r--drivers/cpufreq/spear-cpufreq.c12
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/devfreq/exynos4_bus.c94
-rw-r--r--drivers/dma/imx-dma.c5
-rw-r--r--drivers/dma/ioat/dma_v3.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c8
-rw-r--r--drivers/edac/amd64_edac.c214
-rw-r--r--drivers/edac/amd64_edac.h12
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/mce_amd.c166
-rw-r--r--drivers/edac/mce_amd.h13
-rw-r--r--drivers/edac/mpc85xx_edac.c4
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efivars.c9
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-ab8500.c520
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpio/gpio-samsung.c14
-rw-r--r--drivers/gpio/gpiolib-of.c37
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c121
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c47
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c17
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/core/falcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c33
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c86
-rw-r--r--drivers/gpu/drm/radeon/ni.c14
-rw-r--r--drivers/gpu/drm/radeon/r600.c21
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5152
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c24
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c13
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/Kconfig2
-rw-r--r--drivers/hv/hv_balloon.c35
-rw-r--r--drivers/hwmon/Kconfig32
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adm1021.c4
-rw-r--r--drivers/hwmon/adm1026.c16
-rw-r--r--drivers/hwmon/adm1031.c12
-rw-r--r--drivers/hwmon/adm9240.c6
-rw-r--r--drivers/hwmon/ads7828.c6
-rw-r--r--drivers/hwmon/adt7410.c28
-rw-r--r--drivers/hwmon/adt7462.c20
-rw-r--r--drivers/hwmon/adt7470.c20
-rw-r--r--drivers/hwmon/adt7475.c18
-rw-r--r--drivers/hwmon/amc6821.c32
-rw-r--r--drivers/hwmon/asb100.c10
-rw-r--r--drivers/hwmon/asc7621.c26
-rw-r--r--drivers/hwmon/asus_atk0110.c4
-rw-r--r--drivers/hwmon/coretemp.c5
-rw-r--r--drivers/hwmon/dme1737.c15
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc6w201.c6
-rw-r--r--drivers/hwmon/f71882fg.c25
-rw-r--r--drivers/hwmon/f75375s.c12
-rw-r--r--drivers/hwmon/fschmd.c4
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gl518sm.c10
-rw-r--r--drivers/hwmon/gl520sm.c9
-rw-r--r--drivers/hwmon/gpio-fan.c4
-rw-r--r--drivers/hwmon/ina209.c636
-rw-r--r--drivers/hwmon/it87.c52
-rw-r--r--drivers/hwmon/jc42.c10
-rw-r--r--drivers/hwmon/lm63.c8
-rw-r--r--drivers/hwmon/lm73.c136
-rw-r--r--drivers/hwmon/lm75.h2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm78.c6
-rw-r--r--drivers/hwmon/lm80.c8
-rw-r--r--drivers/hwmon/lm85.c10
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/lm93.c28
-rw-r--r--drivers/hwmon/lm95245.c4
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max6639.c4
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c4
-rw-r--r--drivers/hwmon/max6697.c726
-rw-r--r--drivers/hwmon/ntc_thermistor.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/max34440.c75
-rw-r--r--drivers/hwmon/pmbus/pmbus.h11
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c840
-rw-r--r--drivers/hwmon/pmbus/zl6100.c176
-rw-r--r--drivers/hwmon/sht15.c157
-rw-r--r--drivers/hwmon/sis5595.c6
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/hwmon/thmc50.c6
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp401.c14
-rw-r--r--drivers/hwmon/via686a.c17
-rw-r--r--drivers/hwmon/vt1211.c10
-rw-r--r--drivers/hwmon/vt8231.c22
-rw-r--r--drivers/hwmon/w83627ehf.c17
-rw-r--r--drivers/hwmon/w83627hf.c23
-rw-r--r--drivers/hwmon/w83781d.c17
-rw-r--r--drivers/hwmon/w83791d.c10
-rw-r--r--drivers/hwmon/w83792d.c25
-rw-r--r--drivers/hwmon/w83793.c18
-rw-r--r--drivers/hwmon/w83795.c28
-rw-r--r--drivers/hwmon/w83l786ng.c17
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c6
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sirf.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/idle/intel_idle.c281
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c6
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/input-mt.c1
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/walkera0701.c82
-rw-r--r--drivers/input/keyboard/Kconfig16
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/atkbd.c74
-rw-r--r--drivers/input/keyboard/goldfish_events.c194
-rw-r--r--drivers/input/keyboard/imx_keypad.c43
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/matrix_keypad.c8
-rw-r--r--drivers/input/keyboard/qt2160.c141
-rw-r--r--drivers/input/keyboard/tegra-kbc.c478
-rw-r--r--drivers/input/misc/adxl34x.c7
-rw-r--r--drivers/input/misc/atlas_btns.c2
-rw-r--r--drivers/input/misc/bma150.c14
-rw-r--r--drivers/input/misc/twl4030-vibra.c45
-rw-r--r--drivers/input/misc/twl6040-vibra.c100
-rw-r--r--drivers/input/misc/wm831x-on.c4
-rw-r--r--drivers/input/mouse/Kconfig22
-rw-r--r--drivers/input/mouse/Makefile2
-rw-r--r--drivers/input/mouse/alps.c773
-rw-r--r--drivers/input/mouse/alps.h145
-rw-r--r--drivers/input/mouse/cyapa.c973
-rw-r--r--drivers/input/mouse/cypress_ps2.c725
-rw-r--r--drivers/input/mouse/cypress_ps2.h191
-rw-r--r--drivers/input/mouse/psmouse-base.c32
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/synaptics.c32
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/tablet/wacom_sys.c6
-rw-r--r--drivers/input/tablet/wacom_wac.c192
-rw-r--r--drivers/input/tablet/wacom_wac.h2
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c1
-rw-r--r--drivers/input/touchscreen/mms114.c54
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c2
-rw-r--r--drivers/input/touchscreen/tsc2005.c1
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c8
-rw-r--r--drivers/iommu/amd_iommu_init.c34
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c23
-rw-r--r--drivers/iommu/intel_irq_remapping.c48
-rw-r--r--drivers/iommu/irq_remapping.c231
-rw-r--r--drivers/iommu/irq_remapping.h1
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/mISDN/stack.c7
-rw-r--r--drivers/mailbox/Kconfig19
-rw-r--r--drivers/mailbox/Makefile1
-rw-r--r--drivers/mailbox/pl320-ipc.c199
-rw-r--r--drivers/md/dm-raid.c101
-rw-r--r--drivers/md/dm-thin.c13
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c6
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/platform/coda.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c3
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c88
-rw-r--r--drivers/media/radio/radio-keene.c1
-rw-r--r--drivers/media/radio/radio-si4713.c1
-rw-r--r--drivers/media/radio/radio-wl1273.c1
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c10
-rw-r--r--drivers/media/usb/gspca/kinect.c1
-rw-r--r--drivers/media/usb/gspca/sonixb.c13
-rw-r--r--drivers/media/usb/gspca/sonixj.c1
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c4
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ab8500-core.c15
-rw-r--r--drivers/mfd/arizona-core.c7
-rw-r--r--drivers/mfd/arizona-irq.c18
-rw-r--r--drivers/mfd/da9052-i2c.c61
-rw-r--r--drivers/mfd/db8500-prcmu.c13
-rw-r--r--drivers/mfd/max77686.c18
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/pcf50633-core.c5
-rw-r--r--drivers/mfd/rtl8411.c29
-rw-r--r--drivers/mfd/rts5209.c21
-rw-r--r--drivers/mfd/rts5229.c21
-rw-r--r--drivers/mfd/rtsx_pcr.c27
-rw-r--r--drivers/mfd/sec-core.c75
-rw-r--r--drivers/mfd/tc3589x.c17
-rw-r--r--drivers/mfd/twl4030-power.c2
-rw-r--r--drivers/mfd/vexpress-config.c8
-rw-r--r--drivers/mfd/vexpress-sysreg.c32
-rw-r--r--drivers/mfd/wm5102-tables.c3
-rw-r--r--drivers/mfd/wm5110-tables.c1
-rw-r--r--drivers/misc/sgi-gru/grufile.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c37
-rw-r--r--drivers/mmc/host/mmci.c306
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/mvsdio.c92
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c30
-rw-r--r--drivers/mtd/devices/Kconfig1
-rw-r--r--drivers/mtd/maps/physmap_of.c2
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c7
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/can/c_can/c_can.c10
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/peak_pci.c3
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c71
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c62
-rw-r--r--drivers/net/ethernet/cadence/macb.c5
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h9
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c13
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c107
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/phy/icplus.c29
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c4
-rw-r--r--drivers/net/tun.c83
-rw-r--r--drivers/net/usb/cdc_mbim.c19
-rw-r--r--drivers/net/usb/cdc_ncm.c34
-rw-r--r--drivers/net/usb/dm9601.c52
-rw-r--r--drivers/net/usb/qmi_wwan.c16
-rw-r--r--drivers/net/usb/usbnet.c39
-rw-r--r--drivers/net/virtio_net.c118
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/wimax/i2400m/netdev.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c40
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c31
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c13
-rw-r--r--drivers/net/wireless/iwlegacy/common.c35
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c26
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c17
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/mwifiex/scan.c9
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c9
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/mwl8k.c36
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c7
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c3
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c23
-rw-r--r--drivers/net/xen-netback/netback.c115
-rw-r--r--drivers/of/base.c303
-rw-r--r--drivers/of/device.c13
-rw-r--r--drivers/of/of_private.h36
-rw-r--r--drivers/of/selftest.c54
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c56
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c11
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c15
-rw-r--r--drivers/pci/hotplug/shpchp.h3
-rw-r--r--drivers/pci/hotplug/shpchp_core.c36
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c6
-rw-r--r--drivers/pci/msi.c26
-rw-r--r--drivers/pci/pci-acpi.c56
-rw-r--r--drivers/pci/pci.c26
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c63
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pinctrl/Kconfig37
-rw-r--r--drivers/pinctrl/Makefile9
-rw-r--r--drivers/pinctrl/core.c118
-rw-r--r--drivers/pinctrl/core.h29
-rw-r--r--drivers/pinctrl/devicetree.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c8
-rw-r--r--drivers/pinctrl/pinconf-generic.c4
-rw-r--r--drivers/pinctrl/pinconf.c207
-rw-r--r--drivers/pinctrl/pinctrl-ab8500.c484
-rw-r--r--drivers/pinctrl/pinctrl-ab8505.c380
-rw-r--r--drivers/pinctrl/pinctrl-ab8540.c407
-rw-r--r--drivers/pinctrl/pinctrl-ab9540.c485
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c1012
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h234
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c10
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c38
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c56
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h1
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c9
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c392
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c79
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c18
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c1505
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h478
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c14
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h16
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c2769
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c6
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c60
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/classmate-laptop.c10
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c2
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c3
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/platform/x86/xo15-ebook.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c10
-rw-r--r--drivers/pnp/pnpbios/Kconfig4
-rw-r--r--drivers/power/88pm860x_battery.c13
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile4
-rw-r--r--drivers/power/ab8500_bmdata.c524
-rw-r--r--drivers/power/ab8500_btemp.c192
-rw-r--r--drivers/power/ab8500_charger.c1067
-rw-r--r--drivers/power/ab8500_fg.c447
-rw-r--r--drivers/power/abx500_chargalg.c204
-rw-r--r--drivers/power/bq2415x_charger.c54
-rw-r--r--drivers/power/bq27x00_battery.c12
-rw-r--r--drivers/power/charger-manager.c310
-rw-r--r--drivers/power/da9030_battery.c1
-rw-r--r--drivers/power/da9052-battery.c2
-rw-r--r--drivers/power/ds2782_battery.c69
-rw-r--r--drivers/power/generic-adc-battery.c16
-rw-r--r--drivers/power/goldfish_battery.c236
-rw-r--r--drivers/power/lp8727_charger.c8
-rw-r--r--drivers/power/lp8788-charger.c17
-rw-r--r--drivers/power/max17040_battery.c4
-rw-r--r--drivers/power/pm2301_charger.c1088
-rw-r--r--drivers/power/pm2301_charger.h513
-rw-r--r--drivers/power/power_supply_sysfs.c3
-rw-r--r--drivers/power/reset/Kconfig17
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/qnap-poweroff.c116
-rw-r--r--drivers/power/reset/restart-poweroff.c65
-rw-r--r--drivers/regulator/88pm8607.c40
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/anatop-regulator.c41
-rw-r--r--drivers/regulator/arizona-micsupp.c78
-rw-r--r--drivers/regulator/as3711-regulator.c2
-rw-r--r--drivers/regulator/core.c43
-rw-r--r--drivers/regulator/da9052-regulator.c46
-rw-r--r--drivers/regulator/da9055-regulator.c3
-rw-r--r--drivers/regulator/dbx500-prcmu.c1
-rw-r--r--drivers/regulator/gpio-regulator.c7
-rw-r--r--drivers/regulator/lp3971.c22
-rw-r--r--drivers/regulator/lp3972.c22
-rw-r--r--drivers/regulator/lp872x.c36
-rw-r--r--drivers/regulator/lp8755.c566
-rw-r--r--drivers/regulator/lp8788-buck.c41
-rw-r--r--drivers/regulator/lp8788-ldo.c133
-rw-r--r--drivers/regulator/max77686.c29
-rw-r--r--drivers/regulator/max8907-regulator.c10
-rw-r--r--drivers/regulator/max8925-regulator.c3
-rw-r--r--drivers/regulator/max8997.c128
-rw-r--r--drivers/regulator/max8998.c16
-rw-r--r--drivers/regulator/mc13892-regulator.c111
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c25
-rw-r--r--drivers/regulator/mc13xxx.h4
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c7
-rw-r--r--drivers/regulator/s2mps11.c4
-rw-r--r--drivers/regulator/s5m8767.c268
-rw-r--r--drivers/regulator/tps51632-regulator.c152
-rw-r--r--drivers/regulator/tps6507x-regulator.c92
-rw-r--r--drivers/regulator/tps65090-regulator.c106
-rw-r--r--drivers/regulator/tps65217-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c54
-rw-r--r--drivers/regulator/tps65910-regulator.c8
-rw-r--r--drivers/regulator/tps80031-regulator.c2
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c7
-rw-r--r--drivers/rtc/rtc-isl1208.c3
-rw-r--r--drivers/rtc/rtc-pl031.c10
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/systohc.c44
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/spi/Kconfig15
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-altera.c2
-rw-r--r--drivers/spi/spi-ath79.c115
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-au1550.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c179
-rw-r--r--drivers/spi/spi-bfin-sport.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c5
-rw-r--r--drivers/spi/spi-bitbang.c33
-rw-r--r--drivers/spi/spi-clps711x.c2
-rw-r--r--drivers/spi/spi-coldfire-qspi.c3
-rw-r--r--drivers/spi/spi-davinci.c119
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/spi/spi-falcon.c3
-rw-r--r--drivers/spi/spi-fsl-spi.c4
-rw-r--r--drivers/spi/spi-gpio.c23
-rw-r--r--drivers/spi/spi-imx.c1
-rw-r--r--drivers/spi/spi-mpc512x-psc.c17
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-oc-tiny.c8
-rw-r--r--drivers/spi/spi-omap-100k.c6
-rw-r--r--drivers/spi/spi-omap-uwire.c6
-rw-r--r--drivers/spi/spi-omap2-mcspi.c42
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-ppc4xx.c10
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c392
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c133
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c490
-rw-r--r--drivers/spi/spi-pxa2xx.c1103
-rw-r--r--drivers/spi/spi-pxa2xx.h221
-rw-r--r--drivers/spi/spi-s3c64xx.c80
-rw-r--r--drivers/spi/spi-sh-msiof.c56
-rw-r--r--drivers/spi/spi-sirf.c10
-rw-r--r--drivers/spi/spi-tegra20-sflash.c14
-rw-r--r--drivers/spi/spi-tegra20-slink.c15
-rw-r--r--drivers/spi/spi-txx9.c12
-rw-r--r--drivers/spi/spi.c22
-rw-r--r--drivers/ssb/driver_gpio.c12
-rw-r--r--drivers/ssb/main.c9
-rw-r--r--drivers/ssb/ssb_private.h5
-rw-r--r--drivers/staging/csr/bh.c2
-rw-r--r--drivers/staging/csr/unifi_sme.c2
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c2
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c2
-rw-r--r--drivers/staging/iio/trigger/Kconfig1
-rw-r--r--drivers/staging/omapdrm/Kconfig2
-rw-r--r--drivers/staging/quickstart/quickstart.c2
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c2
-rw-r--r--drivers/staging/vt6656/bssdb.h1
-rw-r--r--drivers/staging/vt6656/int.h1
-rw-r--r--drivers/staging/vt6656/iocmd.h33
-rw-r--r--drivers/staging/vt6656/iowpa.h8
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_fabric_configfs.c5
-rw-r--r--drivers/target/target_core_sbc.c18
-rw-r--r--drivers/target/target_core_spc.c44
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.c11
-rw-r--r--drivers/tty/serial/8250/8250.h1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c42
-rw-r--r--drivers/tty/serial/ifx6x60.c4
-rw-r--r--drivers/tty/serial/max3100.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c6
-rw-r--r--drivers/tty/serial/samsung.c1
-rw-r--r--drivers/tty/serial/vt8500_serial.c2
-rw-r--r--drivers/tty/sysrq.c277
-rw-r--r--drivers/usb/core/hcd.c44
-rw-r--r--drivers/usb/core/hub.c70
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/f_fs.c6
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c40
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c42
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h5
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c9
-rw-r--r--drivers/usb/host/ehci-mxc.c120
-rw-r--r--drivers/usb/host/ehci-q.c50
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/ehci-timer.c29
-rw-r--r--drivers/usb/host/ehci.h7
-rw-r--r--drivers/usb/host/pci-quirks.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c15
-rw-r--r--drivers/usb/host/uhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/musb/cppi_dma.c4
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/io_ti.c3
-rw-r--r--drivers/usb/serial/option.c22
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/initializers.c76
-rw-r--r--drivers/usb/storage/initializers.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h329
-rw-r--r--drivers/usb/storage/usb.c12
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c4
-rw-r--r--drivers/vhost/net.c41
-rw-r--r--drivers/vhost/tcm_vhost.c4
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/exynos/exynos_dp_core.c6
-rw-r--r--drivers/video/imxfb.c13
-rw-r--r--drivers/video/omap2/dss/dss_features.c1
-rw-r--r--drivers/xen/cpu_hotplug.c4
-rw-r--r--drivers/xen/events.c11
-rw-r--r--drivers/xen/gntdev.c130
-rw-r--r--drivers/xen/grant-table.c48
-rw-r--r--drivers/xen/pcpu.c3
-rw-r--r--drivers/xen/privcmd.c89
-rw-r--r--drivers/xen/xen-acpi-pad.c3
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c14
-rw-r--r--fs/Kconfig10
-rw-r--r--fs/binfmt_elf.c8
-rw-r--r--fs/binfmt_elf_fdpic.c7
-rw-r--r--fs/btrfs/extent-tree.c28
-rw-r--r--fs/btrfs/extent_map.c14
-rw-r--r--fs/btrfs/extent_map.h1
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c35
-rw-r--r--fs/btrfs/free-space-cache.c20
-rw-r--r--fs/btrfs/inode.c137
-rw-r--r--fs/btrfs/ioctl.c134
-rw-r--r--fs/btrfs/ordered-data.c13
-rw-r--r--fs/btrfs/qgroup.c20
-rw-r--r--fs/btrfs/scrub.c25
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c46
-rw-r--r--fs/btrfs/tree-log.c10
-rw-r--r--fs/btrfs/volumes.c26
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/dlm/user.c8
-rw-r--r--fs/f2fs/acl.c13
-rw-r--r--fs/f2fs/checkpoint.c3
-rw-r--r--fs/f2fs/data.c17
-rw-r--r--fs/f2fs/debug.c50
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h18
-rw-r--r--fs/f2fs/file.c16
-rw-r--r--fs/f2fs/gc.c68
-rw-r--r--fs/f2fs/inode.c3
-rw-r--r--fs/f2fs/node.c19
-rw-r--r--fs/f2fs/recovery.c10
-rw-r--r--fs/f2fs/segment.c2
-rw-r--r--fs/f2fs/super.c97
-rw-r--r--fs/f2fs/xattr.c2
-rw-r--r--fs/fuse/Kconfig16
-rw-r--r--fs/fuse/cuse.c36
-rw-r--r--fs/fuse/dev.c5
-rw-r--r--fs/fuse/file.c5
-rw-r--r--fs/gfs2/lock_dlm.c7
-rw-r--r--fs/nfs/namespace.c20
-rw-r--r--fs/nfs/nfs4client.c62
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/super.c22
-rw-r--r--fs/nilfs2/ioctl.c5
-rw-r--r--fs/proc/array.c4
-rw-r--r--fs/pstore/ram.c10
-rw-r--r--fs/select.c1
-rw-r--r--fs/sysfs/group.c42
-rw-r--r--fs/sysfs/symlink.c45
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_bmap.c6
-rw-r--r--fs/xfs/xfs_buf.c20
-rw-r--r--fs/xfs/xfs_buf_item.c12
-rw-r--r--fs/xfs/xfs_dfrag.c4
-rw-r--r--fs/xfs/xfs_iomap.c9
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_trace.h1
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h27
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h159
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h100
-rw-r--r--include/acpi/acpiosxf.h13
-rw-r--r--include/acpi/acpixf.h68
-rw-r--r--include/acpi/acrestyp.h17
-rw-r--r--include/acpi/actbl.h9
-rw-r--r--include/acpi/actbl1.h8
-rw-r--r--include/acpi/actbl2.h25
-rw-r--r--include/acpi/actbl3.h34
-rw-r--r--include/acpi/actypes.h38
-rw-r--r--include/acpi/container.h12
-rw-r--r--include/acpi/platform/acenv.h319
-rw-r--r--include/acpi/platform/acgcc.h6
-rw-r--r--include/acpi/platform/aclinux.h3
-rw-r--r--include/asm-generic/cputime.h66
-rw-r--r--include/asm-generic/cputime_jiffies.h72
-rw-r--r--include/asm-generic/cputime_nsecs.h104
-rw-r--r--include/asm-generic/dma-mapping-broken.h16
-rw-r--r--include/asm-generic/pgtable.h6
-rw-r--r--include/asm-generic/syscalls.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h10
-rw-r--r--include/linux/acpi.h25
-rw-r--r--include/linux/aer.h4
-rw-r--r--include/linux/async.h10
-rw-r--r--include/linux/ata.h8
-rw-r--r--include/linux/bma150.h16
-rw-r--r--include/linux/cgroup.h3
-rw-r--r--include/linux/clk-provider.h6
-rw-r--r--include/linux/clk/sunxi.h22
-rw-r--r--include/linux/clockchips.h9
-rw-r--r--include/linux/context_tracking.h28
-rw-r--r--include/linux/cpufreq.h34
-rw-r--r--include/linux/cpuidle.h22
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/efi.h24
-rw-r--r--include/linux/elevator.h5
-rw-r--r--include/linux/freezer.h5
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/ftrace_event.h6
-rw-r--r--include/linux/hardirq.h8
-rw-r--r--include/linux/hwmon.h12
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/init_task.h12
-rw-r--r--include/linux/input/adxl34x.h2
-rw-r--r--include/linux/input/tegra_kbc.h62
-rw-r--r--include/linux/irq.h8
-rw-r--r--include/linux/irq_work.h22
-rw-r--r--include/linux/kernel_stat.h2
-rw-r--r--include/linux/kprobes.h12
-rw-r--r--include/linux/kvm_host.h55
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/libps2.h2
-rw-r--r--include/linux/llist.h25
-rw-r--r--include/linux/mailbox.h17
-rw-r--r--include/linux/memcontrol.h2
-rw-r--r--include/linux/mfd/abx500.h17
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h66
-rw-r--r--include/linux/mfd/abx500/ab8500-gpio.h16
-rw-r--r--include/linux/mfd/abx500/ab8500.h279
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h5
-rw-r--r--include/linux/mfd/da9052/da9052.h66
-rw-r--r--include/linux/mfd/da9052/reg.h3
-rw-r--r--include/linux/mfd/rtsx_common.h3
-rw-r--r--include/linux/mfd/rtsx_pci.h25
-rw-r--r--include/linux/mfd/samsung/core.h11
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/module.h10
-rw-r--r--include/linux/of.h13
-rw-r--r--include/linux/of_gpio.h40
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/perf_event.h20
-rw-r--r--include/linux/pinctrl/devinfo.h45
-rw-r--r--include/linux/pinctrl/pinconf-generic.h16
-rw-r--r--include/linux/pinctrl/pinctrl.h1
-rw-r--r--include/linux/platform_data/imx-iram.h (renamed from arch/arm/mach-imx/iram.h)0
-rw-r--r--include/linux/platform_data/lp8755.h71
-rw-r--r--include/linux/platform_data/max6697.h36
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h3
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/pm2301_charger.h61
-rw-r--r--include/linux/pm_runtime.h7
-rw-r--r--include/linux/power/bq2415x_charger.h3
-rw-r--r--include/linux/power_supply.h2
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/profile.h13
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/pxa2xx_ssp.h18
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/regmap.h97
-rw-r--r--include/linux/regulator/driver.h6
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/sched.h199
-rw-r--r--include/linux/sched/rt.h58
-rw-r--r--include/linux/sched/sysctl.h110
-rw-r--r--include/linux/security.h59
-rw-r--r--include/linux/smpboot.h5
-rw-r--r--include/linux/spi/pxa2xx_spi.h108
-rw-r--r--include/linux/spi/spi.h5
-rw-r--r--include/linux/spi/spi_gpio.h4
-rw-r--r--include/linux/srcu.h26
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/sysfs.h16
-rw-r--r--include/linux/tick.h17
-rw-r--r--include/linux/time.h13
-rw-r--r--include/linux/tsacct_kern.h3
-rw-r--r--include/linux/uprobes.h23
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/linux/vtime.h59
-rw-r--r--include/linux/workqueue.h35
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/transp_v6.h22
-rw-r--r--include/trace/events/power.h92
-rw-r--r--include/trace/events/ras.h77
-rw-r--r--include/trace/events/rcu.h31
-rw-r--r--include/trace/events/workqueue.h10
-rw-r--r--include/uapi/linux/auto_fs.h25
-rw-r--r--include/uapi/linux/kvm.h9
-rw-r--r--include/uapi/linux/perf_event.h3
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/usb/ch9.h6
-rw-r--r--init/Kconfig42
-rw-r--r--init/do_mounts_initrd.c7
-rw-r--r--init/init_task.c2
-rw-r--r--init/initramfs.c8
-rw-r--r--init/main.c24
-rw-r--r--kernel/acct.c6
-rw-r--r--kernel/async.c156
-rw-r--r--kernel/cgroup.c288
-rw-r--r--kernel/compat.c23
-rw-r--r--kernel/context_tracking.c114
-rw-r--r--kernel/cpu.c6
-rw-r--r--kernel/cpuset.c884
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/delayacct.c7
-rw-r--r--kernel/events/core.c25
-rw-r--r--kernel/events/hw_breakpoint.c2
-rw-r--r--kernel/events/uprobes.c466
-rw-r--r--kernel/exit.c10
-rw-r--r--kernel/fork.c12
-rw-r--r--kernel/futex.c1
-rw-r--r--kernel/hrtimer.c38
-rw-r--r--kernel/irq/chip.c30
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/irq/spurious.c7
-rw-r--r--kernel/irq_work.c150
-rw-r--r--kernel/kmod.c9
-rw-r--r--kernel/kprobes.c31
-rw-r--r--kernel/module.c154
-rw-r--r--kernel/mutex.c1
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/posix-cpu-timers.c51
-rw-r--r--kernel/posix-timers.c2
-rw-r--r--kernel/power/autosleep.c2
-rw-r--r--kernel/power/main.c29
-rw-r--r--kernel/power/process.c4
-rw-r--r--kernel/power/qos.c9
-rw-r--r--kernel/power/suspend.c69
-rw-r--r--kernel/printk.c45
-rw-r--r--kernel/profile.c24
-rw-r--r--kernel/ptrace.c80
-rw-r--r--kernel/rcu.h7
-rw-r--r--kernel/rcupdate.c60
-rw-r--r--kernel/rcutiny.c8
-rw-r--r--kernel/rcutiny_plugin.h56
-rw-r--r--kernel/rcutorture.c66
-rw-r--r--kernel/rcutree.c260
-rw-r--r--kernel/rcutree.h11
-rw-r--r--kernel/rcutree_plugin.h13
-rw-r--r--kernel/rtmutex-debug.c1
-rw-r--r--kernel/rtmutex-tester.c1
-rw-r--r--kernel/rtmutex.c1
-rw-r--r--kernel/sched/auto_group.c3
-rw-r--r--kernel/sched/core.c76
-rw-r--r--kernel/sched/cpupri.c2
-rw-r--r--kernel/sched/cputime.c314
-rw-r--r--kernel/sched/debug.c11
-rw-r--r--kernel/sched/fair.c29
-rw-r--r--kernel/sched/rt.c28
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/signal.c36
-rw-r--r--kernel/smp.c13
-rw-r--r--kernel/smpboot.c5
-rw-r--r--kernel/softirq.c6
-rw-r--r--kernel/srcu.c37
-rw-r--r--kernel/stop_machine.c156
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/time.c8
-rw-r--r--kernel/time/Kconfig9
-rw-r--r--kernel/time/ntp.c22
-rw-r--r--kernel/time/tick-broadcast.c38
-rw-r--r--kernel/time/tick-sched.c12
-rw-r--r--kernel/time/timekeeping.c45
-rw-r--r--kernel/timeconst.pl6
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/Kconfig33
-rw-r--r--kernel/trace/blktrace.c2
-rw-r--r--kernel/trace/ftrace.c90
-rw-r--r--kernel/trace/power-traces.c3
-rw-r--r--kernel/trace/ring_buffer.c108
-rw-r--r--kernel/trace/trace.c253
-rw-r--r--kernel/trace/trace.h134
-rw-r--r--kernel/trace/trace_clock.c5
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_functions.c61
-rw-r--r--kernel/trace/trace_functions_graph.c68
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_selftest.c21
-rw-r--r--kernel/trace/trace_syscalls.c18
-rw-r--r--kernel/trace/trace_uprobe.c217
-rw-r--r--kernel/tsacct.c44
-rw-r--r--kernel/watchdog.c1
-rw-r--r--kernel/workqueue.c1530
-rw-r--r--kernel/workqueue_internal.h65
-rw-r--r--kernel/workqueue_sched.h9
-rw-r--r--lib/Kconfig.debug117
-rw-r--r--lib/bug.c1
-rw-r--r--lib/digsig.c2
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/migrate.c4
-rw-r--r--mm/mlock.c6
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/mremap.c1
-rw-r--r--mm/nommu.c1
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c20
-rw-r--r--net/batman-adv/distributed-arp-table.c21
-rw-r--r--net/bluetooth/hci_conn.c6
-rw-r--r--net/bluetooth/hci_core.c8
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap_core.c11
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bluetooth/smp.c13
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/pktgen.c9
-rw-r--r--net/core/request_sock.c2
-rw-r--r--net/core/scm.c5
-rw-r--r--net/core/skbuff.c46
-rw-r--r--net/ipv4/ah4.c18
-rw-r--r--net/ipv4/arp.c21
-rw-r--r--net/ipv4/datagram.c25
-rw-r--r--net/ipv4/esp4.c12
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/ipcomp.c7
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/raw.c1
-rw-r--r--net/ipv4/route.c54
-rw-r--r--net/ipv4/tcp_cong.c14
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_ipv4.c15
-rw-r--r--net/ipv4/udp.c1
-rw-r--r--net/ipv6/addrconf.c1
-rw-r--r--net/ipv6/ah6.c11
-rw-r--r--net/ipv6/datagram.c16
-rw-r--r--net/ipv6/esp6.c5
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6mr.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c6
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c18
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/l2tp/l2tp_core.c82
-rw-r--r--net/l2tp/l2tp_core.h5
-rw-r--r--net/l2tp/l2tp_ip6.c10
-rw-r--r--net/l2tp/l2tp_ppp.c6
-rw-r--r--net/mac80211/cfg.c15
-rw-r--r--net/mac80211/ieee80211_i.h6
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mlme.c11
-rw-r--r--net/mac80211/offchannel.c19
-rw-r--r--net/mac80211/scan.c15
-rw-r--r--net/mac80211/tx.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c35
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c9
-rw-r--r--net/netfilter/nf_conntrack_standalone.c1
-rw-r--r--net/netfilter/x_tables.c28
-rw-r--r--net/netfilter/xt_CT.c4
-rw-r--r--net/openvswitch/vport-netdev.c16
-rw-r--r--net/packet/af_packet.c10
-rw-r--r--net/rfkill/input.c8
-rw-r--r--net/sched/sch_htb.c4
-rw-r--r--net/sched/sch_netem.c12
-rw-r--r--net/sctp/Kconfig4
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sctp/endpointola.c5
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/outqueue.c12
-rw-r--r--net/sctp/sm_statefuns.c4
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sctp/sysctl.c4
-rw-r--r--net/sunrpc/sched.c18
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_replay.c4
-rw-r--r--samples/Kconfig6
-rw-r--r--samples/Makefile2
-rw-r--r--samples/seccomp/Makefile2
-rw-r--r--samples/tracepoints/Makefile6
-rw-r--r--samples/tracepoints/tp-samples-trace.h11
-rw-r--r--samples/tracepoints/tracepoint-probe-sample.c57
-rw-r--r--samples/tracepoints/tracepoint-probe-sample2.c44
-rw-r--r--samples/tracepoints/tracepoint-sample.c57
-rw-r--r--scripts/Makefile.lib14
-rwxr-xr-xscripts/checkpatch.pl10
-rw-r--r--security/capability.c24
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/integrity/evm/evm_crypto.c4
-rw-r--r--security/security.c28
-rw-r--r--security/selinux/hooks.c50
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/include/objsec.h4
-rw-r--r--sound/pci/hda/hda_codec.c5
-rw-r--r--sound/pci/hda/hda_intel.c49
-rw-r--r--sound/pci/hda/patch_conexant.c9
-rw-r--r--sound/pci/hda/patch_realtek.c4
-rw-r--r--sound/soc/codecs/arizona.c5
-rw-r--r--sound/soc/codecs/wm2200.c3
-rw-r--r--sound/soc/codecs/wm5102.c7
-rw-r--r--sound/soc/codecs/wm5110.c7
-rw-r--r--sound/soc/codecs/wm_adsp.c6
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c21
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c22
-rw-r--r--sound/soc/fsl/imx-pcm.c32
-rw-r--r--sound/soc/fsl/imx-pcm.h18
-rw-r--r--sound/soc/soc-dapm.c12
-rw-r--r--sound/usb/mixer.c17
-rw-r--r--tools/Makefile21
-rw-r--r--tools/cgroup/.gitignore1
-rw-r--r--tools/cgroup/Makefile11
-rw-r--r--tools/cgroup/cgroup_event_listener.c (renamed from Documentation/cgroups/cgroup_event_listener.c)72
-rw-r--r--tools/lib/traceevent/event-parse.c49
-rw-r--r--tools/lib/traceevent/event-parse.h3
-rw-r--r--tools/lib/traceevent/event-utils.h3
-rw-r--r--tools/lib/traceevent/parse-filter.c3
-rw-r--r--tools/lib/traceevent/parse-utils.c19
-rw-r--r--tools/lib/traceevent/trace-seq.c3
-rw-r--r--tools/perf/Documentation/Makefile4
-rw-r--r--tools/perf/Documentation/perf-annotate.txt7
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt7
-rw-r--r--tools/perf/Documentation/perf-diff.txt4
-rw-r--r--tools/perf/Documentation/perf-evlist.txt4
-rw-r--r--tools/perf/Documentation/perf-report.txt41
-rw-r--r--tools/perf/Documentation/perf-script-python.txt2
-rw-r--r--tools/perf/Documentation/perf-stat.txt11
-rw-r--r--tools/perf/Documentation/perf-test.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt2
-rw-r--r--tools/perf/MANIFEST10
-rw-r--r--tools/perf/Makefile104
-rw-r--r--tools/perf/arch/common.c1
-rw-r--r--tools/perf/bench/bench.h1
-rw-r--r--tools/perf/bench/numa.c1731
-rw-r--r--tools/perf/builtin-annotate.c30
-rw-r--r--tools/perf/builtin-bench.c19
-rw-r--r--tools/perf/builtin-buildid-cache.c96
-rw-r--r--tools/perf/builtin-buildid-list.c21
-rw-r--r--tools/perf/builtin-diff.c205
-rw-r--r--tools/perf/builtin-evlist.c88
-rw-r--r--tools/perf/builtin-kmem.c16
-rw-r--r--tools/perf/builtin-kvm.c3
-rw-r--r--tools/perf/builtin-record.c168
-rw-r--r--tools/perf/builtin-report.c93
-rw-r--r--tools/perf/builtin-sched.c6
-rw-r--r--tools/perf/builtin-script.c17
-rw-r--r--tools/perf/builtin-stat.c328
-rw-r--r--tools/perf/builtin-top.c372
-rw-r--r--tools/perf/builtin-trace.c2
-rw-r--r--tools/perf/config/feature-tests.mak11
-rw-r--r--tools/perf/config/utilities.mak6
-rw-r--r--tools/perf/perf.c32
-rw-r--r--tools/perf/perf.h32
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-record2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report3
-rw-r--r--tools/perf/scripts/perl/rwtop.pl6
-rw-r--r--tools/perf/scripts/perl/workqueue-stats.pl129
-rw-r--r--tools/perf/tests/attr.c9
-rw-r--r--tools/perf/tests/attr.py27
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/test-record-group2
-rw-r--r--tools/perf/tests/attr/test-record-group14
-rw-r--r--tools/perf/tests/builtin-test.c40
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c4
-rw-r--r--tools/perf/tests/hists_link.c500
-rw-r--r--tools/perf/tests/mmap-basic.c40
-rw-r--r--tools/perf/tests/open-syscall-all-cpus.c19
-rw-r--r--tools/perf/tests/open-syscall.c17
-rw-r--r--tools/perf/tests/parse-events.c324
-rw-r--r--tools/perf/tests/perf-record.c20
-rw-r--r--tools/perf/tests/pmu.c11
-rw-r--r--tools/perf/tests/python-use.c23
-rw-r--r--tools/perf/tests/tests.h11
-rw-r--r--tools/perf/tests/util.c30
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c7
-rw-r--r--tools/perf/ui/browser.c6
-rw-r--r--tools/perf/ui/browsers/annotate.c33
-rw-r--r--tools/perf/ui/browsers/hists.c341
-rw-r--r--tools/perf/ui/gtk/annotate.c229
-rw-r--r--tools/perf/ui/gtk/browser.c235
-rw-r--r--tools/perf/ui/gtk/gtk.h10
-rw-r--r--tools/perf/ui/gtk/helpline.c23
-rw-r--r--tools/perf/ui/gtk/hists.c312
-rw-r--r--tools/perf/ui/helpline.c12
-rw-r--r--tools/perf/ui/helpline.h22
-rw-r--r--tools/perf/ui/hist.c481
-rw-r--r--tools/perf/ui/keysyms.h1
-rw-r--r--tools/perf/ui/setup.c3
-rw-r--r--tools/perf/ui/stdio/hist.c25
-rw-r--r--tools/perf/ui/tui/helpline.c29
-rw-r--r--tools/perf/ui/util.c1
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN4
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/annotate.h24
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/callchain.h5
-rw-r--r--tools/perf/util/cpumap.c54
-rw-r--r--tools/perf/util/cpumap.h9
-rw-r--r--tools/perf/util/debug.c28
-rw-r--r--tools/perf/util/debug.h34
-rw-r--r--tools/perf/util/dso.c6
-rw-r--r--tools/perf/util/dso.h2
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/evlist.c31
-rw-r--r--tools/perf/util/evlist.h34
-rw-r--r--tools/perf/util/evsel.c370
-rw-r--r--tools/perf/util/evsel.h50
-rw-r--r--tools/perf/util/header.c266
-rw-r--r--tools/perf/util/header.h2
-rw-r--r--tools/perf/util/hist.c142
-rw-r--r--tools/perf/util/hist.h26
-rw-r--r--tools/perf/util/include/linux/bitops.h1
-rw-r--r--tools/perf/util/intlist.c36
-rw-r--r--tools/perf/util/intlist.h2
-rw-r--r--tools/perf/util/machine.c784
-rw-r--r--tools/perf/util/machine.h41
-rw-r--r--tools/perf/util/map.c121
-rw-r--r--tools/perf/util/map.h24
-rw-r--r--tools/perf/util/parse-events.c96
-rw-r--r--tools/perf/util/parse-events.h22
-rw-r--r--tools/perf/util/parse-events.y75
-rw-r--r--tools/perf/util/pmu.c46
-rw-r--r--tools/perf/util/pmu.h15
-rw-r--r--tools/perf/util/pmu.y1
-rw-r--r--tools/perf/util/probe-finder.c10
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c9
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c1
-rw-r--r--tools/perf/util/session.c325
-rw-r--r--tools/perf/util/session.h35
-rw-r--r--tools/perf/util/sort.c245
-rw-r--r--tools/perf/util/sort.h15
-rw-r--r--tools/perf/util/string.c18
-rw-r--r--tools/perf/util/strlist.c54
-rw-r--r--tools/perf/util/strlist.h42
-rw-r--r--tools/perf/util/symbol-elf.c14
-rw-r--r--tools/perf/util/symbol-minimal.c1
-rw-r--r--tools/perf/util/symbol.c536
-rw-r--r--tools/perf/util/symbol.h9
-rw-r--r--tools/perf/util/sysfs.c2
-rw-r--r--tools/perf/util/thread.c20
-rw-r--r--tools/perf/util/thread.h1
-rw-r--r--tools/perf/util/top.c22
-rw-r--r--tools/perf/util/top.h10
-rw-r--r--tools/perf/util/util.c24
-rw-r--r--tools/perf/util/util.h4
-rw-r--r--tools/power/acpi/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.836
-rw-r--r--tools/power/x86/turbostat/turbostat.c48
-rw-r--r--tools/vm/.gitignore2
1924 files changed, 67428 insertions, 27668 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
new file mode 100644
index 000000000000..0adeb524c0d4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -0,0 +1,62 @@
1What: /sys/devices/cpu/events/
2 /sys/devices/cpu/events/branch-misses
3 /sys/devices/cpu/events/cache-references
4 /sys/devices/cpu/events/cache-misses
5 /sys/devices/cpu/events/stalled-cycles-frontend
6 /sys/devices/cpu/events/branch-instructions
7 /sys/devices/cpu/events/stalled-cycles-backend
8 /sys/devices/cpu/events/instructions
9 /sys/devices/cpu/events/cpu-cycles
10
11Date: 2013/01/08
12
13Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
14
15Description: Generic performance monitoring events
16
17 A collection of performance monitoring events that may be
18 supported by many/most CPUs. These events can be monitored
19 using the 'perf(1)' tool.
20
21 The contents of each file would look like:
22
23 event=0xNNNN
24
25 where 'N' is a hex digit and the number '0xNNNN' shows the
26 "raw code" for the perf event identified by the file's
27 "basename".
28
29
30What: /sys/devices/cpu/events/PM_LD_MISS_L1
31 /sys/devices/cpu/events/PM_LD_REF_L1
32 /sys/devices/cpu/events/PM_CYC
33 /sys/devices/cpu/events/PM_BRU_FIN
34 /sys/devices/cpu/events/PM_GCT_NOSLOT_CYC
35 /sys/devices/cpu/events/PM_BRU_MPRED
36 /sys/devices/cpu/events/PM_INST_CMPL
37 /sys/devices/cpu/events/PM_CMPLU_STALL
38
39Date: 2013/01/08
40
41Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
42 Linux Powerpc mailing list <linuxppc-dev@ozlabs.org>
43
44Description: POWER-systems specific performance monitoring events
45
46 A collection of performance monitoring events that may be
47 supported by the POWER CPU. These events can be monitored
48 using the 'perf(1)' tool.
49
50 These events may not be supported by other CPUs.
51
52 The contents of each file would look like:
53
54 event=0xNNNN
55
56 where 'N' is a hex digit and the number '0xNNNN' shows the
57 "raw code" for the perf event identified by the file's
58 "basename".
59
60 Further, multiple terms like 'event=0xNNNN' can be specified
61 and separated with comma. All available terms are defined in
62 the /sys/bus/event_source/devices/<dev>/format file.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_resources_D0 b/Documentation/ABI/testing/sysfs-devices-power_resources_D0
new file mode 100644
index 000000000000..73b77a6be196
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_resources_D0
@@ -0,0 +1,13 @@
1What: /sys/devices/.../power_resources_D0/
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../power_resources_D0/ directory is only
6 present for device objects representing ACPI device nodes that
7 use ACPI power resources for power management.
8
9 If present, it contains symbolic links to device directories
10 representing ACPI power resources that need to be turned on for
11 the given device node to be in ACPI power state D0. The names
12 of the links are the same as the names of the directories they
13 point to.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_resources_D1 b/Documentation/ABI/testing/sysfs-devices-power_resources_D1
new file mode 100644
index 000000000000..30c20703fb8c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_resources_D1
@@ -0,0 +1,14 @@
1What: /sys/devices/.../power_resources_D1/
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../power_resources_D1/ directory is only
6 present for device objects representing ACPI device nodes that
7 use ACPI power resources for power management and support ACPI
8 power state D1.
9
10 If present, it contains symbolic links to device directories
11 representing ACPI power resources that need to be turned on for
12 the given device node to be in ACPI power state D1. The names
13 of the links are the same as the names of the directories they
14 point to.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_resources_D2 b/Documentation/ABI/testing/sysfs-devices-power_resources_D2
new file mode 100644
index 000000000000..fd9d84b421e1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_resources_D2
@@ -0,0 +1,14 @@
1What: /sys/devices/.../power_resources_D2/
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../power_resources_D2/ directory is only
6 present for device objects representing ACPI device nodes that
7 use ACPI power resources for power management and support ACPI
8 power state D2.
9
10 If present, it contains symbolic links to device directories
11 representing ACPI power resources that need to be turned on for
12 the given device node to be in ACPI power state D2. The names
13 of the links are the same as the names of the directories they
14 point to.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_resources_D3hot b/Documentation/ABI/testing/sysfs-devices-power_resources_D3hot
new file mode 100644
index 000000000000..3df32c20addf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_resources_D3hot
@@ -0,0 +1,14 @@
1What: /sys/devices/.../power_resources_D3hot/
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../power_resources_D3hot/ directory is only
6 present for device objects representing ACPI device nodes that
7 use ACPI power resources for power management and support ACPI
8 power state D3hot.
9
10 If present, it contains symbolic links to device directories
11 representing ACPI power resources that need to be turned on for
12 the given device node to be in ACPI power state D3hot. The
13 names of the links are the same as the names of the directories
14 they point to.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_state b/Documentation/ABI/testing/sysfs-devices-power_state
new file mode 100644
index 000000000000..7ad9546748f0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_state
@@ -0,0 +1,20 @@
1What: /sys/devices/.../power_state
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../power_state attribute is only present for
6 device objects representing ACPI device nodes that provide power
7 management methods.
8
9 If present, it contains a string representing the current ACPI
10 power state of the given device node. Its possible values,
11 "D0", "D1", "D2", "D3hot", and "D3cold", reflect the power state
12 names defined by the ACPI specification (ACPI 4 and above).
13
14 If the device node uses shared ACPI power resources, this state
15 determines a list of power resources required not to be turned
16 off. However, some power resources needed by the device node in
17 higher-power (lower-number) states may also be ON because of
18 some other devices using them at the moment.
19
20 This attribute is read-only.
diff --git a/Documentation/ABI/testing/sysfs-devices-real_power_state b/Documentation/ABI/testing/sysfs-devices-real_power_state
new file mode 100644
index 000000000000..8b3527c82a7d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-real_power_state
@@ -0,0 +1,23 @@
1What: /sys/devices/.../real_power_state
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../real_power_state attribute is only present
6 for device objects representing ACPI device nodes that provide
7 power management methods and use ACPI power resources for power
8 management.
9
10 If present, it contains a string representing the real ACPI
11 power state of the given device node as returned by the _PSC
12 control method or inferred from the configuration of power
13 resources. Its possible values, "D0", "D1", "D2", "D3hot", and
14 "D3cold", reflect the power state names defined by the ACPI
15 specification (ACPI 4 and above).
16
17 In some situations the value of this attribute may be different
18 from the value of the /sys/devices/.../power_state attribute for
19 the same device object. If that happens, some shared power
20 resources used by the device node are only ON because of some
21 other devices using them at the moment.
22
23 This attribute is read-only.
diff --git a/Documentation/ABI/testing/sysfs-devices-resource_in_use b/Documentation/ABI/testing/sysfs-devices-resource_in_use
new file mode 100644
index 000000000000..b4a3bc5922a3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-resource_in_use
@@ -0,0 +1,12 @@
1What: /sys/devices/.../resource_in_use
2Date: January 2013
3Contact: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
4Description:
5 The /sys/devices/.../resource_in_use attribute is only present
6 for device objects representing ACPI power resources.
7
8 If present, it contains a number (0 or 1) representing the
9 current status of the given power resource (0 means that the
10 resource is not in use and therefore it has been turned off).
11
12 This attribute is read-only.
diff --git a/Documentation/ABI/testing/sysfs-platform-ts5500 b/Documentation/ABI/testing/sysfs-platform-ts5500
new file mode 100644
index 000000000000..c88375a537a1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-ts5500
@@ -0,0 +1,47 @@
1What: /sys/devices/platform/ts5500/adc
2Date: January 2013
3KernelVersion: 3.7
4Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
5Description:
6 Indicates the presence of an A/D Converter. If it is present,
7 it will display "1", otherwise "0".
8
9What: /sys/devices/platform/ts5500/ereset
10Date: January 2013
11KernelVersion: 3.7
12Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
13Description:
14 Indicates the presence of an external reset. If it is present,
15 it will display "1", otherwise "0".
16
17What: /sys/devices/platform/ts5500/id
18Date: January 2013
19KernelVersion: 3.7
20Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
21Description:
22 Product ID of the TS board. TS-5500 ID is 0x60.
23
24What: /sys/devices/platform/ts5500/jumpers
25Date: January 2013
26KernelVersion: 3.7
27Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
28Description:
29 Bitfield showing the jumpers' state. If a jumper is present,
30 the corresponding bit is set. For instance, 0x0e means jumpers
31 2, 3 and 4 are set.
32
33What: /sys/devices/platform/ts5500/rs485
34Date: January 2013
35KernelVersion: 3.7
36Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
37Description:
38 Indicates the presence of the RS485 option. If it is present,
39 it will display "1", otherwise "0".
40
41What: /sys/devices/platform/ts5500/sram
42Date: January 2013
43KernelVersion: 3.7
44Contact: "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
45Description:
46 Indicates the presence of the SRAM option. If it is present,
47 it will display "1", otherwise "0".
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 53e6fca146d7..a09178086c30 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -127,15 +127,42 @@ on the number of vectors that can be allocated; pci_enable_msi_block()
127returns as soon as it finds any constraint that doesn't allow the 127returns as soon as it finds any constraint that doesn't allow the
128call to succeed. 128call to succeed.
129 129
1304.2.3 pci_disable_msi 1304.2.3 pci_enable_msi_block_auto
131
132int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *count)
133
134This variation on pci_enable_msi() call allows a device driver to request
135the maximum possible number of MSIs. The MSI specification only allows
136interrupts to be allocated in powers of two, up to a maximum of 2^5 (32).
137
138If this function returns a positive number, it indicates that it has
139succeeded and the returned value is the number of allocated interrupts. In
140this case, the function enables MSI on this device and updates dev->irq to
141be the lowest of the new interrupts assigned to it. The other interrupts
142assigned to the device are in the range dev->irq to dev->irq + returned
143value - 1.
144
145If this function returns a negative number, it indicates an error and
146the driver should not attempt to request any more MSI interrupts for
147this device.
148
149If the device driver needs to know the number of interrupts the device
150supports it can pass the pointer count where that number is stored. The
151device driver must decide what action to take if pci_enable_msi_block_auto()
152succeeds, but returns a value less than the number of interrupts supported.
153If the device driver does not need to know the number of interrupts
154supported, it can set the pointer count to NULL.
155
1564.2.4 pci_disable_msi
131 157
132void pci_disable_msi(struct pci_dev *dev) 158void pci_disable_msi(struct pci_dev *dev)
133 159
134This function should be used to undo the effect of pci_enable_msi() or 160This function should be used to undo the effect of pci_enable_msi() or
135pci_enable_msi_block(). Calling it restores dev->irq to the pin-based 161pci_enable_msi_block() or pci_enable_msi_block_auto(). Calling it restores
136interrupt number and frees the previously allocated message signaled 162dev->irq to the pin-based interrupt number and frees the previously
137interrupt(s). The interrupt may subsequently be assigned to another 163allocated message signaled interrupt(s). The interrupt may subsequently be
138device, so drivers should not cache the value of dev->irq. 164assigned to another device, so drivers should not cache the value of
165dev->irq.
139 166
140Before calling this function, a device driver must always call free_irq() 167Before calling this function, a device driver must always call free_irq()
141on any interrupt for which it previously called request_irq(). 168on any interrupt for which it previously called request_irq().
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 54469bc81b1c..94a656131885 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -63,8 +63,8 @@ from ACPI tables.
63Currently the kernel is not able to automatically determine from which ACPI 63Currently the kernel is not able to automatically determine from which ACPI
64device it should make the corresponding platform device so we need to add 64device it should make the corresponding platform device so we need to add
65the ACPI device explicitly to acpi_platform_device_ids list defined in 65the ACPI device explicitly to acpi_platform_device_ids list defined in
66drivers/acpi/scan.c. This limitation is only for the platform devices, SPI 66drivers/acpi/acpi_platform.c. This limitation is only for the platform
67and I2C devices are created automatically as described below. 67devices, SPI and I2C devices are created automatically as described below.
68 68
69SPI serial bus support 69SPI serial bus support
70~~~~~~~~~~~~~~~~~~~~~~ 70~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/acpi/scan_handlers.txt b/Documentation/acpi/scan_handlers.txt
new file mode 100644
index 000000000000..3246ccf15992
--- /dev/null
+++ b/Documentation/acpi/scan_handlers.txt
@@ -0,0 +1,77 @@
1ACPI Scan Handlers
2
3Copyright (C) 2012, Intel Corporation
4Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
5
6During system initialization and ACPI-based device hot-add, the ACPI namespace
7is scanned in search of device objects that generally represent various pieces
8of hardware. This causes a struct acpi_device object to be created and
9registered with the driver core for every device object in the ACPI namespace
10and the hierarchy of those struct acpi_device objects reflects the namespace
11layout (i.e. parent device objects in the namespace are represented by parent
12struct acpi_device objects and analogously for their children). Those struct
13acpi_device objects are referred to as "device nodes" in what follows, but they
14should not be confused with struct device_node objects used by the Device Trees
15parsing code (although their role is analogous to the role of those objects).
16
17During ACPI-based device hot-remove device nodes representing pieces of hardware
18being removed are unregistered and deleted.
19
20The core ACPI namespace scanning code in drivers/acpi/scan.c carries out basic
21initialization of device nodes, such as retrieving common configuration
22information from the device objects represented by them and populating them with
23appropriate data, but some of them require additional handling after they have
24been registered. For example, if the given device node represents a PCI host
25bridge, its registration should cause the PCI bus under that bridge to be
26enumerated and PCI devices on that bus to be registered with the driver core.
27Similarly, if the device node represents a PCI interrupt link, it is necessary
28to configure that link so that the kernel can use it.
29
30Those additional configuration tasks usually depend on the type of the hardware
31component represented by the given device node which can be determined on the
32basis of the device node's hardware ID (HID). They are performed by objects
33called ACPI scan handlers represented by the following structure:
34
35struct acpi_scan_handler {
36 const struct acpi_device_id *ids;
37 struct list_head list_node;
38 int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
39 void (*detach)(struct acpi_device *dev);
40};
41
42where ids is the list of IDs of device nodes the given handler is supposed to
43take care of, list_node is the hook to the global list of ACPI scan handlers
44maintained by the ACPI core and the .attach() and .detach() callbacks are
45executed, respectively, after registration of new device nodes and before
46unregistration of device nodes the handler attached to previously.
47
48The namespace scanning function, acpi_bus_scan(), first registers all of the
49device nodes in the given namespace scope with the driver core. Then, it tries
50to match a scan handler against each of them using the ids arrays of the
51available scan handlers. If a matching scan handler is found, its .attach()
52callback is executed for the given device node. If that callback returns 1,
53that means that the handler has claimed the device node and is now responsible
54for carrying out any additional configuration tasks related to it. It also will
55be responsible for preparing the device node for unregistration in that case.
56The device node's handler field is then populated with the address of the scan
57handler that has claimed it.
58
59If the .attach() callback returns 0, it means that the device node is not
60interesting to the given scan handler and may be matched against the next scan
61handler in the list. If it returns a (negative) error code, that means that
62the namespace scan should be terminated due to a serious error. The error code
63returned should then reflect the type of the error.
64
65The namespace trimming function, acpi_bus_trim(), first executes .detach()
66callbacks from the scan handlers of all device nodes in the given namespace
67scope (if they have scan handlers). Next, it unregisters all of the device
68nodes in that scope.
69
70ACPI scan handlers can be added to the list maintained by the ACPI core with the
71help of the acpi_scan_add_handler() function taking a pointer to the new scan
72handler as an argument. The order in which scan handlers are added to the list
73is the order in which they are matched against device nodes during namespace
74scans.
75
76All scan handlers must be added to the list before acpi_bus_scan() is run for the
77first time and they cannot be removed from it.
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 27f2b21a9d5c..d9ca5be9b471 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -253,6 +253,8 @@ This performs an atomic exchange operation on the atomic variable v, setting
253the given new value. It returns the old value that the atomic variable v had 253the given new value. It returns the old value that the atomic variable v had
254just before the operation. 254just before the operation.
255 255
256atomic_xchg requires explicit memory barriers around the operation.
257
256 int atomic_cmpxchg(atomic_t *v, int old, int new); 258 int atomic_cmpxchg(atomic_t *v, int old, int new);
257 259
258This performs an atomic compare exchange operation on the atomic value v, 260This performs an atomic compare exchange operation on the atomic value v,
diff --git a/Documentation/cgroups/00-INDEX b/Documentation/cgroups/00-INDEX
index f78b90a35ad0..f5635a09c3f6 100644
--- a/Documentation/cgroups/00-INDEX
+++ b/Documentation/cgroups/00-INDEX
@@ -4,8 +4,6 @@ blkio-controller.txt
4 - Description for Block IO Controller, implementation and usage details. 4 - Description for Block IO Controller, implementation and usage details.
5cgroups.txt 5cgroups.txt
6 - Control Groups definition, implementation details, examples and API. 6 - Control Groups definition, implementation details, examples and API.
7cgroup_event_listener.c
8 - A user program for cgroup listener.
9cpuacct.txt 7cpuacct.txt
10 - CPU Accounting Controller; account CPU usage for groups of tasks. 8 - CPU Accounting Controller; account CPU usage for groups of tasks.
11cpusets.txt 9cpusets.txt
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index fc8fa97a09ac..ce94a83a7d9a 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -399,8 +399,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
399 399
400 9.10 Memory thresholds 400 9.10 Memory thresholds
401 Memory controller implements memory thresholds using cgroups notification 401 Memory controller implements memory thresholds using cgroups notification
402 API. You can use Documentation/cgroups/cgroup_event_listener.c to test 402 API. You can use tools/cgroup/cgroup_event_listener.c to test it.
403 it.
404 403
405 (Shell-A) Create cgroup and run event listener 404 (Shell-A) Create cgroup and run event listener
406 # mkdir /cgroup/A 405 # mkdir /cgroup/A
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index c436096351f8..72f70b16d299 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -111,6 +111,12 @@ policy->governor must contain the "default policy" for
111For setting some of these values, the frequency table helpers might be 111For setting some of these values, the frequency table helpers might be
112helpful. See the section 2 for more information on them. 112helpful. See the section 2 for more information on them.
113 113
114SMP systems normally have same clock source for a group of cpus. For these the
115.init() would be called only once for the first online cpu. Here the .init()
116routine must initialize policy->cpus with mask of all possible cpus (Online +
117Offline) that share the clock. Then the core would copy this mask onto
118policy->related_cpus and will reset policy->cpus to carry only online cpus.
119
114 120
1151.3 verify 1211.3 verify
116------------ 122------------
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 04f6b32993e6..ff2f28332cc4 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -190,11 +190,11 @@ scaling_max_freq show the current "policy limits" (in
190 first set scaling_max_freq, then 190 first set scaling_max_freq, then
191 scaling_min_freq. 191 scaling_min_freq.
192 192
193affected_cpus : List of CPUs that require software coordination 193affected_cpus : List of Online CPUs that require software
194 of frequency. 194 coordination of frequency.
195 195
196related_cpus : List of CPUs that need some sort of frequency 196related_cpus : List of Online + Offline CPUs that need software
197 coordination, whether software or hardware. 197 coordination of frequency.
198 198
199scaling_driver : Hardware driver for cpufreq. 199scaling_driver : Hardware driver for cpufreq.
200 200
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 728c38c242d6..56fb62b09fc5 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -141,3 +141,4 @@ Version History
1411.2.0 Handle creation of arrays that contain failed devices. 1411.2.0 Handle creation of arrays that contain failed devices.
1421.3.0 Added support for RAID 10 1421.3.0 Added support for RAID 10
1431.3.1 Allow device replacement/rebuild for RAID 10 1431.3.1 Allow device replacement/rebuild for RAID 10
1441.3.2 Fix/improve redundancy checking for RAID10
diff --git a/Documentation/devicetree/bindings/arm/atmel-aic.txt b/Documentation/devicetree/bindings/arm/atmel-aic.txt
index 19078bf5cca8..ad031211b5b8 100644
--- a/Documentation/devicetree/bindings/arm/atmel-aic.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-aic.txt
@@ -4,7 +4,7 @@ Required properties:
4- compatible: Should be "atmel,<chip>-aic" 4- compatible: Should be "atmel,<chip>-aic"
5- interrupt-controller: Identifies the node as an interrupt controller. 5- interrupt-controller: Identifies the node as an interrupt controller.
6- interrupt-parent: For single AIC system, it is an empty property. 6- interrupt-parent: For single AIC system, it is an empty property.
7- #interrupt-cells: The number of cells to define the interrupts. It sould be 3. 7- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
8 The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet). 8 The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
9 The second cell is used to specify flags: 9 The second cell is used to specify flags:
10 bits[3:0] trigger type and level flags: 10 bits[3:0] trigger type and level flags:
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 62eb8df1e08d..3dfb0c0384f5 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -42,7 +42,7 @@ Main node required properties:
42 42
43Optional 43Optional
44- interrupts : Interrupt source of the parent interrupt controller on 44- interrupts : Interrupt source of the parent interrupt controller on
45 secondary GICs, or VGIC maintainance interrupt on primary GIC (see 45 secondary GICs, or VGIC maintenance interrupt on primary GIC (see
46 below). 46 below).
47 47
48- cpu-offset : per-cpu offset within the distributor and cpu interface 48- cpu-offset : per-cpu offset within the distributor and cpu interface
@@ -74,7 +74,7 @@ Required properties:
74 virtual interface control register base and size. The 2nd additional 74 virtual interface control register base and size. The 2nd additional
75 region is the GIC virtual cpu interface register base and size. 75 region is the GIC virtual cpu interface register base and size.
76 76
77- interrupts : VGIC maintainance interrupt. 77- interrupts : VGIC maintenance interrupt.
78 78
79Example: 79Example:
80 80
diff --git a/Documentation/devicetree/bindings/arm/kirkwood.txt b/Documentation/devicetree/bindings/arm/kirkwood.txt
new file mode 100644
index 000000000000..98cce9a653eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/kirkwood.txt
@@ -0,0 +1,27 @@
1Marvell Kirkwood Platforms Device Tree Bindings
2-----------------------------------------------
3
4Boards with a SoC of the Marvell Kirkwood
5shall have the following property:
6
7Required root node property:
8
9compatible: must contain "marvell,kirkwood";
10
11In order to support the kirkwood cpufreq driver, there must be a node
12cpus/cpu@0 with three clocks, "cpu_clk", "ddrclk" and "powersave",
13where the "powersave" clock is a gating clock used to switch the CPU
14between the "cpu_clk" and the "ddrclk".
15
16Example:
17
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21
22 cpu@0 {
23 device_type = "cpu";
24 compatible = "marvell,sheeva-88SV131";
25 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
26 clock-names = "cpu_clk", "ddrclk", "powersave";
27 };
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index d0051a750587..f8288ea1b530 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -39,16 +39,16 @@ Boards:
39- OMAP3 Tobi with Overo : Commercial expansion board with daughter board 39- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
40 compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3" 40 compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"
41 41
42- OMAP4 SDP : Software Developement Board 42- OMAP4 SDP : Software Development Board
43 compatible = "ti,omap4-sdp", "ti,omap4430" 43 compatible = "ti,omap4-sdp", "ti,omap4430"
44 44
45- OMAP4 PandaBoard : Low cost community board 45- OMAP4 PandaBoard : Low cost community board
46 compatible = "ti,omap4-panda", "ti,omap4430" 46 compatible = "ti,omap4-panda", "ti,omap4430"
47 47
48- OMAP3 EVM : Software Developement Board for OMAP35x, AM/DM37x 48- OMAP3 EVM : Software Development Board for OMAP35x, AM/DM37x
49 compatible = "ti,omap3-evm", "ti,omap3" 49 compatible = "ti,omap3-evm", "ti,omap3"
50 50
51- AM335X EVM : Software Developement Board for AM335x 51- AM335X EVM : Software Development Board for AM335x
52 compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3" 52 compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3"
53 53
54- AM335X Bone : Low cost community board 54- AM335X Bone : Low cost community board
diff --git a/Documentation/devicetree/bindings/arm/psci.txt b/Documentation/devicetree/bindings/arm/psci.txt
new file mode 100644
index 000000000000..433afe9cb590
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/psci.txt
@@ -0,0 +1,55 @@
1* Power State Coordination Interface (PSCI)
2
3Firmware implementing the PSCI functions described in ARM document number
4ARM DEN 0022A ("Power State Coordination Interface System Software on ARM
5processors") can be used by Linux to initiate various CPU-centric power
6operations.
7
8Issue A of the specification describes functions for CPU suspend, hotplug
9and migration of secure software.
10
11Functions are invoked by trapping to the privilege level of the PSCI
12firmware (specified as part of the binding below) and passing arguments
13in a manner similar to that specified by AAPCS:
14
15 r0 => 32-bit Function ID / return value
16 {r1 - r3} => Parameters
17
18Note that the immediate field of the trapping instruction must be set
19to #0.
20
21
22Main node required properties:
23
24 - compatible : Must be "arm,psci"
25
26 - method : The method of calling the PSCI firmware. Permitted
27 values are:
28
29 "smc" : SMC #0, with the register assignments specified
30 in this binding.
31
32 "hvc" : HVC #0, with the register assignments specified
33 in this binding.
34
35Main node optional properties:
36
37 - cpu_suspend : Function ID for CPU_SUSPEND operation
38
39 - cpu_off : Function ID for CPU_OFF operation
40
41 - cpu_on : Function ID for CPU_ON operation
42
43 - migrate : Function ID for MIGRATE operation
44
45
46Example:
47
48 psci {
49 compatible = "arm,psci";
50 method = "smc";
51 cpu_suspend = <0x95c10000>;
52 cpu_off = <0x95c10001>;
53 cpu_on = <0x95c10002>;
54 migrate = <0x95c10003>;
55 };
diff --git a/Documentation/devicetree/bindings/clock/prima2-clock.txt b/Documentation/devicetree/bindings/clock/prima2-clock.txt
new file mode 100644
index 000000000000..5016979c0f78
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/prima2-clock.txt
@@ -0,0 +1,73 @@
1* Clock bindings for CSR SiRFprimaII
2
3Required properties:
4- compatible: Should be "sirf,prima2-clkc"
5- reg: Address and length of the register set
6- interrupts: Should contain clock controller interrupt
7- #clock-cells: Should be <1>
8
9The clock consumer should specify the desired clock by having the clock
10ID in its "clocks" phandle cell. The following is a full list of prima2
11clocks and IDs.
12
13 Clock ID
14 ---------------------------
15 rtc 0
16 osc 1
17 pll1 2
18 pll2 3
19 pll3 4
20 mem 5
21 sys 6
22 security 7
23 dsp 8
24 gps 9
25 mf 10
26 io 11
27 cpu 12
28 uart0 13
29 uart1 14
30 uart2 15
31 tsc 16
32 i2c0 17
33 i2c1 18
34 spi0 19
35 spi1 20
36 pwmc 21
37 efuse 22
38 pulse 23
39 dmac0 24
40 dmac1 25
41 nand 26
42 audio 27
43 usp0 28
44 usp1 29
45 usp2 30
46 vip 31
47 gfx 32
48 mm 33
49 lcd 34
50 vpp 35
51 mmc01 36
52 mmc23 37
53 mmc45 38
54 usbpll 39
55 usb0 40
56 usb1 41
57
58Examples:
59
60clks: clock-controller@88000000 {
61 compatible = "sirf,prima2-clkc";
62 reg = <0x88000000 0x1000>;
63 interrupts = <3>;
64 #clock-cells = <1>;
65};
66
67i2c0: i2c@b00e0000 {
68 cell-index = <0>;
69 compatible = "sirf,prima2-i2c";
70 reg = <0xb00e0000 0x10000>;
71 interrupts = <24>;
72 clocks = <&clks 17>;
73};
diff --git a/Documentation/devicetree/bindings/drm/exynos/g2d.txt b/Documentation/devicetree/bindings/drm/exynos/g2d.txt
new file mode 100644
index 000000000000..1eb124d35a99
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/exynos/g2d.txt
@@ -0,0 +1,22 @@
1Samsung 2D Graphic Accelerator using DRM framework
2
3Samsung FIMG2D is a graphics 2D accelerator which supports Bit Block Transfer.
4We set the drawing-context registers for configuring rendering parameters and
5then start rendering.
6This driver is for SOCs which contain G2D IPs with version 4.1.
7
8Required properties:
9 -compatible:
10 should be "samsung,exynos-g2d-41".
11 -reg:
12 physical base address of the controller and length
13 of memory mapped region.
14 -interrupts:
15 interrupt combiner values.
16
17Example:
18 g2d {
19 compatible = "samsung,exynos-g2d-41";
20 reg = <0x10850000 0x1000>;
21 interrupts = <0 91 0>;
22 };
diff --git a/Documentation/devicetree/bindings/i2c/ina209.txt b/Documentation/devicetree/bindings/i2c/ina209.txt
new file mode 100644
index 000000000000..9dd2bee80840
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/ina209.txt
@@ -0,0 +1,18 @@
1ina209 properties
2
3Required properties:
4- compatible: Must be "ti,ina209"
5- reg: I2C address
6
7Optional properties:
8
9- shunt-resistor
10 Shunt resistor value in micro-Ohm
11
12Example:
13
14temp-sensor@4c {
15 compatible = "ti,ina209";
16 reg = <0x4c>;
17 shunt-resistor = <5000>;
18};
diff --git a/Documentation/devicetree/bindings/i2c/max6697.txt b/Documentation/devicetree/bindings/i2c/max6697.txt
new file mode 100644
index 000000000000..5f793998e4a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/max6697.txt
@@ -0,0 +1,64 @@
1max6697 properties
2
3Required properties:
4- compatible:
5 Should be one of
6 maxim,max6581
7 maxim,max6602
8 maxim,max6622
9 maxim,max6636
10 maxim,max6689
11 maxim,max6693
12 maxim,max6694
13 maxim,max6697
14 maxim,max6698
15 maxim,max6699
16- reg: I2C address
17
18Optional properties:
19
20- smbus-timeout-disable
21 Set to disable SMBus timeout. If not specified, SMBus timeout will be
22 enabled.
23- extended-range-enable
24 Only valid for MAX6581. Set to enable extended temperature range.
25 Extended temperature will be disabled if not specified.
26- beta-compensation-enable
27	Only valid for MAX6693 and MAX6694. Set to enable beta compensation on
28 remote temperature channel 1.
29 Beta compensation will be disabled if not specified.
30- alert-mask
31 Alert bit mask. Alert disabled for bits set.
32 Select bit 0 for local temperature, bit 1..7 for remote temperatures.
33 If not specified, alert will be enabled for all channels.
34- over-temperature-mask
35 Over-temperature bit mask. Over-temperature reporting disabled for
36 bits set.
37 Select bit 0 for local temperature, bit 1..7 for remote temperatures.
38 If not specified, over-temperature reporting will be enabled for all
39 channels.
40- resistance-cancellation
41 Boolean for all chips other than MAX6581. Set to enable resistance
42 cancellation on remote temperature channel 1.
43 For MAX6581, resistance cancellation enabled for all channels if
44 specified as boolean, otherwise as per bit mask specified.
45 Only supported for remote temperatures (bit 1..7).
46 If not specified, resistance cancellation will be disabled for all
47 channels.
48- transistor-ideality
49 For MAX6581 only. Two values; first is bit mask, second is ideality
50 select value as per MAX6581 data sheet. Select bit 1..7 for remote
51 channels.
52 Transistor ideality will be initialized to default (1.008) if not
53 specified.
54
55Example:
56
57temp-sensor@1a {
58 compatible = "maxim,max6697";
59 reg = <0x1a>;
60 smbus-timeout-disable;
61 resistance-cancellation;
62 alert-mask = <0x72>;
63 over-temperature-mask = <0x7f>;
64};
diff --git a/Documentation/devicetree/bindings/input/imx-keypad.txt b/Documentation/devicetree/bindings/input/imx-keypad.txt
new file mode 100644
index 000000000000..2ebaf7d26843
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/imx-keypad.txt
@@ -0,0 +1,53 @@
1* Freescale i.MX Keypad Port(KPP) device tree bindings
2
3The KPP is designed to interface with a keypad matrix with 2-point contact
4or 3-point contact keys. The KPP is designed to simplify the software task
5of scanning a keypad matrix. The KPP is capable of detecting, debouncing,
6and decoding one or multiple keys pressed simultaneously on a keypad.
7
8Required SoC Specific Properties:
9- compatible: Should be "fsl,<soc>-kpp".
10
11- reg: Physical base address of the KPP and length of memory mapped
12 region.
13
14- interrupts: The KPP interrupt number to the CPU(s).
15
16- clocks: The clock provided by the SoC to the KPP. Some SoCs use a dummy
17clock (the clock for the KPP is provided by the SoC automatically).
18
19Required Board Specific Properties:
20- pinctrl-names: The definition can be found at
21pinctrl/pinctrl-bindings.txt.
22
23- pinctrl-0: The definition can be found at
24pinctrl/pinctrl-bindings.txt.
25
26- linux,keymap: The definition can be found at
27bindings/input/matrix-keymap.txt.
28
29Example:
30kpp: kpp@73f94000 {
31 compatible = "fsl,imx51-kpp", "fsl,imx21-kpp";
32 reg = <0x73f94000 0x4000>;
33 interrupts = <60>;
34 clocks = <&clks 0>;
35 pinctrl-names = "default";
36 pinctrl-0 = <&pinctrl_kpp_1>;
37 linux,keymap = <0x00000067 /* KEY_UP */
38 0x0001006c /* KEY_DOWN */
39 0x00020072 /* KEY_VOLUMEDOWN */
40 0x00030066 /* KEY_HOME */
41 0x0100006a /* KEY_RIGHT */
42 0x01010069 /* KEY_LEFT */
43 0x0102001c /* KEY_ENTER */
44 0x01030073 /* KEY_VOLUMEUP */
45 0x02000040 /* KEY_F6 */
46 0x02010042 /* KEY_F8 */
47 0x02020043 /* KEY_F9 */
48 0x02030044 /* KEY_F10 */
49 0x0300003b /* KEY_F1 */
50 0x0301003c /* KEY_F2 */
51 0x0302003d /* KEY_F3 */
52 0x03030074>; /* KEY_POWER */
53};
diff --git a/Documentation/devicetree/bindings/input/lpc32xx-key.txt b/Documentation/devicetree/bindings/input/lpc32xx-key.txt
index 31afd5014c48..bcf62f856358 100644
--- a/Documentation/devicetree/bindings/input/lpc32xx-key.txt
+++ b/Documentation/devicetree/bindings/input/lpc32xx-key.txt
@@ -1,19 +1,22 @@
1NXP LPC32xx Key Scan Interface 1NXP LPC32xx Key Scan Interface
2 2
3This binding is based on the matrix-keymap binding with the following
4changes:
5
3Required Properties: 6Required Properties:
4- compatible: Should be "nxp,lpc3220-key" 7- compatible: Should be "nxp,lpc3220-key"
5- reg: Physical base address of the controller and length of memory mapped 8- reg: Physical base address of the controller and length of memory mapped
6 region. 9 region.
7- interrupts: The interrupt number to the cpu. 10- interrupts: The interrupt number to the cpu.
8- keypad,num-rows: Number of rows and columns, e.g. 1: 1x1, 6: 6x6
9- keypad,num-columns: Must be equal to keypad,num-rows since LPC32xx only
10 supports square matrices
11- nxp,debounce-delay-ms: Debounce delay in ms 11- nxp,debounce-delay-ms: Debounce delay in ms
12- nxp,scan-delay-ms: Repeated scan period in ms 12- nxp,scan-delay-ms: Repeated scan period in ms
13- linux,keymap: the key-code to be reported when the key is pressed 13- linux,keymap: the key-code to be reported when the key is pressed
14 and released, see also 14 and released, see also
15 Documentation/devicetree/bindings/input/matrix-keymap.txt 15 Documentation/devicetree/bindings/input/matrix-keymap.txt
16 16
17Note: keypad,num-rows and keypad,num-columns are required, and must be equal
18since LPC32xx only supports square matrices
19
17Example: 20Example:
18 21
19 key@40050000 { 22 key@40050000 {
diff --git a/Documentation/devicetree/bindings/input/matrix-keymap.txt b/Documentation/devicetree/bindings/input/matrix-keymap.txt
index 3cd8b98ccd2d..c54919fad17e 100644
--- a/Documentation/devicetree/bindings/input/matrix-keymap.txt
+++ b/Documentation/devicetree/bindings/input/matrix-keymap.txt
@@ -9,6 +9,12 @@ Required properties:
9 row << 24 | column << 16 | key-code 9 row << 24 | column << 16 | key-code
10 10
11Optional properties: 11Optional properties:
12Properties for the number of rows and columns are optional because some
13drivers will use fixed values for these.
14- keypad,num-rows: Number of row lines connected to the keypad controller.
15- keypad,num-columns: Number of column lines connected to the keypad
16 controller.
17
12Some users of this binding might choose to specify secondary keymaps for 18Some users of this binding might choose to specify secondary keymaps for
13cases where there is a modifier key such as a Fn key. Proposed names 19cases where there is a modifier key such as a Fn key. Proposed names
14for said properties are "linux,fn-keymap" or with another descriptive 20for said properties are "linux,fn-keymap" or with another descriptive
@@ -17,3 +23,5 @@ word for the modifier other from "Fn".
17Example: 23Example:
18 linux,keymap = < 0x00030012 24 linux,keymap = < 0x00030012
19 0x0102003a >; 25 0x0102003a >;
26 keypad,num-rows = <2>;
27 keypad,num-columns = <8>;
diff --git a/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt b/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
index 72683be6de35..2995fae7ee47 100644
--- a/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
+++ b/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
@@ -1,7 +1,18 @@
1* Tegra keyboard controller 1* Tegra keyboard controller
2The key controller has maximum 24 pins to make matrix keypad. Any pin
3can be configured as row or column. The maximum column pin can be 8
4and maximum row pins can be 16 for Tegra20/Tegra30.
2 5
3Required properties: 6Required properties:
4- compatible: "nvidia,tegra20-kbc" 7- compatible: "nvidia,tegra20-kbc"
8- reg: Register base address of KBC.
9- interrupts: Interrupt number for the KBC.
10- nvidia,kbc-row-pins: The KBC pins which are configured as rows. This is an
11  array of pin numbers which are used as rows.
12- nvidia,kbc-col-pins: The KBC pins which are configured as columns. This is an
13  array of pin numbers which are used as columns.
14- linux,keymap: The keymap for keys as described in the binding document
15 devicetree/bindings/input/matrix-keymap.txt.
5 16
6Optional properties, in addition to those specified by the shared 17Optional properties, in addition to those specified by the shared
7matrix-keyboard bindings: 18matrix-keyboard bindings:
@@ -19,5 +30,16 @@ Example:
19keyboard: keyboard { 30keyboard: keyboard {
20 compatible = "nvidia,tegra20-kbc"; 31 compatible = "nvidia,tegra20-kbc";
21 reg = <0x7000e200 0x100>; 32 reg = <0x7000e200 0x100>;
33 interrupts = <0 85 0x04>;
22 nvidia,ghost-filter; 34 nvidia,ghost-filter;
35 nvidia,debounce-delay-ms = <640>;
36 nvidia,kbc-row-pins = <0 1 2>; /* pin 0, 1, 2 as rows */
37 nvidia,kbc-col-pins = <11 12 13>; /* pin 11, 12, 13 as columns */
38 linux,keymap = <0x00000074
39 0x00010067
40 0x00020066
41 0x01010068
42 0x02000069
43 0x02010070
44 0x02020071>;
23}; 45};
diff --git a/Documentation/devicetree/bindings/input/omap-keypad.txt b/Documentation/devicetree/bindings/input/omap-keypad.txt
index f2fa5e10493d..34ed1c60ff95 100644
--- a/Documentation/devicetree/bindings/input/omap-keypad.txt
+++ b/Documentation/devicetree/bindings/input/omap-keypad.txt
@@ -6,19 +6,16 @@ A key can be placed at each intersection of a unique row and a unique column.
6The keypad controller can sense a key-press and key-release and report the 6The keypad controller can sense a key-press and key-release and report the
7event using an interrupt to the cpu. 7event using an interrupt to the cpu.
8 8
9This binding is based on the matrix-keymap binding with the following
10changes:
11
12keypad,num-rows and keypad,num-columns are required.
13
9Required SoC Specific Properties: 14Required SoC Specific Properties:
10- compatible: should be one of the following 15- compatible: should be one of the following
11 - "ti,omap4-keypad": For controllers compatible with omap4 keypad 16 - "ti,omap4-keypad": For controllers compatible with omap4 keypad
12 controller. 17 controller.
13 18
14Required Board Specific Properties, in addition to those specified by
15the shared matrix-keyboard bindings:
16- keypad,num-rows: Number of row lines connected to the keypad
17 controller.
18
19- keypad,num-columns: Number of column lines connected to the
20 keypad controller.
21
22Optional Properties specific to linux: 19Optional Properties specific to linux:
23- linux,keypad-no-autorepeat: do not enable autorepeat feature. 20- linux,keypad-no-autorepeat: do not enable autorepeat feature.
24 21
diff --git a/Documentation/devicetree/bindings/input/tca8418_keypad.txt b/Documentation/devicetree/bindings/input/tca8418_keypad.txt
index 2a1538f0053f..255185009167 100644
--- a/Documentation/devicetree/bindings/input/tca8418_keypad.txt
+++ b/Documentation/devicetree/bindings/input/tca8418_keypad.txt
@@ -1,8 +1,10 @@
1This binding is based on the matrix-keymap binding with the following
2changes:
3
4keypad,num-rows and keypad,num-columns are required.
1 5
2Required properties: 6Required properties:
3- compatible: "ti,tca8418" 7- compatible: "ti,tca8418"
4- reg: the I2C address 8- reg: the I2C address
5- interrupts: IRQ line number, should trigger on falling edge 9- interrupts: IRQ line number, should trigger on falling edge
6- keypad,num-rows: The number of rows
7- keypad,num-columns: The number of columns
8- linux,keymap: Keys definitions, see keypad-matrix. 10- linux,keymap: Keys definitions, see keypad-matrix.
diff --git a/Documentation/devicetree/bindings/gpio/leds-ns2.txt b/Documentation/devicetree/bindings/leds/leds-ns2.txt
index aef3aca34d2d..aef3aca34d2d 100644
--- a/Documentation/devicetree/bindings/gpio/leds-ns2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-ns2.txt
diff --git a/Documentation/devicetree/bindings/mfd/tps6507x.txt b/Documentation/devicetree/bindings/mfd/tps6507x.txt
new file mode 100755
index 000000000000..8fffa3c5ed40
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps6507x.txt
@@ -0,0 +1,91 @@
1TPS6507x Power Management Integrated Circuit
2
3Required properties:
4- compatible: "ti,tps6507x"
5- reg: I2C slave address
6- regulators: This is the list of child nodes that specify the regulator
7 initialization data for defined regulators. Not all regulators for the
8 given device need to be present. The definition for each of these nodes
9 is defined using the standard binding for regulators found at
10 Documentation/devicetree/bindings/regulator/regulator.txt.
11 The regulator is matched with the regulator-compatible.
12
13 The valid regulator-compatible values are:
14 tps6507x: vdcdc1, vdcdc2, vdcdc3, vldo1, vldo2
15- xxx-supply: Input voltage supply regulator.
16 These entries are required if regulators are enabled for a device.
17	If these properties are missing, the regulator registration
18	can fail.
19	If an input supply is powered through a battery or an always-on
20	supply, it is still required to have these properties with the proper
21	node handle of the always-on power supply.
22 tps6507x:
23 vindcdc1_2-supply: VDCDC1 and VDCDC2 input.
24 vindcdc3-supply : VDCDC3 input.
25 vldo1_2-supply : VLDO1 and VLDO2 input.
26
27Regulator Optional properties:
28- defdcdc_default: This is a property of the DCDC2 and DCDC3 regulators.
29 0: If defdcdc pin of DCDC2/DCDC3 is pulled to GND.
30 1: If defdcdc pin of DCDC2/DCDC3 is driven HIGH.
31 If this property is not defined, it defaults to 0 (not enabled).
32
33Example:
34
35 pmu: tps6507x@48 {
36 compatible = "ti,tps6507x";
37 reg = <0x48>;
38
39 vindcdc1_2-supply = <&vbat>;
40 vindcdc3-supply = <...>;
41		vldo1_2-supply = <...>;
42
43 regulators {
44 #address-cells = <1>;
45 #size-cells = <0>;
46
47 vdcdc1_reg: regulator@0 {
48 regulator-compatible = "VDCDC1";
49 reg = <0>;
50 regulator-min-microvolt = <3150000>;
51 regulator-max-microvolt = <3450000>;
52 regulator-always-on;
53 regulator-boot-on;
54 };
55 vdcdc2_reg: regulator@1 {
56 regulator-compatible = "VDCDC2";
57 reg = <1>;
58 regulator-min-microvolt = <1710000>;
59 regulator-max-microvolt = <3450000>;
60 regulator-always-on;
61 regulator-boot-on;
62 defdcdc_default = <1>;
63 };
64 vdcdc3_reg: regulator@2 {
65 regulator-compatible = "VDCDC3";
66 reg = <2>;
67				regulator-min-microvolt = <950000>;
68 regulator-max-microvolt = <1350000>;
69 regulator-always-on;
70 regulator-boot-on;
71 defdcdc_default = <1>;
72 };
73 ldo1_reg: regulator@3 {
74 regulator-compatible = "LDO1";
75 reg = <3>;
76 regulator-min-microvolt = <1710000>;
77 regulator-max-microvolt = <1890000>;
78 regulator-always-on;
79 regulator-boot-on;
80 };
81 ldo2_reg: regulator@4 {
82 regulator-compatible = "LDO2";
83 reg = <4>;
84 regulator-min-microvolt = <1140000>;
85 regulator-max-microvolt = <1320000>;
86 regulator-always-on;
87 regulator-boot-on;
88 };
89 };
90
91 };
diff --git a/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt b/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt
index cb4291e3b1d1..a5bdff400002 100644
--- a/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt
+++ b/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt
@@ -1,7 +1,7 @@
1* DMA Engine. 1* DMA Engine.
2 2
3The Octeon DMA Engine transfers between the Boot Bus and main memory. 3The Octeon DMA Engine transfers between the Boot Bus and main memory.
4The DMA Engine will be refered to by phandle by any device that is 4The DMA Engine will be referred to by phandle by any device that is
5connected to it. 5connected to it.
6 6
7Properties: 7Properties:
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 792768953330..6d1c0988cfc7 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -4,18 +4,18 @@
4The Synopsis designware mobile storage host controller is used to interface 4The Synopsis designware mobile storage host controller is used to interface
5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents 5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
6differences between the core Synopsis dw mshc controller properties described 6differences between the core Synopsis dw mshc controller properties described
7by synposis-dw-mshc.txt and the properties used by the Samsung Exynos specific 7by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
8extensions to the Synopsis Designware Mobile Storage Host Controller. 8extensions to the Synopsis Designware Mobile Storage Host Controller.
9 9
10Required Properties: 10Required Properties:
11 11
12* compatible: should be 12* compatible: should be
13 - "samsung,exynos4210-dw-mshc": for controllers with Samsung Exynos4210 13 - "samsung,exynos4210-dw-mshc": for controllers with Samsung Exynos4210
14 specific extentions. 14 specific extensions.
15 - "samsung,exynos4412-dw-mshc": for controllers with Samsung Exynos4412 15 - "samsung,exynos4412-dw-mshc": for controllers with Samsung Exynos4412
16 specific extentions. 16 specific extensions.
17 - "samsung,exynos5250-dw-mshc": for controllers with Samsung Exynos5250 17 - "samsung,exynos5250-dw-mshc": for controllers with Samsung Exynos5250
18 specific extentions. 18 specific extensions.
19 19
20* samsung,dw-mshc-ciu-div: Specifies the divider value for the card interface 20* samsung,dw-mshc-ciu-div: Specifies the divider value for the card interface
21 unit (ciu) clock. This property is applicable only for Exynos5 SoC's and 21 unit (ciu) clock. This property is applicable only for Exynos5 SoC's and
diff --git a/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt b/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt
index 97e9e315400d..3b3a1ee055ff 100644
--- a/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt
@@ -55,5 +55,5 @@ Example:
55 }; 55 };
56 56
57 Note: This example shows both SoC specific and board specific properties 57 Note: This example shows both SoC specific and board specific properties
58 in a single device node. The properties can be actually be seperated 58 in a single device node. The properties can be actually be separated
59 into SoC specific node and board specific node. 59 into SoC specific node and board specific node.
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
new file mode 100644
index 000000000000..dff0e5f995e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -0,0 +1,60 @@
1* Allwinner A1X Pin Controller
2
3The pins controlled by sunXi pin controller are organized in banks,
4each bank has 32 pins. Each pin has 7 multiplexing functions, with
5the first two functions being GPIO in and out. The configuration on
6the pins includes drive strength and pull-up.
7
8Required properties:
9- compatible: "allwinner,<soc>-pinctrl". Supported SoCs for now are:
10 sun5i-a13.
11- reg: Should contain the register physical address and length for the
12 pin controller.
13
14Please refer to pinctrl-bindings.txt in this directory for details of the
15common pinctrl bindings used by client devices.
16
17A pinctrl node should contain at least one subnode representing the
18pinctrl groups available on the machine. Each subnode will list the
19pins it needs, and how they should be configured, with regard to muxer
20configuration, drive strength and pullups. If one of these options is
21not set, its actual value will be unspecified.
22
23Required subnode-properties:
24
25- allwinner,pins: List of strings containing the pin name.
26- allwinner,function: Function to mux the pins listed above to.
27
28Optional subnode-properties:
29- allwinner,drive: Integer. Represents the current sent to the pin
30 0: 10 mA
31 1: 20 mA
32 2: 30 mA
33 3: 40 mA
34- allwinner,pull: Integer.
35 0: No resistor
36 1: Pull-up resistor
37 2: Pull-down resistor
38
39Examples:
40
41pinctrl@01c20800 {
42 compatible = "allwinner,sun5i-a13-pinctrl";
43 reg = <0x01c20800 0x400>;
44 #address-cells = <1>;
45 #size-cells = <0>;
46
47 uart1_pins_a: uart1@0 {
48 allwinner,pins = "PE10", "PE11";
49 allwinner,function = "uart1";
50 allwinner,drive = <0>;
51 allwinner,pull = <0>;
52 };
53
54 uart1_pins_b: uart1@1 {
55 allwinner,pins = "PG3", "PG4";
56 allwinner,function = "uart1";
57 allwinner,drive = <0>;
58 allwinner,pull = <0>;
59 };
60};
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
index 3a268127b054..bc50899e0c81 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
@@ -81,7 +81,8 @@ PA31 TXD4
81Required properties for pin configuration node: 81Required properties for pin configuration node:
82- atmel,pins: 4 integers array, represents a group of pins mux and config 82- atmel,pins: 4 integers array, represents a group of pins mux and config
83 setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>. 83 setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>.
84 The PERIPH 0 means gpio. 84 The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B...
85 PIN_BANK 0 is pioA, PIN_BANK 1 is pioB...
85 86
86Bits used for CONFIG: 87Bits used for CONFIG:
87PULL_UP (1 << 0): indicate this pin need a pull up. 88PULL_UP (1 << 0): indicate this pin need a pull up.
@@ -126,7 +127,7 @@ pinctrl@fffff400 {
126 pinctrl_dbgu: dbgu-0 { 127 pinctrl_dbgu: dbgu-0 {
127 atmel,pins = 128 atmel,pins =
128 <1 14 0x1 0x0 /* PB14 periph A */ 129 <1 14 0x1 0x0 /* PB14 periph A */
129 1 15 0x1 0x1>; /* PB15 periph with pullup */ 130 1 15 0x1 0x1>; /* PB15 periph A with pullup */
130 }; 131 };
131 }; 132 };
132}; 133};
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt
new file mode 100644
index 000000000000..e204d009f16c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt
@@ -0,0 +1,120 @@
1NVIDIA Tegra114 pinmux controller
2
3The Tegra114 pinctrl binding is very similar to the Tegra20 and Tegra30
4pinctrl binding, as described in nvidia,tegra20-pinmux.txt and
5nvidia,tegra30-pinmux.txt. In fact, this document assumes that binding as
6a baseline, and only documents the differences between the two bindings.
7
8Required properties:
9- compatible: "nvidia,tegra114-pinmux"
10- reg: Should contain the register physical address and length for each of
11 the pad control and mux registers. The first bank of address must be the
12 driver strength pad control register address and second bank address must
13 be pinmux register address.
14
15Tegra114 adds the following optional properties for pin configuration subnodes:
16- nvidia,enable-input: Integer. Enable the pin's input path. 0: no, 1: yes.
17- nvidia,open-drain: Integer. Enable open drain mode. 0: no, 1: yes.
18- nvidia,lock: Integer. Lock the pin configuration against further changes
19 until reset. 0: no, 1: yes.
20- nvidia,io-reset: Integer. Reset the IO path. 0: no, 1: yes.
21- nvidia,rcv-sel: Integer. Select VIL/VIH receivers. 0: normal, 1: high.
22- nvidia,drive-type: Integer. Valid range 0...3.
23
24As with Tegra20 and Tegra30, see the Tegra TRM for complete details regarding
25which groups support which functionality.
26
27Valid values for pin and group names are:
28
29 per-pin mux groups:
30
31 These all support nvidia,function, nvidia,tristate, nvidia,pull,
32 nvidia,enable-input, nvidia,lock. Some support nvidia,open-drain,
33 nvidia,io-reset and nvidia,rcv-sel.
34
35 ulpi_data0_po1, ulpi_data1_po2, ulpi_data2_po3, ulpi_data3_po4,
36 ulpi_data4_po5, ulpi_data5_po6, ulpi_data6_po7, ulpi_data7_po0,
37 ulpi_clk_py0, ulpi_dir_py1, ulpi_nxt_py2, ulpi_stp_py3, dap3_fs_pp0,
38 dap3_din_pp1, dap3_dout_pp2, dap3_sclk_pp3, pv0, pv1, sdmmc1_clk_pz0,
39 sdmmc1_cmd_pz1, sdmmc1_dat3_py4, sdmmc1_dat2_py5, sdmmc1_dat1_py6,
40 sdmmc1_dat0_py7, clk2_out_pw5, clk2_req_pcc5, hdmi_int_pn7, ddc_scl_pv4,
41 ddc_sda_pv5, uart2_rxd_pc3, uart2_txd_pc2, uart2_rts_n_pj6,
42 uart2_cts_n_pj5, uart3_txd_pw6, uart3_rxd_pw7, uart3_cts_n_pa1,
43 uart3_rts_n_pc0, pu0, pu1, pu2, pu3, pu4, pu5, pu6, gen1_i2c_sda_pc5,
44 gen1_i2c_scl_pc4, dap4_fs_pp4, dap4_din_pp5, dap4_dout_pp6, dap4_sclk_pp7,
45 clk3_out_pee0, clk3_req_pee1, gmi_wp_n_pc7, gmi_iordy_pi5, gmi_wait_pi7,
46 gmi_adv_n_pk0, gmi_clk_pk1, gmi_cs0_n_pj0, gmi_cs1_n_pj2, gmi_cs2_n_pk3,
47 gmi_cs3_n_pk4, gmi_cs4_n_pk2, gmi_cs6_n_pi3, gmi_cs7_n_pi6, gmi_ad0_pg0,
48 gmi_ad1_pg1, gmi_ad2_pg2, gmi_ad3_pg3, gmi_ad4_pg4, gmi_ad5_pg5,
49 gmi_ad6_pg6, gmi_ad7_pg7, gmi_ad8_ph0, gmi_ad9_ph1, gmi_ad10_ph2,
50 gmi_ad11_ph3, gmi_ad12_ph4, gmi_ad13_ph5, gmi_ad14_ph6, gmi_ad15_ph7,
51 gmi_a16_pj7, gmi_a17_pb0, gmi_a18_pb1, gmi_a19_pk7, gmi_wr_n_pi0,
52 gmi_oe_n_pi1, gmi_dqs_p_pj3, gmi_rst_n_pi4, gen2_i2c_scl_pt5,
53 gen2_i2c_sda_pt6, sdmmc4_clk_pcc4, sdmmc4_cmd_pt7, sdmmc4_dat0_paa0,
54 sdmmc4_dat1_paa1, sdmmc4_dat2_paa2, sdmmc4_dat3_paa3, sdmmc4_dat4_paa4,
55 sdmmc4_dat5_paa5, sdmmc4_dat6_paa6, sdmmc4_dat7_paa7, cam_mclk_pcc0,
56 pcc1, pbb0, cam_i2c_scl_pbb1, cam_i2c_sda_pbb2, pbb3, pbb4, pbb5, pbb6,
57 pbb7, pcc2, pwr_i2c_scl_pz6, pwr_i2c_sda_pz7, kb_row0_pr0, kb_row1_pr1,
58 kb_row2_pr2, kb_row3_pr3, kb_row4_pr4, kb_row5_pr5, kb_row6_pr6,
59 kb_row7_pr7, kb_row8_ps0, kb_row9_ps1, kb_row10_ps2, kb_col0_pq0,
60 kb_col1_pq1, kb_col2_pq2, kb_col3_pq3, kb_col4_pq4, kb_col5_pq5,
61 kb_col6_pq6, kb_col7_pq7, clk_32k_out_pa0, sys_clk_req_pz5, core_pwr_req,
62 cpu_pwr_req, pwr_int_n, owr, dap1_fs_pn0, dap1_din_pn1, dap1_dout_pn2,
63 dap1_sclk_pn3, clk1_req_pee2, clk1_out_pw4, spdif_in_pk6, spdif_out_pk5,
64 dap2_fs_pa2, dap2_din_pa4, dap2_dout_pa5, dap2_sclk_pa3, dvfs_pwm_px0,
65 gpio_x1_aud_px1, gpio_x3_aud_px3, dvfs_clk_px2, gpio_x4_aud_px4,
66 gpio_x5_aud_px5, gpio_x6_aud_px6, gpio_x7_aud_px7, sdmmc3_clk_pa6,
67 sdmmc3_cmd_pa7, sdmmc3_dat0_pb7, sdmmc3_dat1_pb6, sdmmc3_dat2_pb5,
68 sdmmc3_dat3_pb4, hdmi_cec_pee3, sdmmc1_wp_n_pv3, sdmmc3_cd_n_pv2,
69 gpio_w2_aud_pw2, gpio_w3_aud_pw3, usb_vbus_en0_pn4, usb_vbus_en1_pn5,
70 sdmmc3_clk_lb_in_pee5, sdmmc3_clk_lb_out_pee4, reset_out_n.
71
72 drive groups:
73
74 These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
75 nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
76 support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode
77 and nvidia,drive-type.
78
79 ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, dap1, dap2, dap3, dap4,
80 dbg, sdio3, spi, uaa, uab, uart2, uart3, sdio1, ddc, gma, gme, gmf, gmg,
81 gmh, owr, uda.
82
83Example:
84
85 pinmux: pinmux {
86 compatible = "nvidia,tegra114-pinmux";
87 reg = <0x70000868 0x148 /* Pad control registers */
88 0x70003000 0x40c>; /* PinMux registers */
89 };
90
91Example board file extract:
92
93 pinctrl {
94 sdmmc4_default: pinmux {
95 sdmmc4_clk_pcc4 {
96			nvidia,pins = "sdmmc4_clk_pcc4";
97 nvidia,function = "sdmmc4";
98 nvidia,pull = <0>;
99 nvidia,tristate = <0>;
100 };
101 sdmmc4_dat0_paa0 {
102 nvidia,pins = "sdmmc4_dat0_paa0",
103 "sdmmc4_dat1_paa1",
104 "sdmmc4_dat2_paa2",
105 "sdmmc4_dat3_paa3",
106 "sdmmc4_dat4_paa4",
107 "sdmmc4_dat5_paa5",
108 "sdmmc4_dat6_paa6",
109 "sdmmc4_dat7_paa7";
110 nvidia,function = "sdmmc4";
111 nvidia,pull = <2>;
112 nvidia,tristate = <0>;
113 };
114 };
115 };
116
117 sdhci@78000400 {
118 pinctrl-names = "default";
119 pinctrl-0 = <&sdmmc4_default>;
120 };
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
new file mode 100644
index 000000000000..9a2f3f420526
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
@@ -0,0 +1,140 @@
1ST Ericsson Nomadik pinmux controller
2
3Required properties:
4- compatible: "stericsson,nmk-pinctrl", "stericsson,nmk-pinctrl-db8540",
5 "stericsson,nmk-pinctrl-stn8815"
6- reg: Should contain the register physical address and length of the PRCMU.
7
8Please refer to pinctrl-bindings.txt in this directory for details of the
9common pinctrl bindings used by client devices, including the meaning of the
10phrase "pin configuration node".
11
12ST Ericsson's pin configuration nodes act as a container for an arbitrary number of
13subnodes. Each of these subnodes represents some desired configuration for a
14pin, a group, or a list of pins or groups. This configuration can include the
15mux function to select on those pin(s)/group(s), and various pin configuration
16parameters, such as input, output, pull up, pull down...
17
18The name of each subnode is not important; all subnodes should be enumerated
19and processed purely based on their content.
20
21Required subnode-properties:
22- ste,pins : An array of strings. Each string contains the name of a pin or
23 group.
24
25Optional subnode-properties:
26- ste,function: A string containing the name of the function to mux to the
27 pin or group.
28
29- ste,config: Handle of pin configuration node (e.g. ste,config = <&slpm_in_wkup_pdis>)
30
31- ste,input : <0/1/2>
32 0: input with no pull
33 1: input with pull up,
34 2: input with pull down,
35
36- ste,output: <0/1/2>
37 0: output low,
38 1: output high,
39 2: output (value is not specified).
40
41- ste,sleep: <0/1>
42 0: sleep mode disable,
43 1: sleep mode enable.
44
45- ste,sleep-input: <0/1/2/3>
46 0: sleep input with no pull,
47 1: sleep input with pull up,
48 2: sleep input with pull down.
49 3: sleep input and keep last input configuration (no pull, pull up or pull down).
50
51- ste,sleep-output: <0/1/2>
52 0: sleep output low,
53 1: sleep output high,
54 2: sleep output (value is not specified).
55
56- ste,sleep-gpio: <0/1>
57 0: disable sleep gpio mode,
58 1: enable sleep gpio mode.
59
60- ste,sleep-wakeup: <0/1>
61 0: wake-up detection enabled,
62 1: wake-up detection disabled.
63
64- ste,sleep-pull-disable: <0/1>
65 0: GPIO pull-up or pull-down resistor is enabled, when pin is an input,
66 1: GPIO pull-up and pull-down resistor are disabled.
67
68Example board file extract:
69
70 pinctrl@80157000 {
71 compatible = "stericsson,nmk-pinctrl";
72 reg = <0x80157000 0x2000>;
73
74 pinctrl-names = "default";
75
76 slpm_in_wkup_pdis: slpm_in_wkup_pdis {
77 ste,sleep = <1>;
78 ste,sleep-input = <3>;
79 ste,sleep-wakeup = <1>;
80 ste,sleep-pull-disable = <0>;
81 };
82
83 slpm_out_hi_wkup_pdis: slpm_out_hi_wkup_pdis {
84 ste,sleep = <1>;
85 ste,sleep-output = <1>;
86 ste,sleep-wakeup = <1>;
87 ste,sleep-pull-disable = <0>;
88 };
89
90 slpm_out_wkup_pdis: slpm_out_wkup_pdis {
91 ste,sleep = <1>;
92 ste,sleep-output = <2>;
93 ste,sleep-wakeup = <1>;
94 ste,sleep-pull-disable = <0>;
95 };
96
97 uart0 {
98 uart0_default_mux: uart0_mux {
99 u0_default_mux {
100 ste,function = "u0";
101 ste,pins = "u0_a_1";
102 };
103 };
104 uart0_default_mode: uart0_default {
105 uart0_default_cfg1 {
106 ste,pins = "GPIO0", "GPIO2";
107 ste,input = <1>;
108 };
109
110 uart0_default_cfg2 {
111 ste,pins = "GPIO1", "GPIO3";
112 ste,output = <1>;
113 };
114 };
115 uart0_sleep_mode: uart0_sleep {
116 uart0_sleep_cfg1 {
117 ste,pins = "GPIO0", "GPIO2";
118 ste,config = <&slpm_in_wkup_pdis>;
119 };
120 uart0_sleep_cfg2 {
121 ste,pins = "GPIO1";
122 ste,config = <&slpm_out_hi_wkup_pdis>;
123 };
124 uart0_sleep_cfg3 {
125 ste,pins = "GPIO3";
126 ste,config = <&slpm_out_wkup_pdis>;
127 };
128 };
129 };
130 };
131
132 uart@80120000 {
133 compatible = "arm,pl011", "arm,primecell";
134 reg = <0x80120000 0x1000>;
135 interrupts = <0 11 0x4>;
136
137 pinctrl-names = "default","sleep";
138 pinctrl-0 = <&uart0_default_mux>, <&uart0_default_mode>;
139 pinctrl-1 = <&uart0_sleep_mode>;
140 };
diff --git a/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt b/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt
new file mode 100644
index 000000000000..9a599d27bd75
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt
@@ -0,0 +1,13 @@
1* QNAP Power Off
2
3QNAP NAS devices have a microcontroller controlling the main power
4supply. This microcontroller is connected to UART1 of the Kirkwood and
5Orion5x SoCs. Sending the character 'A', at 19200 baud, tells the
6microcontroller to turn the power off. This driver adds a handler to
7pm_power_off which is called to turn the power off.
8
9Required Properties:
10- compatible: Should be "qnap,power-off"
11
12- reg: Address and length of the register set for UART1
13- clocks: tclk clock
diff --git a/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt b/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt
new file mode 100644
index 000000000000..5776e684afda
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt
@@ -0,0 +1,8 @@
1* Restart Power Off
2
3Buffalo Linkstation LS-XHL and LS-CHLv2, and other devices power off
4by restarting and letting u-boot keep hold of the machine until the
5user presses a button.
6
7Required Properties:
8- compatible: Should be "restart-poweroff"
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
index b039bcbee134..07abf0f2f440 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
@@ -8,9 +8,9 @@ Properties:
8 Definition: Must include "fsl,srio" for IP blocks with IP Block 8 Definition: Must include "fsl,srio" for IP blocks with IP Block
9 Revision Register (SRIO IPBRR1) Major ID equal to 0x01c0. 9 Revision Register (SRIO IPBRR1) Major ID equal to 0x01c0.
10 10
11 Optionally, a compatiable string of "fsl,srio-vX.Y" where X is Major 11 Optionally, a compatible string of "fsl,srio-vX.Y" where X is Major
12 version in IP Block Revision Register and Y is Minor version. If this 12 version in IP Block Revision Register and Y is Minor version. If this
13 compatiable is provided it should be ordered before "fsl,srio". 13 compatible is provided it should be ordered before "fsl,srio".
14 14
15 - reg 15 - reg
16 Usage: required 16 Usage: required
diff --git a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
index 357758cb6e92..758eae24082a 100644
--- a/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/anatop-regulator.txt
@@ -9,6 +9,11 @@ Required properties:
9- anatop-min-voltage: Minimum voltage of this regulator 9- anatop-min-voltage: Minimum voltage of this regulator
10- anatop-max-voltage: Maximum voltage of this regulator 10- anatop-max-voltage: Maximum voltage of this regulator
11 11
12Optional properties:
13- anatop-delay-reg-offset: Anatop MFD step time register offset
14- anatop-delay-bit-shift: Bit shift for the step time register
15- anatop-delay-bit-width: Number of bits used in the step time register
16
12Any property defined as part of the core regulator 17Any property defined as part of the core regulator
13binding, defined in regulator.txt, can also be used. 18binding, defined in regulator.txt, can also be used.
14 19
@@ -23,6 +28,9 @@ Example:
23 anatop-reg-offset = <0x140>; 28 anatop-reg-offset = <0x140>;
24 anatop-vol-bit-shift = <9>; 29 anatop-vol-bit-shift = <9>;
25 anatop-vol-bit-width = <5>; 30 anatop-vol-bit-width = <5>;
31 anatop-delay-reg-offset = <0x170>;
32 anatop-delay-bit-shift = <24>;
33 anatop-delay-bit-width = <2>;
26 anatop-min-bit-val = <1>; 34 anatop-min-bit-val = <1>;
27 anatop-min-voltage = <725000>; 35 anatop-min-voltage = <725000>;
28 anatop-max-voltage = <1300000>; 36 anatop-max-voltage = <1300000>;
diff --git a/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt b/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt
new file mode 100644
index 000000000000..a35ff99003a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt
@@ -0,0 +1,152 @@
1* Samsung S5M8767 Voltage and Current Regulator
2
3The Samsung S5M8767 is a multi-function device which includes voltage and
4current regulators, rtc, charger controller and other sub-blocks. It is
5interfaced to the host controller using a i2c interface. Each sub-block is
6addressed by the host system using different i2c slave address. This document
7describes the bindings for 'pmic' sub-block of s5m8767.
8
9Required properties:
10- compatible: Should be "samsung,s5m8767-pmic".
11- reg: Specifies the i2c slave address of the pmic block. It should be 0x66.
12
13- s5m8767,pmic-buck2-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
14 units for buck2 when changing voltage using gpio dvs. Refer to [1] below
15 for additional information.
16
17- s5m8767,pmic-buck3-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
18 units for buck3 when changing voltage using gpio dvs. Refer to [1] below
19 for additional information.
20
21- s5m8767,pmic-buck4-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
22 units for buck4 when changing voltage using gpio dvs. Refer to [1] below
23 for additional information.
24
25- s5m8767,pmic-buck-ds-gpios: GPIO specifiers for three host gpio's used
26 for selecting GPIO DVS lines. It is one-to-one mapped to dvs gpio lines.
27
28[1] If none of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional
29    properties is specified, the 's5m8767,pmic-buck[2/3/4]-dvs-voltage'
30    property should specify at least one voltage level (which would be a
31    safe operating voltage).
32
33    If any of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional
34    properties is specified, then all eight voltage values for the
35    's5m8767,pmic-buck[2/3/4]-dvs-voltage' should be specified.
36
37Optional properties:
38- interrupt-parent: Specifies the phandle of the interrupt controller to which
39 the interrupts from s5m8767 are delivered to.
40- interrupts: Interrupt specifiers for two interrupt sources.
41 - First interrupt specifier is for 'irq1' interrupt.
42 - Second interrupt specifier is for 'alert' interrupt.
43- s5m8767,pmic-buck2-uses-gpio-dvs: 'buck2' can be controlled by gpio dvs.
44- s5m8767,pmic-buck3-uses-gpio-dvs: 'buck3' can be controlled by gpio dvs.
45- s5m8767,pmic-buck4-uses-gpio-dvs: 'buck4' can be controlled by gpio dvs.
46
47Additional properties required if either of the optional properties are used:
48
49- s5m8767,pmic-buck-default-dvs-idx: Default voltage setting selected from
50 the possible 8 options selectable by the dvs gpios. The value of this
51 property should be between 0 and 7. If not specified or if out of range, the
52 default value of this property is set to 0.
53
54- s5m8767,pmic-buck-dvs-gpios: GPIO specifiers for three host gpio's used
55  for dvs. The format of the gpio specifier depends on the gpio controller.
56
57Regulators: The regulators of s5m8767 that have to be instantiated should be
58included in a sub-node named 'regulators'. Regulator nodes included in this
59sub-node should be of the format as listed below.
60
61 regulator_name {
62 ldo1_reg: LDO1 {
63 regulator-name = "VDD_ALIVE_1.0V";
64 regulator-min-microvolt = <1100000>;
65 regulator-max-microvolt = <1100000>;
66 regulator-always-on;
67 regulator-boot-on;
68 op_mode = <1>; /* Normal Mode */
69 };
70 };
71The above regulator entries are defined in regulator bindings documentation
72except op_mode description.
73 - op_mode: describes the different operating modes of the LDO's with
74 power mode change in SOC. The different possible values are,
75 0 - always off mode
76 1 - on in normal mode
77 2 - low power mode
78 3 - suspend mode
79
80The following are the names of the regulators that the s5m8767 pmic block
81supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
82as per the datasheet of s5m8767.
83
84 - LDOn
85 - valid values for n are 1 to 28
86	- Example: LDO1, LDO2, LDO28
87 - BUCKn
88 - valid values for n are 1 to 9.
89 - Example: BUCK1, BUCK2, BUCK9
90
91The bindings inside the regulator nodes use the standard regulator bindings
92which are documented elsewhere.
93
94Example:
95
96 s5m8767_pmic@66 {
97 compatible = "samsung,s5m8767-pmic";
98 reg = <0x66>;
99
100 s5m8767,pmic-buck2-uses-gpio-dvs;
101 s5m8767,pmic-buck3-uses-gpio-dvs;
102 s5m8767,pmic-buck4-uses-gpio-dvs;
103
104 s5m8767,pmic-buck-default-dvs-idx = <0>;
105
106 s5m8767,pmic-buck-dvs-gpios = <&gpx0 0 1 0 0>, /* DVS1 */
107 <&gpx0 1 1 0 0>, /* DVS2 */
108 <&gpx0 2 1 0 0>; /* DVS3 */
109
110 s5m8767,pmic-buck-ds-gpios = <&gpx2 3 1 0 0>, /* SET1 */
111 <&gpx2 4 1 0 0>, /* SET2 */
112 <&gpx2 5 1 0 0>; /* SET3 */
113
114 s5m8767,pmic-buck2-dvs-voltage = <1350000>, <1300000>,
115 <1250000>, <1200000>,
116 <1150000>, <1100000>,
117 <1000000>, <950000>;
118
119 s5m8767,pmic-buck3-dvs-voltage = <1100000>, <1100000>,
120 <1100000>, <1100000>,
121 <1000000>, <1000000>,
122 <1000000>, <1000000>;
123
124 s5m8767,pmic-buck4-dvs-voltage = <1200000>, <1200000>,
125 <1200000>, <1200000>,
126 <1200000>, <1200000>,
127 <1200000>, <1200000>;
128
129 regulators {
130 ldo1_reg: LDO1 {
131 regulator-name = "VDD_ABB_3.3V";
132 regulator-min-microvolt = <3300000>;
133 regulator-max-microvolt = <3300000>;
134 op_mode = <1>; /* Normal Mode */
135 };
136
137 ldo2_reg: LDO2 {
138 regulator-name = "VDD_ALIVE_1.1V";
139 regulator-min-microvolt = <1100000>;
140 regulator-max-microvolt = <1100000>;
141 regulator-always-on;
142 };
143
144 buck1_reg: BUCK1 {
145 regulator-name = "VDD_MIF_1.2V";
146 regulator-min-microvolt = <950000>;
147 regulator-max-microvolt = <1350000>;
148 regulator-always-on;
149 regulator-boot-on;
150 };
151 };
152 };
diff --git a/Documentation/devicetree/bindings/regulator/tps51632-regulator.txt b/Documentation/devicetree/bindings/regulator/tps51632-regulator.txt
new file mode 100644
index 000000000000..2f7e44a96414
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps51632-regulator.txt
@@ -0,0 +1,27 @@
1TPS51632 Voltage regulators
2
3Required properties:
4- compatible: Must be "ti,tps51632"
5- reg: I2C slave address
6
7Optional properties:
8- ti,enable-pwm-dvfs: Enable the DVFS voltage control through the PWM interface.
9- ti,dvfs-step-20mV: The 20mV step voltage when PWM DVFS enabled. Missing this
10 will set 10mV step voltage in PWM DVFS mode. In normal mode, the voltage
11 step is 10mV as per datasheet.
12
13Any property defined as part of the core regulator binding, defined in
14regulator.txt, can also be used.
15
16Example:
17
18 tps51632 {
19 compatible = "ti,tps51632";
20 reg = <0x43>;
21 regulator-name = "tps51632-vout";
22 regulator-min-microvolt = <500000>;
23 regulator-max-microvolt = <1500000>;
24 regulator-boot-on;
25 ti,enable-pwm-dvfs;
26 ti,dvfs-step-20mV;
27 };
diff --git a/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt b/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt
index c8ca6b8f6582..1b20c3dbcdb8 100644
--- a/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt
@@ -17,9 +17,9 @@ Optional properties:
17- ti,vsel1-gpio: Gpio for controlling VSEL1 line. 17- ti,vsel1-gpio: Gpio for controlling VSEL1 line.
18 If this property is missing, then assume that there is no GPIO 18 If this property is missing, then assume that there is no GPIO
19 for vsel1 control. 19 for vsel1 control.
20- ti,vsel0-state-high: Inital state of vsel0 input is high. 20- ti,vsel0-state-high: Initial state of vsel0 input is high.
21 If this property is missing, then assume the state as low (0). 21 If this property is missing, then assume the state as low (0).
22- ti,vsel1-state-high: Inital state of vsel1 input is high. 22- ti,vsel1-state-high: Initial state of vsel1 input is high.
23 If this property is missing, then assume the state as low (0). 23 If this property is missing, then assume the state as low (0).
24 24
25Any property defined as part of the core regulator binding, defined in 25Any property defined as part of the core regulator binding, defined in
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index 90ec45fd33ec..7ac7259fe9ea 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -7,7 +7,7 @@ Required properties:
7- reg: physical base address of the controller and length of memory mapped 7- reg: physical base address of the controller and length of memory mapped
8 region. 8 region.
9- interrupts: Two interrupt numbers to the cpu should be specified. First 9- interrupts: Two interrupt numbers to the cpu should be specified. First
10 interrupt number is the rtc alarm interupt and second interrupt number 10 interrupt number is the rtc alarm interrupt and second interrupt number
11 is the rtc tick interrupt. The number of cells representing an interrupt 11 is the rtc tick interrupt. The number of cells representing an interrupt
12 depends on the parent interrupt controller. 12 depends on the parent interrupt controller.
13 13
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
new file mode 100644
index 000000000000..e6222106ca36
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -0,0 +1,12 @@
1Renesas MSIOF spi controller
2
3Required properties:
4- compatible : "renesas,sh-msiof" for SuperH or
5 "renesas,sh-mobile-msiof" for SH Mobile series
6- reg : Offset and length of the register set for the device
7- interrupts : interrupt line used by MSIOF
8
9Optional properties:
10- num-cs : total number of chip-selects
11- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
12- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 902b1b1f568e..19e1ef73ab0d 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -14,6 +14,7 @@ bosch Bosch Sensortec GmbH
14brcm Broadcom Corporation 14brcm Broadcom Corporation
15cavium Cavium, Inc. 15cavium Cavium, Inc.
16chrp Common Hardware Reference Platform 16chrp Common Hardware Reference Platform
17cirrus Cirrus Logic, Inc.
17cortina Cortina Systems, Inc. 18cortina Cortina Systems, Inc.
18dallas Maxim Integrated Products (formerly Dallas Semiconductor) 19dallas Maxim Integrated Products (formerly Dallas Semiconductor)
19denx Denx Software Engineering 20denx Denx Software Engineering
@@ -42,6 +43,7 @@ powervr PowerVR (deprecated, use img)
42qcom Qualcomm, Inc. 43qcom Qualcomm, Inc.
43ramtron Ramtron International 44ramtron Ramtron International
44realtek Realtek Semiconductor Corp. 45realtek Realtek Semiconductor Corp.
46renesas Renesas Electronics Corporation
45samsung Samsung Semiconductor 47samsung Samsung Semiconductor
46sbs Smart Battery System 48sbs Smart Battery System
47schindler Schindler 49schindler Schindler
@@ -50,8 +52,10 @@ simtek
50sirf SiRF Technology, Inc. 52sirf SiRF Technology, Inc.
51snps Synopsys, Inc. 53snps Synopsys, Inc.
52st STMicroelectronics 54st STMicroelectronics
55ste ST-Ericsson
53stericsson ST-Ericsson 56stericsson ST-Ericsson
54ti Texas Instruments 57ti Texas Instruments
58toshiba Toshiba Corporation
55via VIA Technologies, Inc. 59via VIA Technologies, Inc.
56wlf Wolfson Microelectronics 60wlf Wolfson Microelectronics
57wm Wondermedia Technologies, Inc. 61wm Wondermedia Technologies, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
index 79ead8263ae4..ce0d8e78ed8f 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
@@ -2,7 +2,7 @@
2 2
3The Samsung's Watchdog controller is used for resuming system operation 3The Samsung's Watchdog controller is used for resuming system operation
4after a preset amount of time during which the WDT reset event has not 4after a preset amount of time during which the WDT reset event has not
5occured. 5occurred.
6 6
7Required properties: 7Required properties:
8- compatible : should be "samsung,s3c2410-wdt" 8- compatible : should be "samsung,s3c2410-wdt"
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 8fbd8b46ee34..dcf338e62b71 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -175,9 +175,9 @@ consists of multiple segments as described below.
175 align with the zone size <-| 175 align with the zone size <-|
176 |-> align with the segment size 176 |-> align with the segment size
177 _________________________________________________________________________ 177 _________________________________________________________________________
178 | | | Node | Segment | Segment | | 178 | | | Segment | Node | Segment | |
179 | Superblock | Checkpoint | Address | Info. | Summary | Main | 179 | Superblock | Checkpoint | Info. | Address | Summary | Main |
180 | (SB) | (CP) | Table (NAT) | Table (SIT) | Area (SSA) | | 180 | (SB) | (CP) | Table (SIT) | Table (NAT) | Area (SSA) | |
181 |____________|_____2______|______N______|______N______|______N_____|__N___| 181 |____________|_____2______|______N______|______N______|______N_____|__N___|
182 . . 182 . .
183 . . 183 . .
@@ -200,14 +200,14 @@ consists of multiple segments as described below.
200 : It contains file system information, bitmaps for valid NAT/SIT sets, orphan 200 : It contains file system information, bitmaps for valid NAT/SIT sets, orphan
201 inode lists, and summary entries of current active segments. 201 inode lists, and summary entries of current active segments.
202 202
203- Node Address Table (NAT)
204 : It is composed of a block address table for all the node blocks stored in
205 Main area.
206
207- Segment Information Table (SIT) 203- Segment Information Table (SIT)
208 : It contains segment information such as valid block count and bitmap for the 204 : It contains segment information such as valid block count and bitmap for the
209 validity of all the blocks. 205 validity of all the blocks.
210 206
207- Node Address Table (NAT)
208 : It is composed of a block address table for all the node blocks stored in
209 Main area.
210
211- Segment Summary Area (SSA) 211- Segment Summary Area (SSA)
212 : It contains summary entries which contains the owner information of all the 212 : It contains summary entries which contains the owner information of all the
213 data and node blocks stored in Main area. 213 data and node blocks stored in Main area.
@@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are
236valid, as shown as below. 236valid, as shown as below.
237 237
238 +--------+----------+---------+ 238 +--------+----------+---------+
239 | CP | NAT | SIT | 239 | CP | SIT | NAT |
240 +--------+----------+---------+ 240 +--------+----------+---------+
241 . . . . 241 . . . .
242 . . . . 242 . . . .
243 . . . . 243 . . . .
244 +-------+-------+--------+--------+--------+--------+ 244 +-------+-------+--------+--------+--------+--------+
245 | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 | 245 | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 |
246 +-------+-------+--------+--------+--------+--------+ 246 +-------+-------+--------+--------+--------+--------+
247 | ^ ^ 247 | ^ ^
248 | | | 248 | | |
diff --git a/Documentation/hid/hid-sensor.txt b/Documentation/hid/hid-sensor.txt
index 948b0989c433..948b0989c433 100755..100644
--- a/Documentation/hid/hid-sensor.txt
+++ b/Documentation/hid/hid-sensor.txt
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index 3374c085678d..fec5a9bf755f 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -66,6 +66,7 @@ Process Processor TjMax(C)
66 i5 3470T 91 66 i5 3470T 91
67 67
6832nm Core i3/i5/i7 Processors 6832nm Core i3/i5/i7 Processors
69 i7 2600 98
69 i7 660UM/640/620, 640LM/620, 620M, 610E 105 70 i7 660UM/640/620, 640LM/620, 620M, 610E 105
70 i5 540UM/520/430, 540M/520/450/430 105 71 i5 540UM/520/430, 540M/520/450/430 105
71 i3 330E, 370M/350/330 90 rPGA, 105 BGA 72 i3 330E, 370M/350/330 90 rPGA, 105 BGA
@@ -79,7 +80,10 @@ Process Processor TjMax(C)
79 P4505/P4500 90 80 P4505/P4500 90
80 81
8132nm Atom Processors 8232nm Atom Processors
83 S1260/1220 95
84 S1240 102
82 Z2460 90 85 Z2460 90
86 Z2760 90
83 D2700/2550/2500 100 87 D2700/2550/2500 100
84 N2850/2800/2650/2600 100 88 N2850/2800/2650/2600 100
85 89
@@ -98,6 +102,7 @@ Process Processor TjMax(C)
98 102
9945nm Atom Processors 10345nm Atom Processors
100 D525/510/425/410 100 104 D525/510/425/410 100
105 K525/510/425/410 100
101 Z670/650 90 106 Z670/650 90
102 Z560/550/540/530P/530/520PT/520/515/510PT/510P 90 107 Z560/550/540/530P/530/520PT/520/515/510PT/510P 90
103 Z510/500 90 108 Z510/500 90
@@ -107,7 +112,11 @@ Process Processor TjMax(C)
107 330/230 125 112 330/230 125
108 E680/660/640/620 90 113 E680/660/640/620 90
109 E680T/660T/640T/620T 110 114 E680T/660T/640T/620T 110
115 E665C/645C 90
116 E665CT/645CT 110
110 CE4170/4150/4110 110 117 CE4170/4150/4110 110
118 CE4200 series unknown
119 CE5300 series unknown
111 120
11245nm Core2 Processors 12145nm Core2 Processors
113 Solo ULV SU3500/3300 100 122 Solo ULV SU3500/3300 100
diff --git a/Documentation/hwmon/ina209 b/Documentation/hwmon/ina209
new file mode 100644
index 000000000000..672501de4509
--- /dev/null
+++ b/Documentation/hwmon/ina209
@@ -0,0 +1,93 @@
1Kernel driver ina209
2=====================
3
4Supported chips:
5 * Burr-Brown / Texas Instruments INA209
6 Prefix: 'ina209'
7 Addresses scanned: -
8 Datasheet:
9 http://www.ti.com/lit/gpn/ina209
10
11Author: Paul Hays <Paul.Hays@cattail.ca>
12Author: Ira W. Snyder <iws@ovro.caltech.edu>
13Author: Guenter Roeck <linux@roeck-us.net>
14
15
16Description
17-----------
18
19The TI / Burr-Brown INA209 monitors voltage, current, and power on the high side
20of a D.C. power supply. It can perform measurements and calculations in the
21background to supply readings at any time. It includes a programmable
22calibration multiplier to scale the displayed current and power values.
23
24
25Sysfs entries
26-------------
27
28The INA209 chip is highly configurable both via hardwiring and via
29the I2C bus. See the datasheet for details.
30
31This tries to expose most monitoring features of the hardware via
32sysfs. It does not support every feature of this chip.
33
34
35in0_input shunt voltage (mV)
36in0_input_highest shunt voltage historical maximum reading (mV)
37in0_input_lowest shunt voltage historical minimum reading (mV)
38in0_reset_history reset shunt voltage history
39in0_max shunt voltage max alarm limit (mV)
40in0_min shunt voltage min alarm limit (mV)
41in0_crit_max shunt voltage crit max alarm limit (mV)
42in0_crit_min shunt voltage crit min alarm limit (mV)
43in0_max_alarm shunt voltage max alarm limit exceeded
44in0_min_alarm shunt voltage min alarm limit exceeded
45in0_crit_max_alarm shunt voltage crit max alarm limit exceeded
46in0_crit_min_alarm shunt voltage crit min alarm limit exceeded
47
48in1_input bus voltage (mV)
49in1_input_highest bus voltage historical maximum reading (mV)
50in1_input_lowest bus voltage historical minimum reading (mV)
51in1_reset_history reset bus voltage history
52in1_max bus voltage max alarm limit (mV)
53in1_min bus voltage min alarm limit (mV)
54in1_crit_max bus voltage crit max alarm limit (mV)
55in1_crit_min bus voltage crit min alarm limit (mV)
56in1_max_alarm bus voltage max alarm limit exceeded
57in1_min_alarm bus voltage min alarm limit exceeded
58in1_crit_max_alarm bus voltage crit max alarm limit exceeded
59in1_crit_min_alarm bus voltage crit min alarm limit exceeded
60
61power1_input power measurement (uW)
62power1_input_highest power historical maximum reading (uW)
63power1_reset_history reset power history
64power1_max power max alarm limit (uW)
65power1_crit power crit alarm limit (uW)
66power1_max_alarm power max alarm limit exceeded
67power1_crit_alarm power crit alarm limit exceeded
68
69curr1_input current measurement (mA)
70
71update_interval data conversion time; affects number of samples used
72 to average results for shunt and bus voltages.
73
74General Remarks
75---------------
76
77The power and current registers in this chip require that the calibration
78register is programmed correctly before they are used. Normally this is expected
79to be done in the BIOS. In the absence of BIOS programming, the shunt resistor
80voltage can be provided using platform data. The driver uses platform data from
81the ina2xx driver for this purpose. If calibration register data is not provided
82via platform data, the driver checks if the calibration register has been
82programmed (i.e., has a value not equal to zero). If so, this value is retained.
84Otherwise, a default value reflecting a shunt resistor value of 10 mOhm is
85programmed into the calibration register.
86
87
88Output Pins
89-----------
90
91Output pin programming is a board feature which depends on the BIOS. It is
92outside the scope of a hardware monitoring driver to enable or disable output
93pins.
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 8386aadc0a82..c263740f0cba 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -30,6 +30,14 @@ Supported chips:
30 Prefix: 'it8728' 30 Prefix: 'it8728'
31 Addresses scanned: from Super I/O config space (8 I/O ports) 31 Addresses scanned: from Super I/O config space (8 I/O ports)
32 Datasheet: Not publicly available 32 Datasheet: Not publicly available
33 * IT8771E
34 Prefix: 'it8771'
35 Addresses scanned: from Super I/O config space (8 I/O ports)
36 Datasheet: Not publicly available
37 * IT8772E
38 Prefix: 'it8772'
39 Addresses scanned: from Super I/O config space (8 I/O ports)
40 Datasheet: Not publicly available
33 * IT8782F 41 * IT8782F
34 Prefix: 'it8782' 42 Prefix: 'it8782'
35 Addresses scanned: from Super I/O config space (8 I/O ports) 43 Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -83,8 +91,8 @@ Description
83----------- 91-----------
84 92
85This driver implements support for the IT8705F, IT8712F, IT8716F, 93This driver implements support for the IT8705F, IT8712F, IT8716F,
86IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8781F, IT8782F, 94IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8771E, IT8772E,
87IT8783E/F, and SiS950 chips. 95IT8782F, IT8783E/F, and SiS950 chips.
88 96
89These chips are 'Super I/O chips', supporting floppy disks, infrared ports, 97These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
90joysticks and other miscellaneous stuff. For hardware monitoring, they 98joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -118,8 +126,8 @@ The IT8726F is just bit enhanced IT8716F with additional hardware
118for AMD power sequencing. Therefore the chip will appear as IT8716F 126for AMD power sequencing. Therefore the chip will appear as IT8716F
119to userspace applications. 127to userspace applications.
120 128
121The IT8728F is considered compatible with the IT8721F, until a datasheet 129The IT8728F, IT8771E, and IT8772E are considered compatible with the IT8721F,
122becomes available (hopefully.) 130until a datasheet becomes available (hopefully.)
123 131
124Temperatures are measured in degrees Celsius. An alarm is triggered once 132Temperatures are measured in degrees Celsius. An alarm is triggered once
125when the Overtemperature Shutdown limit is crossed. 133when the Overtemperature Shutdown limit is crossed.
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 66ecb9fc8246..165077121238 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -17,12 +17,13 @@ Supported chips:
17 * Maxim MAX6604 17 * Maxim MAX6604
18 Datasheets: 18 Datasheets:
19 http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf 19 http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
20 * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843 20 * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP98244, MCP9843
21 Datasheets: 21 Datasheets:
22 http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf 22 http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
23 http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf 23 http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
24 http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf 24 http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
25 http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf 25 http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
26 http://ww1.microchip.com/downloads/en/DeviceDoc/22327A.pdf
26 * NXP Semiconductors SE97, SE97B, SE98, SE98A 27 * NXP Semiconductors SE97, SE97B, SE98, SE98A
27 Datasheets: 28 Datasheets:
28 http://www.nxp.com/documents/data_sheet/SE97.pdf 29 http://www.nxp.com/documents/data_sheet/SE97.pdf
diff --git a/Documentation/hwmon/lm73 b/Documentation/hwmon/lm73
new file mode 100644
index 000000000000..8af059dcb642
--- /dev/null
+++ b/Documentation/hwmon/lm73
@@ -0,0 +1,90 @@
1Kernel driver lm73
2==================
3
4Supported chips:
5 * Texas Instruments LM73
6 Prefix: 'lm73'
7 Addresses scanned: I2C 0x48, 0x49, 0x4a, 0x4c, 0x4d, and 0x4e
8 Datasheet: Publicly available at the Texas Instruments website
9 http://www.ti.com/product/lm73
10
11Author: Guillaume Ligneul <guillaume.ligneul@gmail.com>
12Documentation: Chris Verges <kg4ysn@gmail.com>
13
14
15Description
16-----------
17
18The LM73 is a digital temperature sensor. All temperature values are
19given in degrees Celsius.
20
21Measurement Resolution Support
22------------------------------
23
24The LM73 supports four resolutions, defined in terms of degrees C per
25LSB: 0.25, 0.125, 0.0625, and 0.03125. Changing the resolution mode
26affects the conversion time of the LM73's analog-to-digital converter.
27From userspace, the desired resolution can be specified as a function of
28conversion time via the 'update_interval' sysfs attribute for the
29device. This attribute will normalize ranges of input values to the
30maximum times defined for the resolution in the datasheet.
31
32 Resolution Conv. Time Input Range
33 (C/LSB) (msec) (msec)
34 --------------------------------------
35 0.25 14 0..14
36 0.125 28 15..28
37 0.0625 56 29..56
38 0.03125 112 57..infinity
39 --------------------------------------
40
41The following examples show how the 'update_interval' attribute can be
42used to change the conversion time:
43
44 $ echo 0 > update_interval
45 $ cat update_interval
46 14
47 $ cat temp1_input
48 24250
49
50 $ echo 22 > update_interval
51 $ cat update_interval
52 28
53 $ cat temp1_input
54 24125
55
56 $ echo 56 > update_interval
57 $ cat update_interval
58 56
59 $ cat temp1_input
60 24062
61
62 $ echo 85 > update_interval
63 $ cat update_interval
64 112
65 $ cat temp1_input
66 24031
67
68As shown here, the lm73 driver automatically adjusts any user input for
69'update_interval' via a step function. Reading back the
70'update_interval' value after a write operation will confirm the
71conversion time actively in use.
72
73Mathematically, the resolution can be derived from the conversion time
74via the following function:
75
76 g(x) = 0.250 / 2^[log(x/14) / log(2)]
77
78where 'x' is the output from 'update_interval' and 'g(x)' is the
79resolution in degrees C per LSB.
80
81Alarm Support
82-------------
83
84The LM73 features a simple over-temperature alarm mechanism. This
85feature is exposed via the sysfs attributes.
86
87The attributes 'temp1_max_alarm' and 'temp1_min_alarm' are flags
88provided by the LM73 that indicate whether the measured temperature has
89passed the 'temp1_max' and 'temp1_min' thresholds, respectively. These
90values _must_ be read to clear the registers on the LM73.
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 04482226db20..47651ff341ae 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -16,6 +16,16 @@ Supported chips:
16 Prefixes: 'max34446' 16 Prefixes: 'max34446'
17 Addresses scanned: - 17 Addresses scanned: -
18 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf 18 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
19 * Maxim MAX34460
20 PMBus 12-Channel Voltage Monitor & Sequencer
21 Prefix: 'max34460'
22 Addresses scanned: -
23 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34460.pdf
24 * Maxim MAX34461
25 PMBus 16-Channel Voltage Monitor & Sequencer
26 Prefix: 'max34461'
27 Addresses scanned: -
28 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
19 29
20Author: Guenter Roeck <guenter.roeck@ericsson.com> 30Author: Guenter Roeck <guenter.roeck@ericsson.com>
21 31
@@ -26,6 +36,9 @@ Description
26This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel 36This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
27Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager 37Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
28and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger. 38and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
39It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
40The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
41channels.
29 42
30The driver is a client driver to the core PMBus driver. Please see 43The driver is a client driver to the core PMBus driver. Please see
31Documentation/hwmon/pmbus for details on PMBus client drivers. 44Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -109,3 +122,6 @@ temp[1-8]_reset_history Write any value to reset history.
109 122
110 temp7 and temp8 attributes only exist for MAX34440. 123 temp7 and temp8 attributes only exist for MAX34440.
111 MAX34446 only supports temp[1-3]. 124 MAX34446 only supports temp[1-3].
125
126MAX34460 supports attribute groups in[1-12] and temp[1-5].
127MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/max6697 b/Documentation/hwmon/max6697
new file mode 100644
index 000000000000..6594177ededa
--- /dev/null
+++ b/Documentation/hwmon/max6697
@@ -0,0 +1,58 @@
1Kernel driver max6697
2=====================
3
4Supported chips:
5 * Maxim MAX6581
6 Prefix: 'max6581'
7 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6581.pdf
8 * Maxim MAX6602
9 Prefix: 'max6602'
10 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6602.pdf
11 * Maxim MAX6622
12 Prefix: 'max6622'
13 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6622.pdf
14 * Maxim MAX6636
15 Prefix: 'max6636'
16 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6636.pdf
17 * Maxim MAX6689
18 Prefix: 'max6689'
19 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6689.pdf
20 * Maxim MAX6693
21 Prefix: 'max6693'
22 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6693.pdf
23 * Maxim MAX6694
24 Prefix: 'max6694'
25 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6694.pdf
26 * Maxim MAX6697
27 Prefix: 'max6697'
28 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6697.pdf
29 * Maxim MAX6698
30 Prefix: 'max6698'
31 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6698.pdf
32 * Maxim MAX6699
33 Prefix: 'max6699'
34 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6699.pdf
35
36Author:
37 Guenter Roeck <linux@roeck-us.net>
38
39Description
40-----------
41
42This driver implements support for several MAX6697 compatible temperature sensor
43chips. The chips support one local temperature sensor plus four, six, or seven
44remote temperature sensors. Remote temperature sensors are diode-connected
45thermal transistors, except for MAX6698 which supports three diode-connected
46thermal transistors plus three thermistors in addition to the local temperature
47sensor.
48
49The driver provides the following sysfs attributes. temp1 is the local (chip)
50temperature, temp[2..n] are remote temperatures. The actually supported
51per-channel attributes are chip type and channel dependent.
52
53tempX_input RO temperature
54tempX_max RW temperature maximum threshold
55tempX_max_alarm RO temperature maximum threshold alarm
56tempX_crit RW temperature critical threshold
57tempX_crit_alarm RO temperature critical threshold alarm
58tempX_fault RO temperature diode fault (remote sensors only)
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 1f4dd855a299..79f8257dd790 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -722,14 +722,14 @@ add/subtract if it has been divided before the add/subtract.
722What to do if a value is found to be invalid, depends on the type of the 722What to do if a value is found to be invalid, depends on the type of the
723sysfs attribute that is being set. If it is a continuous setting like a 723sysfs attribute that is being set. If it is a continuous setting like a
724tempX_max or inX_max attribute, then the value should be clamped to its 724tempX_max or inX_max attribute, then the value should be clamped to its
725limits using SENSORS_LIMIT(value, min_limit, max_limit). If it is not 725limits using clamp_val(value, min_limit, max_limit). If it is not continuous
726continuous like for example a tempX_type, then when an invalid value is 726like for example a tempX_type, then when an invalid value is written,
727written, -EINVAL should be returned. 727-EINVAL should be returned.
728 728
729Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees): 729Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees):
730 730
731 long v = simple_strtol(buf, NULL, 10) / 1000; 731 long v = simple_strtol(buf, NULL, 10) / 1000;
732 v = SENSORS_LIMIT(v, -128, 127); 732 v = clamp_val(v, -128, 127);
733 /* write v to register */ 733 /* write v to register */
734 734
735Example2, fan divider setting, valid values 2, 4 and 8: 735Example2, fan divider setting, valid values 2, 4 and 8:
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index a995b41724fd..3d924b6b59e9 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -121,12 +121,26 @@ in1_max_alarm Input voltage high alarm.
121in1_lcrit_alarm Input voltage critical low alarm. 121in1_lcrit_alarm Input voltage critical low alarm.
122in1_crit_alarm Input voltage critical high alarm. 122in1_crit_alarm Input voltage critical high alarm.
123 123
124in2_label "vout1" 124in2_label "vmon"
125in2_input Measured output voltage. 125in2_input Measured voltage on VMON (ZL2004) or VDRV (ZL9101M,
126in2_lcrit Critical minimum output Voltage. 126 ZL9117M) pin. Reported voltage is 16x the voltage on the
127in2_crit Critical maximum output voltage. 127 pin (adjusted internally by the chip).
128in2_lcrit_alarm Critical output voltage critical low alarm. 128in2_lcrit Critical minumum VMON/VDRV Voltage.
129in2_crit_alarm Critical output voltage critical high alarm. 129in2_crit Critical maximum VMON/VDRV voltage.
130in2_lcrit_alarm VMON/VDRV voltage critical low alarm.
131in2_crit_alarm VMON/VDRV voltage critical high alarm.
132
133 vmon attributes are supported on ZL2004, ZL9101M,
134 and ZL9117M only.
135
136inX_label "vout1"
137inX_input Measured output voltage.
138inX_lcrit Critical minimum output Voltage.
139inX_crit Critical maximum output voltage.
140inX_lcrit_alarm Critical output voltage critical low alarm.
141inX_crit_alarm Critical output voltage critical high alarm.
142
143 X is 3 for ZL2004, ZL9101M, and ZL9117M, 2 otherwise.
130 144
131curr1_label "iout1" 145curr1_label "iout1"
132curr1_input Measured output current. 146curr1_input Measured output current.
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 14c3f4f1b617..5198b742fde1 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1186,6 +1186,29 @@ When kbuild executes, the following steps are followed (roughly):
1186 clean-files += *.dtb 1186 clean-files += *.dtb
1187 DTC_FLAGS ?= -p 1024 1187 DTC_FLAGS ?= -p 1024
1188 1188
1189 dtc_cpp
1190 This is just like dtc as described above, except that the C pre-
1191 processor is invoked upon the .dtsp file before compiling the result
1192 with dtc.
1193
1194 In order for build dependencies to work, all files compiled using
1195 dtc_cpp must use the C pre-processor's #include functionality and not
1196 dtc's /include/ functionality.
1197
1198 Using the C pre-processor allows use of #define to create named
1199 constants. In turn, the #defines will typically appear in a header
1200 file, which may be shared with regular C code. Since the dtc language
1201 represents a data structure rather than code in C syntax, similar
1202 restrictions are placed on a header file included by a device tree
1203 file as for a header file included by an assembly language file.
1204 In particular, the C pre-processor is passed -x assembler-with-cpp,
1205 which sets macro __ASSEMBLY__. __DTS__ is also set. These allow header
1206 files to restrict their content to that compatible with device tree
1207 source.
1208
1209 A central rule exists to create $(obj)/%.dtb from $(src)/%.dtsp;
1210 architecture Makefiles do not need to explicitly write out that rule.
1211
1189--- 6.8 Custom kbuild commands 1212--- 6.8 Custom kbuild commands
1190 1213
1191 When kbuild is executing with KBUILD_VERBOSE=0, then only a shorthand 1214 When kbuild is executing with KBUILD_VERBOSE=0, then only a shorthand
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 363e348bff9b..4c5b3f993bbb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1039,16 +1039,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1039 Claim all unknown PCI IDE storage controllers. 1039 Claim all unknown PCI IDE storage controllers.
1040 1040
1041 idle= [X86] 1041 idle= [X86]
1042 Format: idle=poll, idle=mwait, idle=halt, idle=nomwait 1042 Format: idle=poll, idle=halt, idle=nomwait
1043 Poll forces a polling idle loop that can slightly 1043 Poll forces a polling idle loop that can slightly
1044 improve the performance of waking up a idle CPU, but 1044 improve the performance of waking up a idle CPU, but
1045 will use a lot of power and make the system run hot. 1045 will use a lot of power and make the system run hot.
1046 Not recommended. 1046 Not recommended.
1047 idle=mwait: On systems which support MONITOR/MWAIT but
1048 the kernel chose to not use it because it doesn't save
1049 as much power as a normal idle loop, use the
1050 MONITOR/MWAIT idle loop anyways. Performance should be
1051 the same as idle=poll.
1052 idle=halt: Halt is forced to be used for CPU idle. 1047 idle=halt: Halt is forced to be used for CPU idle.
1053 In such case C2/C3 won't be used again. 1048 In such case C2/C3 won't be used again.
1054 idle=nomwait: Disable mwait for CPU C-states 1049 idle=nomwait: Disable mwait for CPU C-states
@@ -1131,6 +1126,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1131 0 disables intel_idle and fall back on acpi_idle. 1126 0 disables intel_idle and fall back on acpi_idle.
1132 1 to 6 specify maximum depth of C-state. 1127 1 to 6 specify maximum depth of C-state.
1133 1128
1129 intel_pstate= [X86]
1130 disable
1131 Do not enable intel_pstate as the default
1132 scaling driver for the supported processors
1133
1134 intremap= [X86-64, Intel-IOMMU] 1134 intremap= [X86-64, Intel-IOMMU]
1135 on enable Interrupt Remapping (default) 1135 on enable Interrupt Remapping (default)
1136 off disable Interrupt Remapping 1136 off disable Interrupt Remapping
@@ -1886,10 +1886,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1886 wfi(ARM) instruction doesn't work correctly and not to 1886 wfi(ARM) instruction doesn't work correctly and not to
1887 use it. This is also useful when using JTAG debugger. 1887 use it. This is also useful when using JTAG debugger.
1888 1888
1889 no-hlt [BUGS=X86-32] Tells the kernel that the hlt
1890 instruction doesn't work correctly and not to
1891 use it.
1892
1893 no_file_caps Tells the kernel not to honor file capabilities. The 1889 no_file_caps Tells the kernel not to honor file capabilities. The
1894 only way then for a file to be executed with privilege 1890 only way then for a file to be executed with privilege
1895 is to be setuid root or executed by root. 1891 is to be setuid root or executed by root.
@@ -2438,7 +2434,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2438 real-time workloads. It can also improve energy 2434 real-time workloads. It can also improve energy
2439 efficiency for asymmetric multiprocessors. 2435 efficiency for asymmetric multiprocessors.
2440 2436
2441 rcu_nocbs_poll [KNL,BOOT] 2437 rcu_nocb_poll [KNL,BOOT]
2442 Rather than requiring that offloaded CPUs 2438 Rather than requiring that offloaded CPUs
2443 (specified by rcu_nocbs= above) explicitly 2439 (specified by rcu_nocbs= above) explicitly
2444 awaken the corresponding "rcuoN" kthreads, 2440 awaken the corresponding "rcuoN" kthreads,
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 3c4e1b3b80a1..fa5d8a9ae205 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1685,6 +1685,7 @@ explicit lock operations, described later). These include:
1685 1685
1686 xchg(); 1686 xchg();
1687 cmpxchg(); 1687 cmpxchg();
1688 atomic_xchg();
1688 atomic_cmpxchg(); 1689 atomic_cmpxchg();
1689 atomic_inc_return(); 1690 atomic_inc_return();
1690 atomic_dec_return(); 1691 atomic_dec_return();
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index da40efbef6ec..a2b57e0a1db0 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -972,6 +972,18 @@ pinmux core.
972Pin control requests from drivers 972Pin control requests from drivers
973================================= 973=================================
974 974
975When a device driver is about to probe the device core will automatically
976attempt to issue pinctrl_get_select_default() on these devices.
977This way driver writers do not need to add any of the boilerplate code
978of the type found below. However when doing fine-grained state selection
979and not using the "default" state, you may have to do some device driver
980handling of the pinctrl handles and states.
981
982So if you just want to put the pins for a certain device into the default
983state and be done with it, there is nothing you need to do besides
984providing the proper mapping table. The device core will take care of
985the rest.
986
975Generally it is discouraged to let individual drivers get and enable pin 987Generally it is discouraged to let individual drivers get and enable pin
976control. So if possible, handle the pin control in platform code or some other 988control. So if possible, handle the pin control in platform code or some other
977place where you have access to all the affected struct device * pointers. In 989place where you have access to all the affected struct device * pointers. In
@@ -1097,9 +1109,9 @@ situations that can be electrically unpleasant, you will certainly want to
1097mux in and bias pins in a certain way before the GPIO subsystems starts to 1109mux in and bias pins in a certain way before the GPIO subsystems starts to
1098deal with them. 1110deal with them.
1099 1111
1100The above can be hidden: using pinctrl hogs, the pin control driver may be 1112The above can be hidden: using the device core, the pinctrl core may be
1101setting up the config and muxing for the pins when it is probing, 1113setting up the config and muxing for the pins right before the device is
1102nevertheless orthogonal to the GPIO subsystem. 1114probing, nevertheless orthogonal to the GPIO subsystem.
1103 1115
1104But there are also situations where it makes sense for the GPIO subsystem 1116But there are also situations where it makes sense for the GPIO subsystem
1105to communicate directly with the pinctrl subsystem, using the latter 1117to communicate directly with the pinctrl subsystem, using the latter
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 6ec291ea1c78..85894d83b352 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -223,3 +223,8 @@ since they ask the freezer to skip freezing this task, since it is anyway
223only after the entire suspend/hibernation sequence is complete. 223only after the entire suspend/hibernation sequence is complete.
224So, to summarize, use [un]lock_system_sleep() instead of directly using 224So, to summarize, use [un]lock_system_sleep() instead of directly using
225mutex_[un]lock(&pm_mutex). That would prevent freezing failures. 225mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
226
227V. Miscellaneous
228/sys/power/pm_freeze_timeout controls how long it will take at most to freeze
229all user space processes or all freezable kernel threads, in milliseconds.
230The default value is 20000, within the range of an unsigned integer.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 03591a750f99..6c9f5d9aa115 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -426,6 +426,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
426 'power.runtime_error' is set or 'power.disable_depth' is greater than 426 'power.runtime_error' is set or 'power.disable_depth' is greater than
427 zero) 427 zero)
428 428
429 bool pm_runtime_active(struct device *dev);
430 - return true if the device's runtime PM status is 'active' or its
431 'power.disable_depth' field is not equal to zero, or false otherwise
432
429 bool pm_runtime_suspended(struct device *dev); 433 bool pm_runtime_suspended(struct device *dev);
430 - return true if the device's runtime PM status is 'suspended' and its 434 - return true if the device's runtime PM status is 'suspended' and its
431 'power.disable_depth' field is equal to zero, or false otherwise 435 'power.disable_depth' field is equal to zero, or false otherwise
diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt
index cf794af22855..e1498ff8cf94 100644
--- a/Documentation/trace/events-power.txt
+++ b/Documentation/trace/events-power.txt
@@ -17,7 +17,7 @@ Cf. include/trace/events/power.h for the events definitions.
171. Power state switch events 171. Power state switch events
18============================ 18============================
19 19
201.1 New trace API 201.1 Trace API
21----------------- 21-----------------
22 22
23A 'cpu' event class gathers the CPU-related events: cpuidle and 23A 'cpu' event class gathers the CPU-related events: cpuidle and
@@ -41,31 +41,6 @@ The event which has 'state=4294967295' in the trace is very important to the use
41space tools which are using it to detect the end of the current state, and so to 41space tools which are using it to detect the end of the current state, and so to
42correctly draw the states diagrams and to calculate accurate statistics etc. 42correctly draw the states diagrams and to calculate accurate statistics etc.
43 43
441.2 DEPRECATED trace API
45------------------------
46
47A new Kconfig option CONFIG_EVENT_POWER_TRACING_DEPRECATED with the default value of
48'y' has been created. This allows the legacy trace power API to be used conjointly
49with the new trace API.
50The Kconfig option, the old trace API (in include/trace/events/power.h) and the
51old trace points will disappear in a future release (namely 2.6.41).
52
53power_start "type=%lu state=%lu cpu_id=%lu"
54power_frequency "type=%lu state=%lu cpu_id=%lu"
55power_end "cpu_id=%lu"
56
57The 'type' parameter takes one of those macros:
58 . POWER_NONE = 0,
59 . POWER_CSTATE = 1, /* C-State */
60 . POWER_PSTATE = 2, /* Frequency change or DVFS */
61
62The 'state' parameter is set depending on the type:
63 . Target C-state for type=POWER_CSTATE,
64 . Target frequency for type=POWER_PSTATE,
65
66power_end is used to indicate the exit of a state, corresponding to the latest
67power_start event.
68
692. Clocks events 442. Clocks events
70================ 45================
71The clock events are used for clock enable/disable and for 46The clock events are used for clock enable/disable and for
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 6f51fed45f2d..53d6a3c51d87 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1842,6 +1842,89 @@ an error.
1842 # cat buffer_size_kb 1842 # cat buffer_size_kb
184385 184385
1844 1844
1845Snapshot
1846--------
1847CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
1848available to all non latency tracers. (Latency tracers which
1849record max latency, such as "irqsoff" or "wakeup", can't use
1850this feature, since those are already using the snapshot
1851mechanism internally.)
1852
1853Snapshot preserves a current trace buffer at a particular point
1854in time without stopping tracing. Ftrace swaps the current
1855buffer with a spare buffer, and tracing continues in the new
1856current (=previous spare) buffer.
1857
1858The following debugfs files in "tracing" are related to this
1859feature:
1860
1861 snapshot:
1862
1863 This is used to take a snapshot and to read the output
1864 of the snapshot. Echo 1 into this file to allocate a
1865 spare buffer and to take a snapshot (swap), then read
1866 the snapshot from this file in the same format as
1867 "trace" (described above in the section "The File
1868 System"). Both reads snapshot and tracing are executable
1869 in parallel. When the spare buffer is allocated, echoing
1870 0 frees it, and echoing else (positive) values clear the
1871 snapshot contents.
1872 More details are shown in the table below.
1873
1874 status\input | 0 | 1 | else |
1875 --------------+------------+------------+------------+
1876 not allocated |(do nothing)| alloc+swap | EINVAL |
1877 --------------+------------+------------+------------+
1878 allocated | free | swap | clear |
1879 --------------+------------+------------+------------+
1880
1881Here is an example of using the snapshot feature.
1882
1883 # echo 1 > events/sched/enable
1884 # echo 1 > snapshot
1885 # cat snapshot
1886# tracer: nop
1887#
1888# entries-in-buffer/entries-written: 71/71 #P:8
1889#
1890# _-----=> irqs-off
1891# / _----=> need-resched
1892# | / _---=> hardirq/softirq
1893# || / _--=> preempt-depth
1894# ||| / delay
1895# TASK-PID CPU# |||| TIMESTAMP FUNCTION
1896# | | | |||| | |
1897 <idle>-0 [005] d... 2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120
1898 sleep-2242 [005] d... 2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120
1899[...]
1900 <idle>-0 [002] d... 2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120
1901
1902 # cat trace
1903# tracer: nop
1904#
1905# entries-in-buffer/entries-written: 77/77 #P:8
1906#
1907# _-----=> irqs-off
1908# / _----=> need-resched
1909# | / _---=> hardirq/softirq
1910# || / _--=> preempt-depth
1911# ||| / delay
1912# TASK-PID CPU# |||| TIMESTAMP FUNCTION
1913# | | | |||| | |
1914 <idle>-0 [007] d... 2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120
1915 snapshot-test-2-2229 [002] d... 2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120
1916[...]
1917
1918
1919If you try to use this snapshot feature when current tracer is
1920one of the latency tracers, you will get the following results.
1921
1922 # echo wakeup > current_tracer
1923 # echo 1 > snapshot
1924bash: echo: write error: Device or resource busy
1925 # cat snapshot
1926cat: snapshot: Device or resource busy
1927
1845----------- 1928-----------
1846 1929
1847More details can be found in the source code, in the 1930More details can be found in the source code, in the
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a4df5535996b..c25439a58274 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -293,7 +293,7 @@ kvm_run' (see below).
2934.11 KVM_GET_REGS 2934.11 KVM_GET_REGS
294 294
295Capability: basic 295Capability: basic
296Architectures: all 296Architectures: all except ARM
297Type: vcpu ioctl 297Type: vcpu ioctl
298Parameters: struct kvm_regs (out) 298Parameters: struct kvm_regs (out)
299Returns: 0 on success, -1 on error 299Returns: 0 on success, -1 on error
@@ -314,7 +314,7 @@ struct kvm_regs {
3144.12 KVM_SET_REGS 3144.12 KVM_SET_REGS
315 315
316Capability: basic 316Capability: basic
317Architectures: all 317Architectures: all except ARM
318Type: vcpu ioctl 318Type: vcpu ioctl
319Parameters: struct kvm_regs (in) 319Parameters: struct kvm_regs (in)
320Returns: 0 on success, -1 on error 320Returns: 0 on success, -1 on error
@@ -600,7 +600,7 @@ struct kvm_fpu {
6004.24 KVM_CREATE_IRQCHIP 6004.24 KVM_CREATE_IRQCHIP
601 601
602Capability: KVM_CAP_IRQCHIP 602Capability: KVM_CAP_IRQCHIP
603Architectures: x86, ia64 603Architectures: x86, ia64, ARM
604Type: vm ioctl 604Type: vm ioctl
605Parameters: none 605Parameters: none
606Returns: 0 on success, -1 on error 606Returns: 0 on success, -1 on error
@@ -608,21 +608,39 @@ Returns: 0 on success, -1 on error
608Creates an interrupt controller model in the kernel. On x86, creates a virtual 608Creates an interrupt controller model in the kernel. On x86, creates a virtual
609ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a 609ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
610local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23 610local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
611only go to the IOAPIC. On ia64, an IOSAPIC is created. 611only go to the IOAPIC. On ia64, an IOSAPIC is created. On ARM, a GIC is
612created.
612 613
613 614
6144.25 KVM_IRQ_LINE 6154.25 KVM_IRQ_LINE
615 616
616Capability: KVM_CAP_IRQCHIP 617Capability: KVM_CAP_IRQCHIP
617Architectures: x86, ia64 618Architectures: x86, ia64, arm
618Type: vm ioctl 619Type: vm ioctl
619Parameters: struct kvm_irq_level 620Parameters: struct kvm_irq_level
620Returns: 0 on success, -1 on error 621Returns: 0 on success, -1 on error
621 622
622Sets the level of a GSI input to the interrupt controller model in the kernel. 623Sets the level of a GSI input to the interrupt controller model in the kernel.
623Requires that an interrupt controller model has been previously created with 624On some architectures it is required that an interrupt controller model has
624KVM_CREATE_IRQCHIP. Note that edge-triggered interrupts require the level 625been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered
625to be set to 1 and then back to 0. 626interrupts require the level to be set to 1 and then back to 0.
627
628ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip
629(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for
630specific cpus. The irq field is interpreted like this:
631
632  bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 |
633 field: | irq_type | vcpu_index | irq_id |
634
635The irq_type field has the following values:
636- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
637- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
638 (the vcpu_index field is ignored)
639- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
640
641(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
642
643In both cases, level is used to raise/lower the line.
626 644
627struct kvm_irq_level { 645struct kvm_irq_level {
628 union { 646 union {
@@ -1775,6 +1793,27 @@ registers, find a list below:
1775 PPC | KVM_REG_PPC_VPA_DTL | 128 1793 PPC | KVM_REG_PPC_VPA_DTL | 128
1776 PPC | KVM_REG_PPC_EPCR | 32 1794 PPC | KVM_REG_PPC_EPCR | 32
1777 1795
1796ARM registers are mapped using the lower 32 bits. The upper 16 of that
1797is the register group type, or coprocessor number:
1798
1799ARM core registers have the following id bit patterns:
1800 0x4002 0000 0010 <index into the kvm_regs struct:16>
1801
1802ARM 32-bit CP15 registers have the following id bit patterns:
1803 0x4002 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
1804
1805ARM 64-bit CP15 registers have the following id bit patterns:
1806 0x4003 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
1807
1808ARM CCSIDR registers are demultiplexed by CSSELR value:
1809 0x4002 0000 0011 00 <csselr:8>
1810
1811ARM 32-bit VFP control registers have the following id bit patterns:
1812 0x4002 0000 0012 1 <regno:12>
1813
1814ARM 64-bit FP registers have the following id bit patterns:
1815 0x4002 0000 0012 0 <regno:12>
1816
17784.69 KVM_GET_ONE_REG 18174.69 KVM_GET_ONE_REG
1779 1818
1780Capability: KVM_CAP_ONE_REG 1819Capability: KVM_CAP_ONE_REG
@@ -2127,6 +2166,50 @@ written, then `n_invalid' invalid entries, invalidating any previously
2127valid entries found. 2166valid entries found.
2128 2167
2129 2168
21694.77 KVM_ARM_VCPU_INIT
2170
2171Capability: basic
2172Architectures: arm
2173Type: vcpu ioctl
2174Parameters: struct kvm_vcpu_init (in)
2175Returns: 0 on success; -1 on error
2176Errors:
2177  EINVAL:    the target is unknown, or the combination of features is invalid.
2178  ENOENT:    a features bit specified is unknown.
2179
2180This tells KVM what type of CPU to present to the guest, and what
2181optional features it should have.  This will cause a reset of the cpu
2182registers to their initial values.  If this is not called, KVM_RUN will
2183return ENOEXEC for that vcpu.
2184
2185Note that because some registers reflect machine topology, all vcpus
2186should be created before this ioctl is invoked.
2187
2188Possible features:
2189 - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
2190 Depends on KVM_CAP_ARM_PSCI.
2191
2192
21934.78 KVM_GET_REG_LIST
2194
2195Capability: basic
2196Architectures: arm
2197Type: vcpu ioctl
2198Parameters: struct kvm_reg_list (in/out)
2199Returns: 0 on success; -1 on error
2200Errors:
2201  E2BIG:     the reg index list is too big to fit in the array specified by
2202             the user (the number required will be written into n).
2203
2204struct kvm_reg_list {
2205 __u64 n; /* number of registers in reg[] */
2206 __u64 reg[0];
2207};
2208
2209This ioctl returns the guest registers that are supported for the
2210KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
2211
2212
21305. The kvm_run structure 22135. The kvm_run structure
2131------------------------ 2214------------------------
2132 2215
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 406d82d5d2bb..b443f1de0e5a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -57,6 +57,10 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
57Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover 57Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
58 protocol entry point. 58 protocol entry point.
59 59
60Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
61 to struct boot_params for loading bzImage and ramdisk
62 above 4G in 64bit.
63
60**** MEMORY LAYOUT 64**** MEMORY LAYOUT
61 65
62The traditional memory map for the kernel loader, used for Image or 66The traditional memory map for the kernel loader, used for Image or
@@ -182,7 +186,7 @@ Offset Proto Name Meaning
1820230/4 2.05+ kernel_alignment Physical addr alignment required for kernel 1860230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
1830234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not 1870234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
1840235/1 2.10+ min_alignment Minimum alignment, as a power of two 1880235/1 2.10+ min_alignment Minimum alignment, as a power of two
1850236/2 N/A pad3 Unused 1890236/2 2.12+ xloadflags Boot protocol option flags
1860238/4 2.06+ cmdline_size Maximum size of the kernel command line 1900238/4 2.06+ cmdline_size Maximum size of the kernel command line
187023C/4 2.07+ hardware_subarch Hardware subarchitecture 191023C/4 2.07+ hardware_subarch Hardware subarchitecture
1880240/8 2.07+ hardware_subarch_data Subarchitecture-specific data 1920240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
@@ -386,6 +390,7 @@ Protocol: 2.00+
386 F Special (0xFF = undefined) 390 F Special (0xFF = undefined)
387 10 Reserved 391 10 Reserved
388 11 Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de> 392 11 Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
393 12 OVMF UEFI virtualization stack
389 394
390 Please contact <hpa@zytor.com> if you need a bootloader ID 395 Please contact <hpa@zytor.com> if you need a bootloader ID
391 value assigned. 396 value assigned.
@@ -582,6 +587,27 @@ Protocol: 2.10+
582 misaligned kernel. Therefore, a loader should typically try each 587 misaligned kernel. Therefore, a loader should typically try each
583 power-of-two alignment from kernel_alignment down to this alignment. 588 power-of-two alignment from kernel_alignment down to this alignment.
584 589
590Field name: xloadflags
591Type: read
592Offset/size: 0x236/2
593Protocol: 2.12+
594
595 This field is a bitmask.
596
597 Bit 0 (read): XLF_KERNEL_64
598 - If 1, this kernel has the legacy 64-bit entry point at 0x200.
599
600 Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G
601 - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G.
602
603 Bit 2 (read): XLF_EFI_HANDOVER_32
604 - If 1, the kernel supports the 32-bit EFI handoff entry point
605 given at handover_offset.
606
607 Bit 3 (read): XLF_EFI_HANDOVER_64
608 - If 1, the kernel supports the 64-bit EFI handoff entry point
609 given at handover_offset + 0x200.
610
585Field name: cmdline_size 611Field name: cmdline_size
586Type: read 612Type: read
587Offset/size: 0x238/4 613Offset/size: 0x238/4
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index cf5437deda81..199f453cb4de 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -19,6 +19,9 @@ Offset Proto Name Meaning
19090/010 ALL hd1_info hd1 disk parameter, OBSOLETE!! 19090/010 ALL hd1_info hd1 disk parameter, OBSOLETE!!
200A0/010 ALL sys_desc_table System description table (struct sys_desc_table) 200A0/010 ALL sys_desc_table System description table (struct sys_desc_table)
210B0/010 ALL olpc_ofw_header OLPC's OpenFirmware CIF and friends 210B0/010 ALL olpc_ofw_header OLPC's OpenFirmware CIF and friends
220C0/004 ALL ext_ramdisk_image ramdisk_image high 32bits
230C4/004 ALL ext_ramdisk_size ramdisk_size high 32bits
240C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
22140/080 ALL edid_info Video mode setup (struct edid_info) 25140/080 ALL edid_info Video mode setup (struct edid_info)
231C0/020 ALL efi_info EFI 32 information (struct efi_info) 261C0/020 ALL efi_info EFI 32 information (struct efi_info)
241E0/004 ALL alk_mem_k Alternative mem check, in KB 271E0/004 ALL alk_mem_k Alternative mem check, in KB
@@ -27,6 +30,7 @@ Offset Proto Name Meaning
271E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) 301E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
281EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer 311EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
29 (below) 32 (below)
331EF/001 ALL sentinel Used to detect broken bootloaders
30290/040 ALL edd_mbr_sig_buffer EDD MBR signatures 34290/040 ALL edd_mbr_sig_buffer EDD MBR signatures
312D0/A00 ALL e820_map E820 memory map table 352D0/A00 ALL e820_map E820 memory map table
32 (array of struct e820entry) 36 (array of struct e820entry)
diff --git a/MAINTAINERS b/MAINTAINERS
index 3105c4868c4e..7ffaf79d90f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -670,8 +670,16 @@ F: drivers/input/serio/ambakmi.*
670F: include/linux/amba/kmi.h 670F: include/linux/amba/kmi.h
671 671
672ARM PRIMECELL MMCI PL180/1 DRIVER 672ARM PRIMECELL MMCI PL180/1 DRIVER
673S: Orphan 673M: Russell King <linux@arm.linux.org.uk>
674S: Maintained
674F: drivers/mmc/host/mmci.* 675F: drivers/mmc/host/mmci.*
676F: include/linux/amba/mmci.h
677
678ARM PRIMECELL UART PL010 AND PL011 DRIVERS
679M: Russell King <linux@arm.linux.org.uk>
680S: Maintained
681F: drivers/tty/serial/amba-pl01*.c
682F: include/linux/amba/serial.h
675 683
676ARM PRIMECELL BUS SUPPORT 684ARM PRIMECELL BUS SUPPORT
677M: Russell King <linux@arm.linux.org.uk> 685M: Russell King <linux@arm.linux.org.uk>
@@ -1303,7 +1311,7 @@ F: include/linux/dmaengine.h
1303F: include/linux/async_tx.h 1311F: include/linux/async_tx.h
1304 1312
1305AT24 EEPROM DRIVER 1313AT24 EEPROM DRIVER
1306M: Wolfram Sang <w.sang@pengutronix.de> 1314M: Wolfram Sang <wsa@the-dreams.de>
1307L: linux-i2c@vger.kernel.org 1315L: linux-i2c@vger.kernel.org
1308S: Maintained 1316S: Maintained
1309F: drivers/misc/eeprom/at24.c 1317F: drivers/misc/eeprom/at24.c
@@ -1489,7 +1497,7 @@ AVR32 ARCHITECTURE
1489M: Haavard Skinnemoen <hskinnemoen@gmail.com> 1497M: Haavard Skinnemoen <hskinnemoen@gmail.com>
1490M: Hans-Christian Egtvedt <egtvedt@samfundet.no> 1498M: Hans-Christian Egtvedt <egtvedt@samfundet.no>
1491W: http://www.atmel.com/products/AVR32/ 1499W: http://www.atmel.com/products/AVR32/
1492W: http://avr32linux.org/ 1500W: http://mirror.egtvedt.no/avr32linux.org/
1493W: http://avrfreaks.net/ 1501W: http://avrfreaks.net/
1494S: Maintained 1502S: Maintained
1495F: arch/avr32/ 1503F: arch/avr32/
@@ -2140,10 +2148,10 @@ S: Maintained
2140F: tools/power/cpupower 2148F: tools/power/cpupower
2141 2149
2142CPUSETS 2150CPUSETS
2143M: Paul Menage <paul@paulmenage.org> 2151M: Li Zefan <lizefan@huawei.com>
2144W: http://www.bullopensource.org/cpuset/ 2152W: http://www.bullopensource.org/cpuset/
2145W: http://oss.sgi.com/projects/cpusets/ 2153W: http://oss.sgi.com/projects/cpusets/
2146S: Supported 2154S: Maintained
2147F: Documentation/cgroups/cpusets.txt 2155F: Documentation/cgroups/cpusets.txt
2148F: include/linux/cpuset.h 2156F: include/linux/cpuset.h
2149F: kernel/cpuset.c 2157F: kernel/cpuset.c
@@ -2966,7 +2974,7 @@ S: Maintained
2966F: drivers/net/ethernet/i825xx/eexpress.* 2974F: drivers/net/ethernet/i825xx/eexpress.*
2967 2975
2968ETHERNET BRIDGE 2976ETHERNET BRIDGE
2969M: Stephen Hemminger <shemminger@vyatta.com> 2977M: Stephen Hemminger <stephen@networkplumber.org>
2970L: bridge@lists.linux-foundation.org 2978L: bridge@lists.linux-foundation.org
2971L: netdev@vger.kernel.org 2979L: netdev@vger.kernel.org
2972W: http://www.linuxfoundation.org/en/Net:Bridge 2980W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -3757,12 +3765,11 @@ S: Maintained
3757F: drivers/i2c/i2c-stub.c 3765F: drivers/i2c/i2c-stub.c
3758 3766
3759I2C SUBSYSTEM 3767I2C SUBSYSTEM
3760M: Wolfram Sang <w.sang@pengutronix.de> 3768M: Wolfram Sang <wsa@the-dreams.de>
3761M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org> 3769M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
3762L: linux-i2c@vger.kernel.org 3770L: linux-i2c@vger.kernel.org
3763W: http://i2c.wiki.kernel.org/ 3771W: http://i2c.wiki.kernel.org/
3764T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ 3772T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
3765T: git git://git.pengutronix.de/git/wsa/linux.git
3766S: Maintained 3773S: Maintained
3767F: Documentation/i2c/ 3774F: Documentation/i2c/
3768F: drivers/i2c/ 3775F: drivers/i2c/
@@ -4481,6 +4488,15 @@ F: arch/s390/include/asm/kvm*
4481F: arch/s390/kvm/ 4488F: arch/s390/kvm/
4482F: drivers/s390/kvm/ 4489F: drivers/s390/kvm/
4483 4490
4491KERNEL VIRTUAL MACHINE (KVM) FOR ARM
4492M: Christoffer Dall <cdall@cs.columbia.edu>
4493L: kvmarm@lists.cs.columbia.edu
4494W: http://systems.cs.columbia.edu/projects/kvm-arm
4495S: Maintained
4496F: arch/arm/include/uapi/asm/kvm*
4497F: arch/arm/include/asm/kvm*
4498F: arch/arm/kvm/
4499
4484KEXEC 4500KEXEC
4485M: Eric Biederman <ebiederm@xmission.com> 4501M: Eric Biederman <ebiederm@xmission.com>
4486W: http://kernel.org/pub/linux/utils/kernel/kexec/ 4502W: http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -4905,7 +4921,7 @@ S: Maintained
4905 4921
4906MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) 4922MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
4907M: Mirko Lindner <mlindner@marvell.com> 4923M: Mirko Lindner <mlindner@marvell.com>
4908M: Stephen Hemminger <shemminger@vyatta.com> 4924M: Stephen Hemminger <stephen@networkplumber.org>
4909L: netdev@vger.kernel.org 4925L: netdev@vger.kernel.org
4910S: Maintained 4926S: Maintained
4911F: drivers/net/ethernet/marvell/sk* 4927F: drivers/net/ethernet/marvell/sk*
@@ -5180,7 +5196,7 @@ S: Supported
5180F: drivers/infiniband/hw/nes/ 5196F: drivers/infiniband/hw/nes/
5181 5197
5182NETEM NETWORK EMULATOR 5198NETEM NETWORK EMULATOR
5183M: Stephen Hemminger <shemminger@vyatta.com> 5199M: Stephen Hemminger <stephen@networkplumber.org>
5184L: netem@lists.linux-foundation.org 5200L: netem@lists.linux-foundation.org
5185S: Maintained 5201S: Maintained
5186F: net/sched/sch_netem.c 5202F: net/sched/sch_netem.c
@@ -5778,15 +5794,6 @@ L: linux-i2c@vger.kernel.org
5778S: Maintained 5794S: Maintained
5779F: drivers/i2c/muxes/i2c-mux-pca9541.c 5795F: drivers/i2c/muxes/i2c-mux-pca9541.c
5780 5796
5781PCA9564/PCA9665 I2C BUS DRIVER
5782M: Wolfram Sang <w.sang@pengutronix.de>
5783L: linux-i2c@vger.kernel.org
5784S: Maintained
5785F: drivers/i2c/algos/i2c-algo-pca.c
5786F: drivers/i2c/busses/i2c-pca-*
5787F: include/linux/i2c-algo-pca.h
5788F: include/linux/i2c-pca-platform.h
5789
5790PCDP - PRIMARY CONSOLE AND DEBUG PORT 5797PCDP - PRIMARY CONSOLE AND DEBUG PORT
5791M: Khalid Aziz <khalid@gonehiking.org> 5798M: Khalid Aziz <khalid@gonehiking.org>
5792S: Maintained 5799S: Maintained
@@ -6585,7 +6592,7 @@ F: drivers/media/platform/s3c-camif/
6585F: include/media/s3c_camif.h 6592F: include/media/s3c_camif.h
6586 6593
6587SERIAL DRIVERS 6594SERIAL DRIVERS
6588M: Alan Cox <alan@linux.intel.com> 6595M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
6589L: linux-serial@vger.kernel.org 6596L: linux-serial@vger.kernel.org
6590S: Maintained 6597S: Maintained
6591F: drivers/tty/serial 6598F: drivers/tty/serial
@@ -6598,7 +6605,7 @@ F: drivers/dma/dw_dmac_regs.h
6598F: drivers/dma/dw_dmac.c 6605F: drivers/dma/dw_dmac.c
6599 6606
6600TIMEKEEPING, NTP 6607TIMEKEEPING, NTP
6601M: John Stultz <johnstul@us.ibm.com> 6608M: John Stultz <john.stultz@linaro.org>
6602M: Thomas Gleixner <tglx@linutronix.de> 6609M: Thomas Gleixner <tglx@linutronix.de>
6603T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core 6610T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
6604S: Supported 6611S: Supported
@@ -7088,7 +7095,7 @@ F: include/uapi/sound/
7088F: sound/ 7095F: sound/
7089 7096
7090SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) 7097SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
7091M: Liam Girdwood <lrg@ti.com> 7098M: Liam Girdwood <lgirdwood@gmail.com>
7092M: Mark Brown <broonie@opensource.wolfsonmicro.com> 7099M: Mark Brown <broonie@opensource.wolfsonmicro.com>
7093T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git 7100T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
7094L: alsa-devel@alsa-project.org (moderated for non-subscribers) 7101L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -7178,6 +7185,7 @@ F: drivers/clk/spear/
7178 7185
7179SPI SUBSYSTEM 7186SPI SUBSYSTEM
7180M: Grant Likely <grant.likely@secretlab.ca> 7187M: Grant Likely <grant.likely@secretlab.ca>
7188M: Mark Brown <broonie@opensource.wolfsonmicro.com>
7181L: spi-devel-general@lists.sourceforge.net 7189L: spi-devel-general@lists.sourceforge.net
7182Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 7190Q: http://patchwork.kernel.org/project/spi-devel-general/list/
7183T: git git://git.secretlab.ca/git/linux-2.6.git 7191T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -7543,6 +7551,11 @@ F: drivers/net/team/
7543F: include/linux/if_team.h 7551F: include/linux/if_team.h
7544F: include/uapi/linux/if_team.h 7552F: include/uapi/linux/if_team.h
7545 7553
7554TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
7555M: Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>
7556S: Maintained
7557F: arch/x86/platform/ts5500/
7558
7546TECHNOTREND USB IR RECEIVER 7559TECHNOTREND USB IR RECEIVER
7547M: Sean Young <sean@mess.org> 7560M: Sean Young <sean@mess.org>
7548L: linux-media@vger.kernel.org 7561L: linux-media@vger.kernel.org
@@ -7617,6 +7630,22 @@ F: Documentation/backlight/lp855x-driver.txt
7617F: drivers/video/backlight/lp855x_bl.c 7630F: drivers/video/backlight/lp855x_bl.c
7618F: include/linux/platform_data/lp855x.h 7631F: include/linux/platform_data/lp855x.h
7619 7632
7633TI LP8727 CHARGER DRIVER
7634M: Milo Kim <milo.kim@ti.com>
7635S: Maintained
7636F: drivers/power/lp8727_charger.c
7637F: include/linux/platform_data/lp8727.h
7638
7639TI LP8788 MFD DRIVER
7640M: Milo Kim <milo.kim@ti.com>
7641S: Maintained
7642F: drivers/iio/adc/lp8788_adc.c
7643F: drivers/leds/leds-lp8788.c
7644F: drivers/mfd/lp8788*.c
7645F: drivers/power/lp8788-charger.c
7646F: drivers/regulator/lp8788-*.c
7647F: include/linux/mfd/lp8788*.h
7648
7620TI TWL4030 SERIES SOC CODEC DRIVER 7649TI TWL4030 SERIES SOC CODEC DRIVER
7621M: Peter Ujfalusi <peter.ujfalusi@ti.com> 7650M: Peter Ujfalusi <peter.ujfalusi@ti.com>
7622L: alsa-devel@alsa-project.org (moderated for non-subscribers) 7651L: alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
index 2cd4c6be44f2..6fccf6531770 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 8 2PATCHLEVEL = 8
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION =
5NAME = Terrified Chipmunk 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
@@ -165,7 +165,8 @@ export srctree objtree VPATH
165# then ARCH is assigned, getting whatever value it gets normally, and 165# then ARCH is assigned, getting whatever value it gets normally, and
166# SUBARCH is subsequently ignored. 166# SUBARCH is subsequently ignored.
167 167
168SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ 168SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
169 -e s/sun4u/sparc64/ \
169 -e s/arm.*/arm/ -e s/sa110/arm/ \ 170 -e s/arm.*/arm/ -e s/sa110/arm/ \
170 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 171 -e s/s390x/s390/ -e s/parisc64/parisc/ \
171 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 172 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
diff --git a/arch/Kconfig b/arch/Kconfig
index 7f8f281f2585..97fb7d0365d1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -76,6 +76,15 @@ config OPTPROBES
76 depends on KPROBES && HAVE_OPTPROBES 76 depends on KPROBES && HAVE_OPTPROBES
77 depends on !PREEMPT 77 depends on !PREEMPT
78 78
79config KPROBES_ON_FTRACE
80 def_bool y
81 depends on KPROBES && HAVE_KPROBES_ON_FTRACE
82 depends on DYNAMIC_FTRACE_WITH_REGS
83 help
84 If function tracer is enabled and the arch supports full
85 passing of pt_regs to function tracing, then kprobes can
86 optimize on top of function tracing.
87
79config UPROBES 88config UPROBES
80 bool "Transparent user-space probes (EXPERIMENTAL)" 89 bool "Transparent user-space probes (EXPERIMENTAL)"
81 depends on UPROBE_EVENT && PERF_EVENTS 90 depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
158config HAVE_OPTPROBES 167config HAVE_OPTPROBES
159 bool 168 bool
160 169
170config HAVE_KPROBES_ON_FTRACE
171 bool
172
161config HAVE_NMI_WATCHDOG 173config HAVE_NMI_WATCHDOG
162 bool 174 bool
163# 175#
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9d5904cc7712..9b504af2e966 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,7 +5,6 @@ config ALPHA
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_SYSCALL_WRAPPERS 7 select HAVE_SYSCALL_WRAPPERS
8 select HAVE_IRQ_WORK
9 select HAVE_PCSPKR_PLATFORM 8 select HAVE_PCSPKR_PLATFORM
10 select HAVE_PERF_EVENTS 9 select HAVE_PERF_EVENTS
11 select HAVE_DMA_ATTRS 10 select HAVE_DMA_ATTRS
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e4c8a8..dbc1760f418b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1139,6 +1139,7 @@ struct rusage32 {
1139SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) 1139SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
1140{ 1140{
1141 struct rusage32 r; 1141 struct rusage32 r;
1142 cputime_t utime, stime;
1142 1143
1143 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) 1144 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1144 return -EINVAL; 1145 return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
1146 memset(&r, 0, sizeof(r)); 1147 memset(&r, 0, sizeof(r));
1147 switch (who) { 1148 switch (who) {
1148 case RUSAGE_SELF: 1149 case RUSAGE_SELF:
1149 jiffies_to_timeval32(current->utime, &r.ru_utime); 1150 task_cputime(current, &utime, &stime);
1150 jiffies_to_timeval32(current->stime, &r.ru_stime); 1151 jiffies_to_timeval32(utime, &r.ru_utime);
1152 jiffies_to_timeval32(stime, &r.ru_stime);
1151 r.ru_minflt = current->min_flt; 1153 r.ru_minflt = current->min_flt;
1152 r.ru_majflt = current->maj_flt; 1154 r.ru_majflt = current->maj_flt;
1153 break; 1155 break;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..2f66b2e42490 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,6 @@ config ARM
36 select HAVE_GENERIC_HARDIRQS 36 select HAVE_GENERIC_HARDIRQS
37 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) 37 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
38 select HAVE_IDE if PCI || ISA || PCMCIA 38 select HAVE_IDE if PCI || ISA || PCMCIA
39 select HAVE_IRQ_WORK
40 select HAVE_KERNEL_GZIP 39 select HAVE_KERNEL_GZIP
41 select HAVE_KERNEL_LZMA 40 select HAVE_KERNEL_LZMA
42 select HAVE_KERNEL_LZO 41 select HAVE_KERNEL_LZO
@@ -1620,6 +1619,16 @@ config HOTPLUG_CPU
1620 Say Y here to experiment with turning CPUs off and on. CPUs 1619 Say Y here to experiment with turning CPUs off and on. CPUs
1621 can be controlled through /sys/devices/system/cpu. 1620 can be controlled through /sys/devices/system/cpu.
1622 1621
1622config ARM_PSCI
1623 bool "Support for the ARM Power State Coordination Interface (PSCI)"
1624 depends on CPU_V7
1625 help
1626 Say Y here if you want Linux to communicate with system firmware
1627 implementing the PSCI specification for CPU-centric power
1628 management operations described in ARM document number ARM DEN
1629 0022A ("Power State Coordination Interface System Software on
1630 ARM processors").
1631
1623config LOCAL_TIMERS 1632config LOCAL_TIMERS
1624 bool "Use local timer interrupts" 1633 bool "Use local timer interrupts"
1625 depends on SMP 1634 depends on SMP
@@ -1637,7 +1646,7 @@ config ARCH_NR_GPIO
1637 default 355 if ARCH_U8500 1646 default 355 if ARCH_U8500
1638 default 264 if MACH_H4700 1647 default 264 if MACH_H4700
1639 default 512 if SOC_OMAP5 1648 default 512 if SOC_OMAP5
1640 default 288 if ARCH_VT8500 1649 default 288 if ARCH_VT8500 || ARCH_SUNXI
1641 default 0 1650 default 0
1642 help 1651 help
1643 Maximum number of GPIOs in the system. 1652 Maximum number of GPIOs in the system.
@@ -1655,6 +1664,9 @@ config HZ
1655 default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE 1664 default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
1656 default 100 1665 default 100
1657 1666
1667config SCHED_HRTICK
1668 def_bool HIGH_RES_TIMERS
1669
1658config THUMB2_KERNEL 1670config THUMB2_KERNEL
1659 bool "Compile the kernel in Thumb-2 mode" 1671 bool "Compile the kernel in Thumb-2 mode"
1660 depends on CPU_V7 && !CPU_V6 && !CPU_V6K 1672 depends on CPU_V7 && !CPU_V6 && !CPU_V6K
@@ -2322,3 +2334,5 @@ source "security/Kconfig"
2322source "crypto/Kconfig" 2334source "crypto/Kconfig"
2323 2335
2324source "lib/Kconfig" 2336source "lib/Kconfig"
2337
2338source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c406f3..4bcd2d6b0535 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
252core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ) 252core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
253core-$(CONFIG_VFP) += arch/arm/vfp/ 253core-$(CONFIG_VFP) += arch/arm/vfp/
254core-$(CONFIG_XEN) += arch/arm/xen/ 254core-$(CONFIG_XEN) += arch/arm/xen/
255core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
255 256
256# If we have a machine-specific directory, then include it in the build. 257# If we have a machine-specific directory, then include it in the build.
257core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ 258core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 00044026ef1f..9b82facb2561 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -26,7 +26,7 @@
26 26
27 memory { 27 memory {
28 device_type = "memory"; 28 device_type = "memory";
29 reg = <0x00000000 0x20000000>; /* 512 MB */ 29 reg = <0x00000000 0x40000000>; /* 1 GB */
30 }; 30 };
31 31
32 soc { 32 soc {
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 271855a6e224..e041f42ed711 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -50,27 +50,25 @@
50 }; 50 };
51 51
52 gpio0: gpio@d0018100 { 52 gpio0: gpio@d0018100 {
53 compatible = "marvell,armadaxp-gpio"; 53 compatible = "marvell,orion-gpio";
54 reg = <0xd0018100 0x40>, 54 reg = <0xd0018100 0x40>;
55 <0xd0018800 0x30>;
56 ngpios = <32>; 55 ngpios = <32>;
57 gpio-controller; 56 gpio-controller;
58 #gpio-cells = <2>; 57 #gpio-cells = <2>;
59 interrupt-controller; 58 interrupt-controller;
60 #interrupts-cells = <2>; 59 #interrupts-cells = <2>;
61 interrupts = <16>, <17>, <18>, <19>; 60 interrupts = <82>, <83>, <84>, <85>;
62 }; 61 };
63 62
64 gpio1: gpio@d0018140 { 63 gpio1: gpio@d0018140 {
65 compatible = "marvell,armadaxp-gpio"; 64 compatible = "marvell,orion-gpio";
66 reg = <0xd0018140 0x40>, 65 reg = <0xd0018140 0x40>;
67 <0xd0018840 0x30>;
68 ngpios = <17>; 66 ngpios = <17>;
69 gpio-controller; 67 gpio-controller;
70 #gpio-cells = <2>; 68 #gpio-cells = <2>;
71 interrupt-controller; 69 interrupt-controller;
72 #interrupts-cells = <2>; 70 #interrupts-cells = <2>;
73 interrupts = <20>, <21>, <22>; 71 interrupts = <87>, <88>, <89>;
74 }; 72 };
75 }; 73 };
76}; 74};
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 1c1937dbce73..9e23bd8c9536 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -51,39 +51,36 @@
51 }; 51 };
52 52
53 gpio0: gpio@d0018100 { 53 gpio0: gpio@d0018100 {
54 compatible = "marvell,armadaxp-gpio"; 54 compatible = "marvell,orion-gpio";
55 reg = <0xd0018100 0x40>, 55 reg = <0xd0018100 0x40>;
56 <0xd0018800 0x30>;
57 ngpios = <32>; 56 ngpios = <32>;
58 gpio-controller; 57 gpio-controller;
59 #gpio-cells = <2>; 58 #gpio-cells = <2>;
60 interrupt-controller; 59 interrupt-controller;
61 #interrupts-cells = <2>; 60 #interrupts-cells = <2>;
62 interrupts = <16>, <17>, <18>, <19>; 61 interrupts = <82>, <83>, <84>, <85>;
63 }; 62 };
64 63
65 gpio1: gpio@d0018140 { 64 gpio1: gpio@d0018140 {
66 compatible = "marvell,armadaxp-gpio"; 65 compatible = "marvell,orion-gpio";
67 reg = <0xd0018140 0x40>, 66 reg = <0xd0018140 0x40>;
68 <0xd0018840 0x30>;
69 ngpios = <32>; 67 ngpios = <32>;
70 gpio-controller; 68 gpio-controller;
71 #gpio-cells = <2>; 69 #gpio-cells = <2>;
72 interrupt-controller; 70 interrupt-controller;
73 #interrupts-cells = <2>; 71 #interrupts-cells = <2>;
74 interrupts = <20>, <21>, <22>, <23>; 72 interrupts = <87>, <88>, <89>, <90>;
75 }; 73 };
76 74
77 gpio2: gpio@d0018180 { 75 gpio2: gpio@d0018180 {
78 compatible = "marvell,armadaxp-gpio"; 76 compatible = "marvell,orion-gpio";
79 reg = <0xd0018180 0x40>, 77 reg = <0xd0018180 0x40>;
80 <0xd0018870 0x30>;
81 ngpios = <3>; 78 ngpios = <3>;
82 gpio-controller; 79 gpio-controller;
83 #gpio-cells = <2>; 80 #gpio-cells = <2>;
84 interrupt-controller; 81 interrupt-controller;
85 #interrupts-cells = <2>; 82 #interrupts-cells = <2>;
86 interrupts = <24>; 83 interrupts = <91>;
87 }; 84 };
88 85
89 ethernet@d0034000 { 86 ethernet@d0034000 {
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index 4905cf3a5ef8..965966110e38 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -66,39 +66,36 @@
66 }; 66 };
67 67
68 gpio0: gpio@d0018100 { 68 gpio0: gpio@d0018100 {
69 compatible = "marvell,armadaxp-gpio"; 69 compatible = "marvell,orion-gpio";
70 reg = <0xd0018100 0x40>, 70 reg = <0xd0018100 0x40>;
71 <0xd0018800 0x30>;
72 ngpios = <32>; 71 ngpios = <32>;
73 gpio-controller; 72 gpio-controller;
74 #gpio-cells = <2>; 73 #gpio-cells = <2>;
75 interrupt-controller; 74 interrupt-controller;
76 #interrupts-cells = <2>; 75 #interrupts-cells = <2>;
77 interrupts = <16>, <17>, <18>, <19>; 76 interrupts = <82>, <83>, <84>, <85>;
78 }; 77 };
79 78
80 gpio1: gpio@d0018140 { 79 gpio1: gpio@d0018140 {
81 compatible = "marvell,armadaxp-gpio"; 80 compatible = "marvell,orion-gpio";
82 reg = <0xd0018140 0x40>, 81 reg = <0xd0018140 0x40>;
83 <0xd0018840 0x30>;
84 ngpios = <32>; 82 ngpios = <32>;
85 gpio-controller; 83 gpio-controller;
86 #gpio-cells = <2>; 84 #gpio-cells = <2>;
87 interrupt-controller; 85 interrupt-controller;
88 #interrupts-cells = <2>; 86 #interrupts-cells = <2>;
89 interrupts = <20>, <21>, <22>, <23>; 87 interrupts = <87>, <88>, <89>, <90>;
90 }; 88 };
91 89
92 gpio2: gpio@d0018180 { 90 gpio2: gpio@d0018180 {
93 compatible = "marvell,armadaxp-gpio"; 91 compatible = "marvell,orion-gpio";
94 reg = <0xd0018180 0x40>, 92 reg = <0xd0018180 0x40>;
95 <0xd0018870 0x30>;
96 ngpios = <3>; 93 ngpios = <3>;
97 gpio-controller; 94 gpio-controller;
98 #gpio-cells = <2>; 95 #gpio-cells = <2>;
99 interrupt-controller; 96 interrupt-controller;
100 #interrupts-cells = <2>; 97 #interrupts-cells = <2>;
101 interrupts = <24>; 98 interrupts = <91>;
102 }; 99 };
103 100
104 ethernet@d0034000 { 101 ethernet@d0034000 {
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index e154f242c680..222047f1ece9 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -336,8 +336,8 @@
336 336
337 i2c@0 { 337 i2c@0 {
338 compatible = "i2c-gpio"; 338 compatible = "i2c-gpio";
339 gpios = <&pioA 23 0 /* sda */ 339 gpios = <&pioA 25 0 /* sda */
340 &pioA 24 0 /* scl */ 340 &pioA 26 0 /* scl */
341 >; 341 >;
342 i2c-gpio,sda-open-drain; 342 i2c-gpio,sda-open-drain;
343 i2c-gpio,scl-open-drain; 343 i2c-gpio,scl-open-drain;
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 3a47cf952146..8ecca6948d81 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -143,6 +143,11 @@
143 atmel,pins = 143 atmel,pins =
144 <0 3 0x1 0x0>; /* PA3 periph A */ 144 <0 3 0x1 0x0>; /* PA3 periph A */
145 }; 145 };
146
147 pinctrl_usart0_sck: usart0_sck-0 {
148 atmel,pins =
149 <0 4 0x1 0x0>; /* PA4 periph A */
150 };
146 }; 151 };
147 152
148 usart1 { 153 usart1 {
@@ -154,12 +159,17 @@
154 159
155 pinctrl_usart1_rts: usart1_rts-0 { 160 pinctrl_usart1_rts: usart1_rts-0 {
156 atmel,pins = 161 atmel,pins =
157 <3 27 0x3 0x0>; /* PC27 periph C */ 162 <2 27 0x3 0x0>; /* PC27 periph C */
158 }; 163 };
159 164
160 pinctrl_usart1_cts: usart1_cts-0 { 165 pinctrl_usart1_cts: usart1_cts-0 {
161 atmel,pins = 166 atmel,pins =
162 <3 28 0x3 0x0>; /* PC28 periph C */ 167 <2 28 0x3 0x0>; /* PC28 periph C */
168 };
169
170 pinctrl_usart1_sck: usart1_sck-0 {
171 atmel,pins =
172 <2 28 0x3 0x0>; /* PC29 periph C */
163 }; 173 };
164 }; 174 };
165 175
@@ -172,46 +182,56 @@
172 182
173 pinctrl_uart2_rts: uart2_rts-0 { 183 pinctrl_uart2_rts: uart2_rts-0 {
174 atmel,pins = 184 atmel,pins =
175 <0 0 0x2 0x0>; /* PB0 periph B */ 185 <1 0 0x2 0x0>; /* PB0 periph B */
176 }; 186 };
177 187
178 pinctrl_uart2_cts: uart2_cts-0 { 188 pinctrl_uart2_cts: uart2_cts-0 {
179 atmel,pins = 189 atmel,pins =
180 <0 1 0x2 0x0>; /* PB1 periph B */ 190 <1 1 0x2 0x0>; /* PB1 periph B */
191 };
192
193 pinctrl_usart2_sck: usart2_sck-0 {
194 atmel,pins =
195 <1 2 0x2 0x0>; /* PB2 periph B */
181 }; 196 };
182 }; 197 };
183 198
184 usart3 { 199 usart3 {
185 pinctrl_uart3: usart3-0 { 200 pinctrl_uart3: usart3-0 {
186 atmel,pins = 201 atmel,pins =
187 <3 23 0x2 0x1 /* PC22 periph B with pullup */ 202 <2 23 0x2 0x1 /* PC22 periph B with pullup */
188 3 23 0x2 0x0>; /* PC23 periph B */ 203 2 23 0x2 0x0>; /* PC23 periph B */
189 }; 204 };
190 205
191 pinctrl_usart3_rts: usart3_rts-0 { 206 pinctrl_usart3_rts: usart3_rts-0 {
192 atmel,pins = 207 atmel,pins =
193 <3 24 0x2 0x0>; /* PC24 periph B */ 208 <2 24 0x2 0x0>; /* PC24 periph B */
194 }; 209 };
195 210
196 pinctrl_usart3_cts: usart3_cts-0 { 211 pinctrl_usart3_cts: usart3_cts-0 {
197 atmel,pins = 212 atmel,pins =
198 <3 25 0x2 0x0>; /* PC25 periph B */ 213 <2 25 0x2 0x0>; /* PC25 periph B */
214 };
215
216 pinctrl_usart3_sck: usart3_sck-0 {
217 atmel,pins =
218 <2 26 0x2 0x0>; /* PC26 periph B */
199 }; 219 };
200 }; 220 };
201 221
202 uart0 { 222 uart0 {
203 pinctrl_uart0: uart0-0 { 223 pinctrl_uart0: uart0-0 {
204 atmel,pins = 224 atmel,pins =
205 <3 8 0x3 0x0 /* PC8 periph C */ 225 <2 8 0x3 0x0 /* PC8 periph C */
206 3 9 0x3 0x1>; /* PC9 periph C with pullup */ 226 2 9 0x3 0x1>; /* PC9 periph C with pullup */
207 }; 227 };
208 }; 228 };
209 229
210 uart1 { 230 uart1 {
211 pinctrl_uart1: uart1-0 { 231 pinctrl_uart1: uart1-0 {
212 atmel,pins = 232 atmel,pins =
213 <3 16 0x3 0x0 /* PC16 periph C */ 233 <2 16 0x3 0x0 /* PC16 periph C */
214 3 17 0x3 0x1>; /* PC17 periph C with pullup */ 234 2 17 0x3 0x1>; /* PC17 periph C with pullup */
215 }; 235 };
216 }; 236 };
217 237
@@ -240,14 +260,14 @@
240 260
241 pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 { 261 pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {
242 atmel,pins = 262 atmel,pins =
243 <1 8 0x1 0x0 /* PA8 periph A */ 263 <1 8 0x1 0x0 /* PB8 periph A */
244 1 11 0x1 0x0 /* PA11 periph A */ 264 1 11 0x1 0x0 /* PB11 periph A */
245 1 12 0x1 0x0 /* PA12 periph A */ 265 1 12 0x1 0x0 /* PB12 periph A */
246 1 13 0x1 0x0 /* PA13 periph A */ 266 1 13 0x1 0x0 /* PB13 periph A */
247 1 14 0x1 0x0 /* PA14 periph A */ 267 1 14 0x1 0x0 /* PB14 periph A */
248 1 15 0x1 0x0 /* PA15 periph A */ 268 1 15 0x1 0x0 /* PB15 periph A */
249 1 16 0x1 0x0 /* PA16 periph A */ 269 1 16 0x1 0x0 /* PB16 periph A */
250 1 17 0x1 0x0>; /* PA17 periph A */ 270 1 17 0x1 0x0>; /* PB17 periph A */
251 }; 271 };
252 }; 272 };
253 273
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index fddd17417433..46c098017036 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -96,8 +96,8 @@
96 fifo-depth = <0x80>; 96 fifo-depth = <0x80>;
97 card-detect-delay = <200>; 97 card-detect-delay = <200>;
98 samsung,dw-mshc-ciu-div = <3>; 98 samsung,dw-mshc-ciu-div = <3>;
99 samsung,dw-mshc-sdr-timing = <2 3 3>; 99 samsung,dw-mshc-sdr-timing = <2 3>;
100 samsung,dw-mshc-ddr-timing = <1 2 3>; 100 samsung,dw-mshc-ddr-timing = <1 2>;
101 101
102 slot@0 { 102 slot@0 {
103 reg = <0>; 103 reg = <0>;
@@ -120,8 +120,8 @@
120 fifo-depth = <0x80>; 120 fifo-depth = <0x80>;
121 card-detect-delay = <200>; 121 card-detect-delay = <200>;
122 samsung,dw-mshc-ciu-div = <3>; 122 samsung,dw-mshc-ciu-div = <3>;
123 samsung,dw-mshc-sdr-timing = <2 3 3>; 123 samsung,dw-mshc-sdr-timing = <2 3>;
124 samsung,dw-mshc-ddr-timing = <1 2 3>; 124 samsung,dw-mshc-ddr-timing = <1 2>;
125 125
126 slot@0 { 126 slot@0 {
127 reg = <0>; 127 reg = <0>;
@@ -141,8 +141,8 @@
141 fifo-depth = <0x80>; 141 fifo-depth = <0x80>;
142 card-detect-delay = <200>; 142 card-detect-delay = <200>;
143 samsung,dw-mshc-ciu-div = <3>; 143 samsung,dw-mshc-ciu-div = <3>;
144 samsung,dw-mshc-sdr-timing = <2 3 3>; 144 samsung,dw-mshc-sdr-timing = <2 3>;
145 samsung,dw-mshc-ddr-timing = <1 2 3>; 145 samsung,dw-mshc-ddr-timing = <1 2>;
146 146
147 slot@0 { 147 slot@0 {
148 reg = <0>; 148 reg = <0>;
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 63f2fbcfe819..69140ba99f46 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -170,10 +170,9 @@
170 gpio-bank = <8>; 170 gpio-bank = <8>;
171 }; 171 };
172 172
173 pinctrl@80157000 { 173 pinctrl {
174 // This is actually the PRCMU base address 174 compatible = "stericsson,nmk-pinctrl";
175 reg = <0x80157000 0x2000>; 175 prcm = <&prcmu>;
176 compatible = "stericsson,nmk_pinctrl";
177 }; 176 };
178 177
179 usb@a03e0000 { 178 usb@a03e0000 {
@@ -190,9 +189,10 @@
190 interrupts = <0 25 0x4>; 189 interrupts = <0 25 0x4>;
191 }; 190 };
192 191
193 prcmu@80157000 { 192 prcmu: prcmu@80157000 {
194 compatible = "stericsson,db8500-prcmu"; 193 compatible = "stericsson,db8500-prcmu";
195 reg = <0x80157000 0x1000>; 194 reg = <0x80157000 0x1000>;
195 reg-names = "prcmu";
196 interrupts = <0 47 0x4>; 196 interrupts = <0 47 0x4>;
197 #address-cells = <1>; 197 #address-cells = <1>;
198 #size-cells = <1>; 198 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index fed7d3f9f431..cdee96fca6e2 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -26,10 +26,15 @@
26}; 26};
27 27
28&uart0 { status = "okay"; }; 28&uart0 { status = "okay"; };
29&sdio0 { status = "okay"; };
30&sata0 { status = "okay"; }; 29&sata0 { status = "okay"; };
31&i2c0 { status = "okay"; }; 30&i2c0 { status = "okay"; };
32 31
32&sdio0 {
33 status = "okay";
34 /* sdio0 card detect is connected to wrong pin on CuBox */
35 cd-gpios = <&gpio0 12 1>;
36};
37
33&spi0 { 38&spi0 {
34 status = "okay"; 39 status = "okay";
35 40
@@ -42,9 +47,14 @@
42}; 47};
43 48
44&pinctrl { 49&pinctrl {
45 pinctrl-0 = <&pmx_gpio_18>; 50 pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;
46 pinctrl-names = "default"; 51 pinctrl-names = "default";
47 52
53 pmx_gpio_12: pmx-gpio-12 {
54 marvell,pins = "mpp12";
55 marvell,function = "gpio";
56 };
57
48 pmx_gpio_18: pmx-gpio-18 { 58 pmx_gpio_18: pmx-gpio-18 {
49 marvell,pins = "mpp18"; 59 marvell,pins = "mpp18";
50 marvell,function = "gpio"; 60 marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 942d5761ca97..e05b18f3c33d 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -115,8 +115,8 @@
115 fifo-depth = <0x80>; 115 fifo-depth = <0x80>;
116 card-detect-delay = <200>; 116 card-detect-delay = <200>;
117 samsung,dw-mshc-ciu-div = <3>; 117 samsung,dw-mshc-ciu-div = <3>;
118 samsung,dw-mshc-sdr-timing = <2 3 3>; 118 samsung,dw-mshc-sdr-timing = <2 3>;
119 samsung,dw-mshc-ddr-timing = <1 2 3>; 119 samsung,dw-mshc-ddr-timing = <1 2>;
120 120
121 slot@0 { 121 slot@0 {
122 reg = <0>; 122 reg = <0>;
@@ -139,8 +139,8 @@
139 fifo-depth = <0x80>; 139 fifo-depth = <0x80>;
140 card-detect-delay = <200>; 140 card-detect-delay = <200>;
141 samsung,dw-mshc-ciu-div = <3>; 141 samsung,dw-mshc-ciu-div = <3>;
142 samsung,dw-mshc-sdr-timing = <2 3 3>; 142 samsung,dw-mshc-sdr-timing = <2 3>;
143 samsung,dw-mshc-ddr-timing = <1 2 3>; 143 samsung,dw-mshc-ddr-timing = <1 2>;
144 144
145 slot@0 { 145 slot@0 {
146 reg = <0>; 146 reg = <0>;
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df5625..6aad34ad9517 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
37 next-level-cache = <&L2>; 37 next-level-cache = <&L2>;
38 clocks = <&a9pll>; 38 clocks = <&a9pll>;
39 clock-names = "cpu"; 39 clock-names = "cpu";
40 operating-points = <
41 /* kHz ignored */
42 1300000 1000000
43 1200000 1000000
44 1100000 1000000
45 800000 1000000
46 400000 1000000
47 200000 1000000
48 >;
49 clock-latency = <100000>;
40 }; 50 };
41 51
42 cpu@901 { 52 cpu@901 {
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index 9bc6785ad228..77d21abfcdf7 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -1,4 +1,5 @@
1/include/ "kirkwood.dtsi" 1/include/ "kirkwood.dtsi"
2/include/ "kirkwood-6281.dtsi"
2 3
3/ { 4/ {
4 chosen { 5 chosen {
@@ -6,6 +7,21 @@
6 }; 7 };
7 8
8 ocp@f1000000 { 9 ocp@f1000000 {
10 pinctrl: pinctrl@10000 {
11 pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0
12 &pmx_ns2_sata0 &pmx_ns2_sata1>;
13 pinctrl-names = "default";
14
15 pmx_ns2_sata0: pmx-ns2-sata0 {
16 marvell,pins = "mpp21";
17 marvell,function = "sata0";
18 };
19 pmx_ns2_sata1: pmx-ns2-sata1 {
20 marvell,pins = "mpp20";
21 marvell,function = "sata1";
22 };
23 };
24
9 serial@12000 { 25 serial@12000 {
10 clock-frequency = <166666667>; 26 clock-frequency = <166666667>;
11 status = "okay"; 27 status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 110d6cbb795b..d6ab442b7011 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -36,6 +36,7 @@
36 reg = <0x10100 0x40>; 36 reg = <0x10100 0x40>;
37 ngpios = <32>; 37 ngpios = <32>;
38 interrupt-controller; 38 interrupt-controller;
39 #interrupt-cells = <2>;
39 interrupts = <35>, <36>, <37>, <38>; 40 interrupts = <35>, <36>, <37>, <38>;
40 }; 41 };
41 42
@@ -46,6 +47,7 @@
46 reg = <0x10140 0x40>; 47 reg = <0x10140 0x40>;
47 ngpios = <18>; 48 ngpios = <18>;
48 interrupt-controller; 49 interrupt-controller;
50 #interrupt-cells = <2>;
49 interrupts = <39>, <40>, <41>; 51 interrupts = <39>, <40>, <41>;
50 }; 52 };
51 53
diff --git a/arch/arm/boot/dts/kizbox.dts b/arch/arm/boot/dts/kizbox.dts
index e8814fe0e277..b4dc3ed9a3ec 100644
--- a/arch/arm/boot/dts/kizbox.dts
+++ b/arch/arm/boot/dts/kizbox.dts
@@ -48,6 +48,8 @@
48 48
49 macb0: ethernet@fffc4000 { 49 macb0: ethernet@fffc4000 {
50 phy-mode = "mii"; 50 phy-mode = "mii";
51 pinctrl-0 = <&pinctrl_macb_rmii
52 &pinctrl_macb_rmii_mii_alt>;
51 status = "okay"; 53 status = "okay";
52 }; 54 };
53 55
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 055fca542120..3329719a9412 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -58,10 +58,11 @@
58 #size-cells = <1>; 58 #size-cells = <1>;
59 ranges = <0x88000000 0x88000000 0x40000>; 59 ranges = <0x88000000 0x88000000 0x40000>;
60 60
61 clock-controller@88000000 { 61 clks: clock-controller@88000000 {
62 compatible = "sirf,prima2-clkc"; 62 compatible = "sirf,prima2-clkc";
63 reg = <0x88000000 0x1000>; 63 reg = <0x88000000 0x1000>;
64 interrupts = <3>; 64 interrupts = <3>;
65 #clock-cells = <1>;
65 }; 66 };
66 67
67 reset-controller@88010000 { 68 reset-controller@88010000 {
@@ -85,6 +86,7 @@
85 compatible = "sirf,prima2-memc"; 86 compatible = "sirf,prima2-memc";
86 reg = <0x90000000 0x10000>; 87 reg = <0x90000000 0x10000>;
87 interrupts = <27>; 88 interrupts = <27>;
89 clocks = <&clks 5>;
88 }; 90 };
89 }; 91 };
90 92
@@ -104,6 +106,7 @@
104 compatible = "sirf,prima2-vpp"; 106 compatible = "sirf,prima2-vpp";
105 reg = <0x90020000 0x10000>; 107 reg = <0x90020000 0x10000>;
106 interrupts = <31>; 108 interrupts = <31>;
109 clocks = <&clks 35>;
107 }; 110 };
108 }; 111 };
109 112
@@ -117,6 +120,7 @@
117 compatible = "powervr,sgx531"; 120 compatible = "powervr,sgx531";
118 reg = <0x98000000 0x8000000>; 121 reg = <0x98000000 0x8000000>;
119 interrupts = <6>; 122 interrupts = <6>;
123 clocks = <&clks 32>;
120 }; 124 };
121 }; 125 };
122 126
@@ -130,6 +134,7 @@
130 compatible = "sirf,prima2-video-codec"; 134 compatible = "sirf,prima2-video-codec";
131 reg = <0xa0000000 0x8000000>; 135 reg = <0xa0000000 0x8000000>;
132 interrupts = <5>; 136 interrupts = <5>;
137 clocks = <&clks 33>;
133 }; 138 };
134 }; 139 };
135 140
@@ -149,12 +154,14 @@
149 compatible = "sirf,prima2-gps"; 154 compatible = "sirf,prima2-gps";
150 reg = <0xa8010000 0x10000>; 155 reg = <0xa8010000 0x10000>;
151 interrupts = <7>; 156 interrupts = <7>;
157 clocks = <&clks 9>;
152 }; 158 };
153 159
154 dsp@a9000000 { 160 dsp@a9000000 {
155 compatible = "sirf,prima2-dsp"; 161 compatible = "sirf,prima2-dsp";
156 reg = <0xa9000000 0x1000000>; 162 reg = <0xa9000000 0x1000000>;
157 interrupts = <8>; 163 interrupts = <8>;
164 clocks = <&clks 8>;
158 }; 165 };
159 }; 166 };
160 167
@@ -174,12 +181,14 @@
174 compatible = "sirf,prima2-nand"; 181 compatible = "sirf,prima2-nand";
175 reg = <0xb0030000 0x10000>; 182 reg = <0xb0030000 0x10000>;
176 interrupts = <41>; 183 interrupts = <41>;
184 clocks = <&clks 26>;
177 }; 185 };
178 186
179 audio@b0040000 { 187 audio@b0040000 {
180 compatible = "sirf,prima2-audio"; 188 compatible = "sirf,prima2-audio";
181 reg = <0xb0040000 0x10000>; 189 reg = <0xb0040000 0x10000>;
182 interrupts = <35>; 190 interrupts = <35>;
191 clocks = <&clks 27>;
183 }; 192 };
184 193
185 uart0: uart@b0050000 { 194 uart0: uart@b0050000 {
@@ -187,6 +196,7 @@
187 compatible = "sirf,prima2-uart"; 196 compatible = "sirf,prima2-uart";
188 reg = <0xb0050000 0x10000>; 197 reg = <0xb0050000 0x10000>;
189 interrupts = <17>; 198 interrupts = <17>;
199 clocks = <&clks 13>;
190 }; 200 };
191 201
192 uart1: uart@b0060000 { 202 uart1: uart@b0060000 {
@@ -194,6 +204,7 @@
194 compatible = "sirf,prima2-uart"; 204 compatible = "sirf,prima2-uart";
195 reg = <0xb0060000 0x10000>; 205 reg = <0xb0060000 0x10000>;
196 interrupts = <18>; 206 interrupts = <18>;
207 clocks = <&clks 14>;
197 }; 208 };
198 209
199 uart2: uart@b0070000 { 210 uart2: uart@b0070000 {
@@ -201,6 +212,7 @@
201 compatible = "sirf,prima2-uart"; 212 compatible = "sirf,prima2-uart";
202 reg = <0xb0070000 0x10000>; 213 reg = <0xb0070000 0x10000>;
203 interrupts = <19>; 214 interrupts = <19>;
215 clocks = <&clks 15>;
204 }; 216 };
205 217
206 usp0: usp@b0080000 { 218 usp0: usp@b0080000 {
@@ -208,6 +220,7 @@
208 compatible = "sirf,prima2-usp"; 220 compatible = "sirf,prima2-usp";
209 reg = <0xb0080000 0x10000>; 221 reg = <0xb0080000 0x10000>;
210 interrupts = <20>; 222 interrupts = <20>;
223 clocks = <&clks 28>;
211 }; 224 };
212 225
213 usp1: usp@b0090000 { 226 usp1: usp@b0090000 {
@@ -215,6 +228,7 @@
215 compatible = "sirf,prima2-usp"; 228 compatible = "sirf,prima2-usp";
216 reg = <0xb0090000 0x10000>; 229 reg = <0xb0090000 0x10000>;
217 interrupts = <21>; 230 interrupts = <21>;
231 clocks = <&clks 29>;
218 }; 232 };
219 233
220 usp2: usp@b00a0000 { 234 usp2: usp@b00a0000 {
@@ -222,6 +236,7 @@
222 compatible = "sirf,prima2-usp"; 236 compatible = "sirf,prima2-usp";
223 reg = <0xb00a0000 0x10000>; 237 reg = <0xb00a0000 0x10000>;
224 interrupts = <22>; 238 interrupts = <22>;
239 clocks = <&clks 30>;
225 }; 240 };
226 241
227 dmac0: dma-controller@b00b0000 { 242 dmac0: dma-controller@b00b0000 {
@@ -229,6 +244,7 @@
229 compatible = "sirf,prima2-dmac"; 244 compatible = "sirf,prima2-dmac";
230 reg = <0xb00b0000 0x10000>; 245 reg = <0xb00b0000 0x10000>;
231 interrupts = <12>; 246 interrupts = <12>;
247 clocks = <&clks 24>;
232 }; 248 };
233 249
234 dmac1: dma-controller@b0160000 { 250 dmac1: dma-controller@b0160000 {
@@ -236,11 +252,13 @@
236 compatible = "sirf,prima2-dmac"; 252 compatible = "sirf,prima2-dmac";
237 reg = <0xb0160000 0x10000>; 253 reg = <0xb0160000 0x10000>;
238 interrupts = <13>; 254 interrupts = <13>;
255 clocks = <&clks 25>;
239 }; 256 };
240 257
241 vip@b00C0000 { 258 vip@b00C0000 {
242 compatible = "sirf,prima2-vip"; 259 compatible = "sirf,prima2-vip";
243 reg = <0xb00C0000 0x10000>; 260 reg = <0xb00C0000 0x10000>;
261 clocks = <&clks 31>;
244 }; 262 };
245 263
246 spi0: spi@b00d0000 { 264 spi0: spi@b00d0000 {
@@ -248,6 +266,7 @@
248 compatible = "sirf,prima2-spi"; 266 compatible = "sirf,prima2-spi";
249 reg = <0xb00d0000 0x10000>; 267 reg = <0xb00d0000 0x10000>;
250 interrupts = <15>; 268 interrupts = <15>;
269 clocks = <&clks 19>;
251 }; 270 };
252 271
253 spi1: spi@b0170000 { 272 spi1: spi@b0170000 {
@@ -255,6 +274,7 @@
255 compatible = "sirf,prima2-spi"; 274 compatible = "sirf,prima2-spi";
256 reg = <0xb0170000 0x10000>; 275 reg = <0xb0170000 0x10000>;
257 interrupts = <16>; 276 interrupts = <16>;
277 clocks = <&clks 20>;
258 }; 278 };
259 279
260 i2c0: i2c@b00e0000 { 280 i2c0: i2c@b00e0000 {
@@ -262,6 +282,7 @@
262 compatible = "sirf,prima2-i2c"; 282 compatible = "sirf,prima2-i2c";
263 reg = <0xb00e0000 0x10000>; 283 reg = <0xb00e0000 0x10000>;
264 interrupts = <24>; 284 interrupts = <24>;
285 clocks = <&clks 17>;
265 }; 286 };
266 287
267 i2c1: i2c@b00f0000 { 288 i2c1: i2c@b00f0000 {
@@ -269,12 +290,14 @@
269 compatible = "sirf,prima2-i2c"; 290 compatible = "sirf,prima2-i2c";
270 reg = <0xb00f0000 0x10000>; 291 reg = <0xb00f0000 0x10000>;
271 interrupts = <25>; 292 interrupts = <25>;
293 clocks = <&clks 18>;
272 }; 294 };
273 295
274 tsc@b0110000 { 296 tsc@b0110000 {
275 compatible = "sirf,prima2-tsc"; 297 compatible = "sirf,prima2-tsc";
276 reg = <0xb0110000 0x10000>; 298 reg = <0xb0110000 0x10000>;
277 interrupts = <33>; 299 interrupts = <33>;
300 clocks = <&clks 16>;
278 }; 301 };
279 302
280 gpio: pinctrl@b0120000 { 303 gpio: pinctrl@b0120000 {
@@ -507,17 +530,20 @@
507 pwm@b0130000 { 530 pwm@b0130000 {
508 compatible = "sirf,prima2-pwm"; 531 compatible = "sirf,prima2-pwm";
509 reg = <0xb0130000 0x10000>; 532 reg = <0xb0130000 0x10000>;
533 clocks = <&clks 21>;
510 }; 534 };
511 535
512 efusesys@b0140000 { 536 efusesys@b0140000 {
513 compatible = "sirf,prima2-efuse"; 537 compatible = "sirf,prima2-efuse";
514 reg = <0xb0140000 0x10000>; 538 reg = <0xb0140000 0x10000>;
539 clocks = <&clks 22>;
515 }; 540 };
516 541
517 pulsec@b0150000 { 542 pulsec@b0150000 {
518 compatible = "sirf,prima2-pulsec"; 543 compatible = "sirf,prima2-pulsec";
519 reg = <0xb0150000 0x10000>; 544 reg = <0xb0150000 0x10000>;
520 interrupts = <48>; 545 interrupts = <48>;
546 clocks = <&clks 23>;
521 }; 547 };
522 548
523 pci-iobg { 549 pci-iobg {
@@ -616,12 +642,14 @@
616 compatible = "chipidea,ci13611a-prima2"; 642 compatible = "chipidea,ci13611a-prima2";
617 reg = <0xb8000000 0x10000>; 643 reg = <0xb8000000 0x10000>;
618 interrupts = <10>; 644 interrupts = <10>;
645 clocks = <&clks 40>;
619 }; 646 };
620 647
621 usb1: usb@b00f0000 { 648 usb1: usb@b00f0000 {
622 compatible = "chipidea,ci13611a-prima2"; 649 compatible = "chipidea,ci13611a-prima2";
623 reg = <0xb8010000 0x10000>; 650 reg = <0xb8010000 0x10000>;
624 interrupts = <11>; 651 interrupts = <11>;
652 clocks = <&clks 41>;
625 }; 653 };
626 654
627 sata@b00f0000 { 655 sata@b00f0000 {
@@ -634,6 +662,7 @@
634 compatible = "sirf,prima2-security"; 662 compatible = "sirf,prima2-security";
635 reg = <0xb8030000 0x10000>; 663 reg = <0xb8030000 0x10000>;
636 interrupts = <42>; 664 interrupts = <42>;
665 clocks = <&clks 7>;
637 }; 666 };
638 }; 667 };
639 }; 668 };
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e61fdd47bd01..f99f60dadf5d 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,4 +16,34 @@
16 memory { 16 memory {
17 reg = <0x40000000 0x80000000>; 17 reg = <0x40000000 0x80000000>;
18 }; 18 };
19
20 soc {
21 pinctrl@01c20800 {
22 compatible = "allwinner,sun4i-a10-pinctrl";
23 reg = <0x01c20800 0x400>;
24 #address-cells = <1>;
25 #size-cells = <0>;
26
27 uart0_pins_a: uart0@0 {
28 allwinner,pins = "PB22", "PB23";
29 allwinner,function = "uart0";
30 allwinner,drive = <0>;
31 allwinner,pull = <0>;
32 };
33
34 uart0_pins_b: uart0@1 {
35 allwinner,pins = "PF2", "PF4";
36 allwinner,function = "uart0";
37 allwinner,drive = <0>;
38 allwinner,pull = <0>;
39 };
40
41 uart1_pins_a: uart1@0 {
42 allwinner,pins = "PA10", "PA11";
43 allwinner,function = "uart1";
44 allwinner,drive = <0>;
45 allwinner,pull = <0>;
46 };
47 };
48 };
19}; 49};
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 498a091a4ea2..4a1e45d4aace 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -24,6 +24,8 @@
24 24
25 soc { 25 soc {
26 uart1: uart@01c28400 { 26 uart1: uart@01c28400 {
27 pinctrl-names = "default";
28 pinctrl-0 = <&uart1_pins_b>;
27 status = "okay"; 29 status = "okay";
28 }; 30 };
29 }; 31 };
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 59a2d265a98e..e1121890fb29 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,4 +17,27 @@
17 memory { 17 memory {
18 reg = <0x40000000 0x20000000>; 18 reg = <0x40000000 0x20000000>;
19 }; 19 };
20
21 soc {
22 pinctrl@01c20800 {
23 compatible = "allwinner,sun5i-a13-pinctrl";
24 reg = <0x01c20800 0x400>;
25 #address-cells = <1>;
26 #size-cells = <0>;
27
28 uart1_pins_a: uart1@0 {
29 allwinner,pins = "PE10", "PE11";
30 allwinner,function = "uart1";
31 allwinner,drive = <0>;
32 allwinner,pull = <0>;
33 };
34
35 uart1_pins_b: uart1@1 {
36 allwinner,pins = "PG3", "PG4";
37 allwinner,function = "uart1";
38 allwinner,drive = <0>;
39 allwinner,pull = <0>;
40 };
41 };
42 };
20}; 43};
diff --git a/arch/arm/boot/dts/sunxi.dtsi b/arch/arm/boot/dts/sunxi.dtsi
index 8bbc2bfef221..8b36abea9f2e 100644
--- a/arch/arm/boot/dts/sunxi.dtsi
+++ b/arch/arm/boot/dts/sunxi.dtsi
@@ -60,19 +60,21 @@
60 }; 60 };
61 61
62 uart0: uart@01c28000 { 62 uart0: uart@01c28000 {
63 compatible = "ns8250"; 63 compatible = "snps,dw-apb-uart";
64 reg = <0x01c28000 0x400>; 64 reg = <0x01c28000 0x400>;
65 interrupts = <1>; 65 interrupts = <1>;
66 reg-shift = <2>; 66 reg-shift = <2>;
67 reg-io-width = <4>;
67 clock-frequency = <24000000>; 68 clock-frequency = <24000000>;
68 status = "disabled"; 69 status = "disabled";
69 }; 70 };
70 71
71 uart1: uart@01c28400 { 72 uart1: uart@01c28400 {
72 compatible = "ns8250"; 73 compatible = "snps,dw-apb-uart";
73 reg = <0x01c28400 0x400>; 74 reg = <0x01c28400 0x400>;
74 interrupts = <2>; 75 interrupts = <2>;
75 reg-shift = <2>; 76 reg-shift = <2>;
77 reg-io-width = <4>;
76 clock-frequency = <24000000>; 78 clock-frequency = <24000000>;
77 status = "disabled"; 79 status = "disabled";
78 }; 80 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 1fc405a9ecfb..cf8071ad22d5 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -45,7 +45,6 @@
45 reg = <1>; 45 reg = <1>;
46 }; 46 };
47 47
48/* A7s disabled till big.LITTLE patches are available...
49 cpu2: cpu@2 { 48 cpu2: cpu@2 {
50 device_type = "cpu"; 49 device_type = "cpu";
51 compatible = "arm,cortex-a7"; 50 compatible = "arm,cortex-a7";
@@ -63,7 +62,6 @@
63 compatible = "arm,cortex-a7"; 62 compatible = "arm,cortex-a7";
64 reg = <0x102>; 63 reg = <0x102>;
65 }; 64 };
66*/
67 }; 65 };
68 66
69 memory@80000000 { 67 memory@80000000 {
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d1..87dfa9026c5b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
351 irq_set_chained_handler(irq, gic_handle_cascade_irq); 351 irq_set_chained_handler(irq, gic_handle_cascade_irq);
352} 352}
353 353
354static u8 gic_get_cpumask(struct gic_chip_data *gic)
355{
356 void __iomem *base = gic_data_dist_base(gic);
357 u32 mask, i;
358
359 for (i = mask = 0; i < 32; i += 4) {
360 mask = readl_relaxed(base + GIC_DIST_TARGET + i);
361 mask |= mask >> 16;
362 mask |= mask >> 8;
363 if (mask)
364 break;
365 }
366
367 if (!mask)
368 pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
369
370 return mask;
371}
372
354static void __init gic_dist_init(struct gic_chip_data *gic) 373static void __init gic_dist_init(struct gic_chip_data *gic)
355{ 374{
356 unsigned int i; 375 unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
369 /* 388 /*
370 * Set all global interrupts to this CPU only. 389 * Set all global interrupts to this CPU only.
371 */ 390 */
372 cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); 391 cpumask = gic_get_cpumask(gic);
392 cpumask |= cpumask << 8;
393 cpumask |= cpumask << 16;
373 for (i = 32; i < gic_irqs; i += 4) 394 for (i = 32; i < gic_irqs; i += 4)
374 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 395 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
375 396
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
400 * Get what the GIC says our CPU mask is. 421 * Get what the GIC says our CPU mask is.
401 */ 422 */
402 BUG_ON(cpu >= NR_GIC_CPU_IF); 423 BUG_ON(cpu >= NR_GIC_CPU_IF);
403 cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); 424 cpu_mask = gic_get_cpumask(gic);
404 gic_cpu_map[cpu] = cpu_mask; 425 gic_cpu_map[cpu] = cpu_mask;
405 426
406 /* 427 /*
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index b175577d7abb..1ea959019fcd 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -19,6 +19,7 @@ CONFIG_SOC_AT91SAM9260=y
19CONFIG_SOC_AT91SAM9263=y 19CONFIG_SOC_AT91SAM9263=y
20CONFIG_SOC_AT91SAM9G45=y 20CONFIG_SOC_AT91SAM9G45=y
21CONFIG_SOC_AT91SAM9X5=y 21CONFIG_SOC_AT91SAM9X5=y
22CONFIG_SOC_AT91SAM9N12=y
22CONFIG_MACH_AT91SAM_DT=y 23CONFIG_MACH_AT91SAM_DT=y
23CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 24CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
24CONFIG_AT91_TIMER_HZ=128 25CONFIG_AT91_TIMER_HZ=128
@@ -31,7 +32,7 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
31CONFIG_ZBOOT_ROM_BSS=0x0 32CONFIG_ZBOOT_ROM_BSS=0x0
32CONFIG_ARM_APPENDED_DTB=y 33CONFIG_ARM_APPENDED_DTB=y
33CONFIG_ARM_ATAG_DTB_COMPAT=y 34CONFIG_ARM_ATAG_DTB_COMPAT=y
34CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" 35CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
35CONFIG_KEXEC=y 36CONFIG_KEXEC=y
36CONFIG_AUTO_ZRELADDR=y 37CONFIG_AUTO_ZRELADDR=y
37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
34@ A little glue here to select the correct code below for the ARM CPU 34@ A little glue here to select the correct code below for the ARM CPU
35@ that is being targetted. 35@ that is being targetted.
36 36
37#include <linux/linkage.h>
38
37.text 39.text
38.code 32
39 40
40.type AES_Te,%object 41.type AES_Te,%object
41.align 5 42.align 5
@@ -145,10 +146,8 @@ AES_Te:
145 146
146@ void AES_encrypt(const unsigned char *in, unsigned char *out, 147@ void AES_encrypt(const unsigned char *in, unsigned char *out,
147@ const AES_KEY *key) { 148@ const AES_KEY *key) {
148.global AES_encrypt
149.type AES_encrypt,%function
150.align 5 149.align 5
151AES_encrypt: 150ENTRY(AES_encrypt)
152 sub r3,pc,#8 @ AES_encrypt 151 sub r3,pc,#8 @ AES_encrypt
153 stmdb sp!,{r1,r4-r12,lr} 152 stmdb sp!,{r1,r4-r12,lr}
154 mov r12,r0 @ inp 153 mov r12,r0 @ inp
@@ -239,15 +238,8 @@ AES_encrypt:
239 strb r6,[r12,#14] 238 strb r6,[r12,#14]
240 strb r3,[r12,#15] 239 strb r3,[r12,#15]
241#endif 240#endif
242#if __ARM_ARCH__>=5
243 ldmia sp!,{r4-r12,pc} 241 ldmia sp!,{r4-r12,pc}
244#else 242ENDPROC(AES_encrypt)
245 ldmia sp!,{r4-r12,lr}
246 tst lr,#1
247 moveq pc,lr @ be binary compatible with V4, yet
248 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
249#endif
250.size AES_encrypt,.-AES_encrypt
251 243
252.type _armv4_AES_encrypt,%function 244.type _armv4_AES_encrypt,%function
253.align 2 245.align 2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
386 ldr pc,[sp],#4 @ pop and return 378 ldr pc,[sp],#4 @ pop and return
387.size _armv4_AES_encrypt,.-_armv4_AES_encrypt 379.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
388 380
389.global private_AES_set_encrypt_key
390.type private_AES_set_encrypt_key,%function
391.align 5 381.align 5
392private_AES_set_encrypt_key: 382ENTRY(private_AES_set_encrypt_key)
393_armv4_AES_set_encrypt_key: 383_armv4_AES_set_encrypt_key:
394 sub r3,pc,#8 @ AES_set_encrypt_key 384 sub r3,pc,#8 @ AES_set_encrypt_key
395 teq r0,#0 385 teq r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
658 648
659.Ldone: mov r0,#0 649.Ldone: mov r0,#0
660 ldmia sp!,{r4-r12,lr} 650 ldmia sp!,{r4-r12,lr}
661.Labrt: tst lr,#1 651.Labrt: mov pc,lr
662 moveq pc,lr @ be binary compatible with V4, yet 652ENDPROC(private_AES_set_encrypt_key)
663 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
664.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
665 653
666.global private_AES_set_decrypt_key
667.type private_AES_set_decrypt_key,%function
668.align 5 654.align 5
669private_AES_set_decrypt_key: 655ENTRY(private_AES_set_decrypt_key)
670 str lr,[sp,#-4]! @ push lr 656 str lr,[sp,#-4]! @ push lr
671#if 0 657#if 0
672 @ kernel does both of these in setkey so optimise this bit out by 658 @ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
748 bne .Lmix 734 bne .Lmix
749 735
750 mov r0,#0 736 mov r0,#0
751#if __ARM_ARCH__>=5
752 ldmia sp!,{r4-r12,pc} 737 ldmia sp!,{r4-r12,pc}
753#else 738ENDPROC(private_AES_set_decrypt_key)
754 ldmia sp!,{r4-r12,lr}
755 tst lr,#1
756 moveq pc,lr @ be binary compatible with V4, yet
757 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
758#endif
759.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
760 739
761.type AES_Td,%object 740.type AES_Td,%object
762.align 5 741.align 5
@@ -862,10 +841,8 @@ AES_Td:
862 841
863@ void AES_decrypt(const unsigned char *in, unsigned char *out, 842@ void AES_decrypt(const unsigned char *in, unsigned char *out,
864@ const AES_KEY *key) { 843@ const AES_KEY *key) {
865.global AES_decrypt
866.type AES_decrypt,%function
867.align 5 844.align 5
868AES_decrypt: 845ENTRY(AES_decrypt)
869 sub r3,pc,#8 @ AES_decrypt 846 sub r3,pc,#8 @ AES_decrypt
870 stmdb sp!,{r1,r4-r12,lr} 847 stmdb sp!,{r1,r4-r12,lr}
871 mov r12,r0 @ inp 848 mov r12,r0 @ inp
@@ -956,15 +933,8 @@ AES_decrypt:
956 strb r6,[r12,#14] 933 strb r6,[r12,#14]
957 strb r3,[r12,#15] 934 strb r3,[r12,#15]
958#endif 935#endif
959#if __ARM_ARCH__>=5
960 ldmia sp!,{r4-r12,pc} 936 ldmia sp!,{r4-r12,pc}
961#else 937ENDPROC(AES_decrypt)
962 ldmia sp!,{r4-r12,lr}
963 tst lr,#1
964 moveq pc,lr @ be binary compatible with V4, yet
965 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
966#endif
967.size AES_decrypt,.-AES_decrypt
968 938
969.type _armv4_AES_decrypt,%function 939.type _armv4_AES_decrypt,%function
970.align 2 940.align 2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
1064 and r9,lr,r1,lsr#8 1034 and r9,lr,r1,lsr#8
1065 1035
1066 ldrb r7,[r10,r7] @ Td4[s1>>0] 1036 ldrb r7,[r10,r7] @ Td4[s1>>0]
1067 ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24] 1037 ARM( ldrb r1,[r10,r1,lsr#24] ) @ Td4[s1>>24]
1038 THUMB( add r1,r10,r1,lsr#24 ) @ Td4[s1>>24]
1039 THUMB( ldrb r1,[r1] )
1068 ldrb r8,[r10,r8] @ Td4[s1>>16] 1040 ldrb r8,[r10,r8] @ Td4[s1>>16]
1069 eor r0,r7,r0,lsl#24 1041 eor r0,r7,r0,lsl#24
1070 ldrb r9,[r10,r9] @ Td4[s1>>8] 1042 ldrb r9,[r10,r9] @ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
1077 ldrb r8,[r10,r8] @ Td4[s2>>0] 1049 ldrb r8,[r10,r8] @ Td4[s2>>0]
1078 and r9,lr,r2,lsr#16 1050 and r9,lr,r2,lsr#16
1079 1051
1080 ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24] 1052 ARM( ldrb r2,[r10,r2,lsr#24] ) @ Td4[s2>>24]
1053 THUMB( add r2,r10,r2,lsr#24 ) @ Td4[s2>>24]
1054 THUMB( ldrb r2,[r2] )
1081 eor r0,r0,r7,lsl#8 1055 eor r0,r0,r7,lsl#8
1082 ldrb r9,[r10,r9] @ Td4[s2>>16] 1056 ldrb r9,[r10,r9] @ Td4[s2>>16]
1083 eor r1,r8,r1,lsl#16 1057 eor r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
1090 and r9,lr,r3 @ i2 1064 and r9,lr,r3 @ i2
1091 1065
1092 ldrb r9,[r10,r9] @ Td4[s3>>0] 1066 ldrb r9,[r10,r9] @ Td4[s3>>0]
1093 ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24] 1067 ARM( ldrb r3,[r10,r3,lsr#24] ) @ Td4[s3>>24]
1068 THUMB( add r3,r10,r3,lsr#24 ) @ Td4[s3>>24]
1069 THUMB( ldrb r3,[r3] )
1094 eor r0,r0,r7,lsl#16 1070 eor r0,r0,r7,lsl#16
1095 ldr r7,[r11,#0] 1071 ldr r7,[r11,#0]
1096 eor r1,r1,r8,lsl#8 1072 eor r1,r1,r8,lsl#8
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
51@ Profiler-assisted and platform-specific optimization resulted in 10% 51@ Profiler-assisted and platform-specific optimization resulted in 10%
52@ improvement on Cortex A8 core and 12.2 cycles per byte. 52@ improvement on Cortex A8 core and 12.2 cycles per byte.
53 53
54.text 54#include <linux/linkage.h>
55 55
56.global sha1_block_data_order 56.text
57.type sha1_block_data_order,%function
58 57
59.align 2 58.align 2
60sha1_block_data_order: 59ENTRY(sha1_block_data_order)
61 stmdb sp!,{r4-r12,lr} 60 stmdb sp!,{r4-r12,lr}
62 add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 61 add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
63 ldmia r0,{r3,r4,r5,r6,r7} 62 ldmia r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
194 eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) 193 eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
195 str r9,[r14,#-4]! 194 str r9,[r14,#-4]!
196 add r3,r3,r10 @ E+=F_00_19(B,C,D) 195 add r3,r3,r10 @ E+=F_00_19(B,C,D)
197 teq r14,sp 196 cmp r14,sp
198 bne .L_00_15 @ [((11+4)*5+2)*3] 197 bne .L_00_15 @ [((11+4)*5+2)*3]
199#if __ARM_ARCH__<7 198#if __ARM_ARCH__<7
200 ldrb r10,[r1,#2] 199 ldrb r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
374 @ F_xx_xx 373 @ F_xx_xx
375 add r3,r3,r9 @ E+=X[i] 374 add r3,r3,r9 @ E+=X[i]
376 add r3,r3,r10 @ E+=F_20_39(B,C,D) 375 add r3,r3,r10 @ E+=F_20_39(B,C,D)
377 teq r14,sp @ preserve carry 376 ARM( teq r14,sp ) @ preserve carry
377 THUMB( mov r11,sp )
378 THUMB( teq r14,r11 ) @ preserve carry
378 bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] 379 bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
379 bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes 380 bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
380 381
@@ -466,7 +467,7 @@ sha1_block_data_order:
466 add r3,r3,r9 @ E+=X[i] 467 add r3,r3,r9 @ E+=X[i]
467 add r3,r3,r10 @ E+=F_40_59(B,C,D) 468 add r3,r3,r10 @ E+=F_40_59(B,C,D)
468 add r3,r3,r11,ror#2 469 add r3,r3,r11,ror#2
469 teq r14,sp 470 cmp r14,sp
470 bne .L_40_59 @ [+((12+5)*5+2)*4] 471 bne .L_40_59 @ [+((12+5)*5+2)*4]
471 472
472 ldr r8,.LK_60_79 473 ldr r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
485 teq r1,r2 486 teq r1,r2
486 bne .Lloop @ [+18], total 1307 487 bne .Lloop @ [+18], total 1307
487 488
488#if __ARM_ARCH__>=5
489 ldmia sp!,{r4-r12,pc} 489 ldmia sp!,{r4-r12,pc}
490#else
491 ldmia sp!,{r4-r12,lr}
492 tst lr,#1
493 moveq pc,lr @ be binary compatible with V4, yet
494 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
495#endif
496.align 2 490.align 2
497.LK_00_19: .word 0x5a827999 491.LK_00_19: .word 0x5a827999
498.LK_20_39: .word 0x6ed9eba1 492.LK_20_39: .word 0x6ed9eba1
499.LK_40_59: .word 0x8f1bbcdc 493.LK_40_59: .word 0x8f1bbcdc
500.LK_60_79: .word 0xca62c1d6 494.LK_60_79: .word 0xca62c1d6
501.size sha1_block_data_order,.-sha1_block_data_order 495ENDPROC(sha1_block_data_order)
502.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>" 496.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
503.align 2 497.align 2
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b5..05ee9eebad6b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -246,18 +246,14 @@
246 * 246 *
247 * This macro is intended for forcing the CPU into SVC mode at boot time. 247 * This macro is intended for forcing the CPU into SVC mode at boot time.
248 * you cannot return to the original mode. 248 * you cannot return to the original mode.
249 *
250 * Beware, it also clobers LR.
251 */ 249 */
252.macro safe_svcmode_maskall reg:req 250.macro safe_svcmode_maskall reg:req
253#if __LINUX_ARM_ARCH__ >= 6 251#if __LINUX_ARM_ARCH__ >= 6
254 mrs \reg , cpsr 252 mrs \reg , cpsr
255 mov lr , \reg 253 eor \reg, \reg, #HYP_MODE
256 and lr , lr , #MODE_MASK 254 tst \reg, #MODE_MASK
257 cmp lr , #HYP_MODE
258 orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT
259 bic \reg , \reg , #MODE_MASK 255 bic \reg , \reg , #MODE_MASK
260 orr \reg , \reg , #SVC_MODE 256 orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
261THUMB( orr \reg , \reg , #PSR_T_BIT ) 257THUMB( orr \reg , \reg , #PSR_T_BIT )
262 bne 1f 258 bne 1f
263 orr \reg, \reg, #PSR_A_BIT 259 orr \reg, \reg, #PSR_A_BIT
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5fc..ad41ec2471e8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
64#define read_cpuid_ext(reg) 0 64#define read_cpuid_ext(reg) 0
65#endif 65#endif
66 66
67#define ARM_CPU_IMP_ARM 0x41
68#define ARM_CPU_IMP_INTEL 0x69
69
70#define ARM_CPU_PART_ARM1136 0xB360
71#define ARM_CPU_PART_ARM1156 0xB560
72#define ARM_CPU_PART_ARM1176 0xB760
73#define ARM_CPU_PART_ARM11MPCORE 0xB020
74#define ARM_CPU_PART_CORTEX_A8 0xC080
75#define ARM_CPU_PART_CORTEX_A9 0xC090
76#define ARM_CPU_PART_CORTEX_A5 0xC050
77#define ARM_CPU_PART_CORTEX_A15 0xC0F0
78#define ARM_CPU_PART_CORTEX_A7 0xC070
79
80#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
81#define ARM_CPU_XSCALE_ARCH_V1 0x2000
82#define ARM_CPU_XSCALE_ARCH_V2 0x4000
83#define ARM_CPU_XSCALE_ARCH_V3 0x6000
84
67/* 85/*
68 * The CPU ID never changes at run time, so we might as well tell the 86 * The CPU ID never changes at run time, so we might as well tell the
69 * compiler that it's constant. Use this function to read the CPU ID 87 * compiler that it's constant. Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
74 return read_cpuid(CPUID_ID); 92 return read_cpuid(CPUID_ID);
75} 93}
76 94
95static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
96{
97 return (read_cpuid_id() & 0xFF000000) >> 24;
98}
99
100static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
101{
102 return read_cpuid_id() & 0xFFF0;
103}
104
105static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
106{
107 return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
108}
109
77static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) 110static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
78{ 111{
79 return read_cpuid(CPUID_CACHETYPE); 112 return read_cpuid(CPUID_CACHETYPE);
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f306..2381199acb7d 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
2#define __ASMARM_CTI_H 2#define __ASMARM_CTI_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <asm/hardware/coresight.h>
5 6
6/* The registers' definition is from section 3.2 of 7/* The registers' definition is from section 3.2 of
7 * Embedded Cross Trigger Revision: r0p0 8 * Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
35#define LOCKACCESS 0xFB0 36#define LOCKACCESS 0xFB0
36#define LOCKSTATUS 0xFB4 37#define LOCKSTATUS 0xFB4
37 38
38/* write this value to LOCKACCESS will unlock the module, and
39 * other value will lock the module
40 */
41#define LOCKCODE 0xC5ACCE55
42
43/** 39/**
44 * struct cti - cross trigger interface struct 40 * struct cti - cross trigger interface struct
45 * @base: mapped virtual address for the cti base 41 * @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
146 */ 142 */
147static inline void cti_unlock(struct cti *cti) 143static inline void cti_unlock(struct cti *cti)
148{ 144{
149 __raw_writel(LOCKCODE, cti->base + LOCKACCESS); 145 __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
150} 146}
151 147
152/** 148/**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
158 */ 154 */
159static inline void cti_lock(struct cti *cti) 155static inline void cti_lock(struct cti *cti)
160{ 156{
161 __raw_writel(~LOCKCODE, cti->base + LOCKACCESS); 157 __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
162} 158}
163#endif 159#endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..0cf7a6b842ff 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -36,7 +36,7 @@
36/* CoreSight Component Registers */ 36/* CoreSight Component Registers */
37#define CSCR_CLASS 0xff4 37#define CSCR_CLASS 0xff4
38 38
39#define UNLOCK_MAGIC 0xc5acce55 39#define CS_LAR_KEY 0xc5acce55
40 40
41/* ETM control register, "ETM Architecture", 3.3.1 */ 41/* ETM control register, "ETM Architecture", 3.3.1 */
42#define ETMR_CTRL 0 42#define ETMR_CTRL 0
@@ -147,11 +147,11 @@
147 147
148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) 148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
149#define etm_unlock(t) \ 149#define etm_unlock(t) \
150 do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) 150 do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
151 151
152#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0) 152#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
153#define etb_unlock(t) \ 153#define etb_unlock(t) \
154 do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) 154 do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
155 155
156#endif /* __ASM_HARDWARE_CORESIGHT_H */ 156#endif /* __ASM_HARDWARE_CORESIGHT_H */
157 157
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
85#define ARM_DSCR_HDBGEN (1 << 14) 85#define ARM_DSCR_HDBGEN (1 << 14)
86#define ARM_DSCR_MDBGEN (1 << 15) 86#define ARM_DSCR_MDBGEN (1 << 15)
87 87
88/* OSLSR os lock model bits */
89#define ARM_OSLSR_OSLM0 (1 << 0)
90
88/* opcode2 numbers for the co-processor instructions. */ 91/* opcode2 numbers for the co-processor instructions. */
89#define ARM_OP2_BVR 4 92#define ARM_OP2_BVR 4
90#define ARM_OP2_BCR 5 93#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..1a66f907e5cc 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,6 +8,7 @@
8#define __idmap __section(.idmap.text) noinline notrace 8#define __idmap __section(.idmap.text) noinline notrace
9 9
10extern pgd_t *idmap_pgd; 10extern pgd_t *idmap_pgd;
11extern pgd_t *hyp_pgd;
11 12
12void setup_mm_for_reboot(void); 13void setup_mm_for_reboot(void);
13 14
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7c3d813e15df
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_ARM_H__
20#define __ARM_KVM_ARM_H__
21
22#include <linux/types.h>
23
24/* Hyp Configuration Register (HCR) bits */
25#define HCR_TGE (1 << 27)
26#define HCR_TVM (1 << 26)
27#define HCR_TTLB (1 << 25)
28#define HCR_TPU (1 << 24)
29#define HCR_TPC (1 << 23)
30#define HCR_TSW (1 << 22)
31#define HCR_TAC (1 << 21)
32#define HCR_TIDCP (1 << 20)
33#define HCR_TSC (1 << 19)
34#define HCR_TID3 (1 << 18)
35#define HCR_TID2 (1 << 17)
36#define HCR_TID1 (1 << 16)
37#define HCR_TID0 (1 << 15)
38#define HCR_TWE (1 << 14)
39#define HCR_TWI (1 << 13)
40#define HCR_DC (1 << 12)
41#define HCR_BSU (3 << 10)
42#define HCR_BSU_IS (1 << 10)
43#define HCR_FB (1 << 9)
44#define HCR_VA (1 << 8)
45#define HCR_VI (1 << 7)
46#define HCR_VF (1 << 6)
47#define HCR_AMO (1 << 5)
48#define HCR_IMO (1 << 4)
49#define HCR_FMO (1 << 3)
50#define HCR_PTW (1 << 2)
51#define HCR_SWIO (1 << 1)
52#define HCR_VM 1
53
54/*
55 * The bits we set in HCR:
56 * TAC: Trap ACTLR
57 * TSC: Trap SMC
58 * TSW: Trap cache operations by set/way
59 * TWI: Trap WFI
60 * TIDCP: Trap L2CTLR/L2ECTLR
61 * BSU_IS: Upgrade barriers to the inner shareable domain
62 * FB: Force broadcast of all maintainance operations
63 * AMO: Override CPSR.A and enable signaling with VA
64 * IMO: Override CPSR.I and enable signaling with VI
65 * FMO: Override CPSR.F and enable signaling with VF
66 * SWIO: Turn set/way invalidates into set/way clean+invalidate
67 */
68#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
69 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
70 HCR_SWIO | HCR_TIDCP)
71#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
72
73/* System Control Register (SCTLR) bits */
74#define SCTLR_TE (1 << 30)
75#define SCTLR_EE (1 << 25)
76#define SCTLR_V (1 << 13)
77
78/* Hyp System Control Register (HSCTLR) bits */
79#define HSCTLR_TE (1 << 30)
80#define HSCTLR_EE (1 << 25)
81#define HSCTLR_FI (1 << 21)
82#define HSCTLR_WXN (1 << 19)
83#define HSCTLR_I (1 << 12)
84#define HSCTLR_C (1 << 2)
85#define HSCTLR_A (1 << 1)
86#define HSCTLR_M 1
87#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
88 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
89
90/* TTBCR and HTCR Registers bits */
91#define TTBCR_EAE (1 << 31)
92#define TTBCR_IMP (1 << 30)
93#define TTBCR_SH1 (3 << 28)
94#define TTBCR_ORGN1 (3 << 26)
95#define TTBCR_IRGN1 (3 << 24)
96#define TTBCR_EPD1 (1 << 23)
97#define TTBCR_A1 (1 << 22)
98#define TTBCR_T1SZ (3 << 16)
99#define TTBCR_SH0 (3 << 12)
100#define TTBCR_ORGN0 (3 << 10)
101#define TTBCR_IRGN0 (3 << 8)
102#define TTBCR_EPD0 (1 << 7)
103#define TTBCR_T0SZ 3
104#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
105
106/* Hyp System Trap Register */
107#define HSTR_T(x) (1 << x)
108#define HSTR_TTEE (1 << 16)
109#define HSTR_TJDBX (1 << 17)
110
111/* Hyp Coprocessor Trap Register */
112#define HCPTR_TCP(x) (1 << x)
113#define HCPTR_TCP_MASK (0x3fff)
114#define HCPTR_TASE (1 << 15)
115#define HCPTR_TTA (1 << 20)
116#define HCPTR_TCPAC (1 << 31)
117
118/* Hyp Debug Configuration Register bits */
119#define HDCR_TDRA (1 << 11)
120#define HDCR_TDOSA (1 << 10)
121#define HDCR_TDA (1 << 9)
122#define HDCR_TDE (1 << 8)
123#define HDCR_HPME (1 << 7)
124#define HDCR_TPM (1 << 6)
125#define HDCR_TPMCR (1 << 5)
126#define HDCR_HPMN_MASK (0x1F)
127
128/*
129 * The architecture supports 40-bit IPA as input to the 2nd stage translations
130 * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
131 * space.
132 */
133#define KVM_PHYS_SHIFT (40)
134#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
135#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
136#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
137#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
138#define S2_PGD_SIZE (1 << S2_PGD_ORDER)
139
140/* Virtualization Translation Control Register (VTCR) bits */
141#define VTCR_SH0 (3 << 12)
142#define VTCR_ORGN0 (3 << 10)
143#define VTCR_IRGN0 (3 << 8)
144#define VTCR_SL0 (3 << 6)
145#define VTCR_S (1 << 4)
146#define VTCR_T0SZ (0xf)
147#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
148 VTCR_S | VTCR_T0SZ)
149#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
150#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
151#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
152#define KVM_VTCR_SL0 VTCR_SL_L1
153/* stage-2 input address range defined as 2^(32-T0SZ) */
154#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
155#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
156#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
157
158/* Virtualization Translation Table Base Register (VTTBR) bits */
159#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
160#define VTTBR_X (14 - KVM_T0SZ)
161#else
162#define VTTBR_X (5 - KVM_T0SZ)
163#endif
164#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
165#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
166#define VTTBR_VMID_SHIFT (48LLU)
167#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
168
169/* Hyp Syndrome Register (HSR) bits */
170#define HSR_EC_SHIFT (26)
171#define HSR_EC (0x3fU << HSR_EC_SHIFT)
172#define HSR_IL (1U << 25)
173#define HSR_ISS (HSR_IL - 1)
174#define HSR_ISV_SHIFT (24)
175#define HSR_ISV (1U << HSR_ISV_SHIFT)
176#define HSR_SRT_SHIFT (16)
177#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
178#define HSR_FSC (0x3f)
179#define HSR_FSC_TYPE (0x3c)
180#define HSR_SSE (1 << 21)
181#define HSR_WNR (1 << 6)
182#define HSR_CV_SHIFT (24)
183#define HSR_CV (1U << HSR_CV_SHIFT)
184#define HSR_COND_SHIFT (20)
185#define HSR_COND (0xfU << HSR_COND_SHIFT)
186
187#define FSC_FAULT (0x04)
188#define FSC_PERM (0x0c)
189
190/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
191#define HPFAR_MASK (~0xf)
192
193#define HSR_EC_UNKNOWN (0x00)
194#define HSR_EC_WFI (0x01)
195#define HSR_EC_CP15_32 (0x03)
196#define HSR_EC_CP15_64 (0x04)
197#define HSR_EC_CP14_MR (0x05)
198#define HSR_EC_CP14_LS (0x06)
199#define HSR_EC_CP_0_13 (0x07)
200#define HSR_EC_CP10_ID (0x08)
201#define HSR_EC_JAZELLE (0x09)
202#define HSR_EC_BXJ (0x0A)
203#define HSR_EC_CP14_64 (0x0C)
204#define HSR_EC_SVC_HYP (0x11)
205#define HSR_EC_HVC (0x12)
206#define HSR_EC_SMC (0x13)
207#define HSR_EC_IABT (0x20)
208#define HSR_EC_IABT_HYP (0x21)
209#define HSR_EC_DABT (0x24)
210#define HSR_EC_DABT_HYP (0x25)
211
212#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
213
214#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..5e06e8177784
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_ASM_H__
20#define __ARM_KVM_ASM_H__
21
22/* 0 is reserved as an invalid value. */
23#define c0_MPIDR 1 /* MultiProcessor ID Register */
24#define c0_CSSELR 2 /* Cache Size Selection Register */
25#define c1_SCTLR 3 /* System Control Register */
26#define c1_ACTLR 4 /* Auxilliary Control Register */
27#define c1_CPACR 5 /* Coprocessor Access Control */
28#define c2_TTBR0 6 /* Translation Table Base Register 0 */
29#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
30#define c2_TTBR1 8 /* Translation Table Base Register 1 */
31#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
32#define c2_TTBCR 10 /* Translation Table Base Control R. */
33#define c3_DACR 11 /* Domain Access Control Register */
34#define c5_DFSR 12 /* Data Fault Status Register */
35#define c5_IFSR 13 /* Instruction Fault Status Register */
36#define c5_ADFSR 14 /* Auxilary Data Fault Status R */
37#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */
38#define c6_DFAR 16 /* Data Fault Address Register */
39#define c6_IFAR 17 /* Instruction Fault Address Register */
40#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */
41#define c10_PRRR 19 /* Primary Region Remap Register */
42#define c10_NMRR 20 /* Normal Memory Remap Register */
43#define c12_VBAR 21 /* Vector Base Address Register */
44#define c13_CID 22 /* Context ID Register */
45#define c13_TID_URW 23 /* Thread ID, User R/W */
46#define c13_TID_URO 24 /* Thread ID, User R/O */
47#define c13_TID_PRIV 25 /* Thread ID, Privileged */
48#define NR_CP15_REGS 26 /* Number of regs (incl. invalid) */
49
50#define ARM_EXCEPTION_RESET 0
51#define ARM_EXCEPTION_UNDEFINED 1
52#define ARM_EXCEPTION_SOFTWARE 2
53#define ARM_EXCEPTION_PREF_ABORT 3
54#define ARM_EXCEPTION_DATA_ABORT 4
55#define ARM_EXCEPTION_IRQ 5
56#define ARM_EXCEPTION_FIQ 6
57#define ARM_EXCEPTION_HVC 7
58
59#ifndef __ASSEMBLY__
60struct kvm;
61struct kvm_vcpu;
62
63extern char __kvm_hyp_init[];
64extern char __kvm_hyp_init_end[];
65
66extern char __kvm_hyp_exit[];
67extern char __kvm_hyp_exit_end[];
68
69extern char __kvm_hyp_vector[];
70
71extern char __kvm_hyp_code_start[];
72extern char __kvm_hyp_code_end[];
73
74extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
75
76extern void __kvm_flush_vm_context(void);
77extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
78
79extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
80#endif
81
82#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..4917c2f7e459
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2012 Rusty Russell IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
18#ifndef __ARM_KVM_COPROC_H__
19#define __ARM_KVM_COPROC_H__
20#include <linux/kvm_host.h>
21
22void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
23
24struct kvm_coproc_target_table {
25 unsigned target;
26 const struct coproc_reg *table;
27 size_t num;
28};
29void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
30
31int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
32int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
33int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
34int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
35int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
36int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
37
38unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
39int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
40void kvm_coproc_table_init(void);
41
42struct kvm_one_reg;
43int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
44int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
45int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
46unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
47#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..fd611996bfb5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_EMULATE_H__
20#define __ARM_KVM_EMULATE_H__
21
22#include <linux/kvm_host.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h>
25
26u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
27u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
28
29int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
30void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
31void kvm_inject_undefined(struct kvm_vcpu *vcpu);
32void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
33void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
34
35static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
36{
37 return 1;
38}
39
40static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
41{
42 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
43}
44
45static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
46{
47 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
48}
49
50static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
51{
52 *vcpu_cpsr(vcpu) |= PSR_T_BIT;
53}
54
55static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
56{
57 unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
58 return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
59}
60
61static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
62{
63 unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
64 return cpsr_mode > USR_MODE;;
65}
66
67static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
68{
69 return reg == 15;
70}
71
72#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 000000000000..98b4d1a72923
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_HOST_H__
20#define __ARM_KVM_HOST_H__
21
22#include <asm/kvm.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h>
25#include <asm/fpstate.h>
26
27#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
28#define KVM_MEMORY_SLOTS 32
29#define KVM_PRIVATE_MEM_SLOTS 4
30#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
31#define KVM_HAVE_ONE_REG
32
33#define KVM_VCPU_MAX_FEATURES 1
34
35/* We don't currently support large pages. */
36#define KVM_HPAGE_GFN_SHIFT(x) 0
37#define KVM_NR_PAGE_SIZES 1
38#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
39
40struct kvm_vcpu;
41u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
42int kvm_target_cpu(void);
43int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
44void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
45
46struct kvm_arch {
47 /* VTTBR value associated with below pgd and vmid */
48 u64 vttbr;
49
50 /*
51 * Anything that is not used directly from assembly code goes
52 * here.
53 */
54
55 /* The VMID generation used for the virt. memory system */
56 u64 vmid_gen;
57 u32 vmid;
58
59 /* Stage-2 page table */
60 pgd_t *pgd;
61};
62
63#define KVM_NR_MEM_OBJS 40
64
65/*
66 * We don't want allocation failures within the mmu code, so we preallocate
67 * enough memory for a single page fault in a cache.
68 */
69struct kvm_mmu_memory_cache {
70 int nobjs;
71 void *objects[KVM_NR_MEM_OBJS];
72};
73
74struct kvm_vcpu_arch {
75 struct kvm_regs regs;
76
77 int target; /* Processor target */
78 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
79
80 /* System control coprocessor (cp15) */
81 u32 cp15[NR_CP15_REGS];
82
83 /* The CPU type we expose to the VM */
84 u32 midr;
85
86 /* Exception Information */
87 u32 hsr; /* Hyp Syndrome Register */
88 u32 hxfar; /* Hyp Data/Inst Fault Address Register */
89 u32 hpfar; /* Hyp IPA Fault Address Register */
90
91 /* Floating point registers (VFP and Advanced SIMD/NEON) */
92 struct vfp_hard_struct vfp_guest;
93 struct vfp_hard_struct *vfp_host;
94
95 /*
96 * Anything that is not used directly from assembly code goes
97 * here.
98 */
99 /* dcache set/way operation pending */
100 int last_pcpu;
101 cpumask_t require_dcache_flush;
102
103 /* Don't run the guest on this vcpu */
104 bool pause;
105
106 /* IO related fields */
107 struct kvm_decode mmio_decode;
108
109 /* Interrupt related fields */
110 u32 irq_lines; /* IRQ and FIQ levels */
111
112 /* Hyp exception information */
113 u32 hyp_pc; /* PC when exception was taken from Hyp mode */
114
115 /* Cache some mmu pages needed inside spinlock regions */
116 struct kvm_mmu_memory_cache mmu_page_cache;
117
118 /* Detect first run of a vcpu */
119 bool has_run_once;
120};
121
122struct kvm_vm_stat {
123 u32 remote_tlb_flush;
124};
125
126struct kvm_vcpu_stat {
127 u32 halt_wakeup;
128};
129
130struct kvm_vcpu_init;
131int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
132 const struct kvm_vcpu_init *init);
133unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
134int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
135struct kvm_one_reg;
136int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
137int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
138u64 kvm_call_hyp(void *hypfn, ...);
139void force_vm_exit(const cpumask_t *mask);
140
141#define KVM_ARCH_WANT_MMU_NOTIFIER
142struct kvm;
143int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
144int kvm_unmap_hva_range(struct kvm *kvm,
145 unsigned long start, unsigned long end);
146void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
147
148unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
149int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
150
151/* We do not have shadow page tables, hence the empty hooks */
152static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
153{
154 return 0;
155}
156
157static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
158{
159 return 0;
160}
161#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..adcc0d7d3175
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_MMIO_H__
20#define __ARM_KVM_MMIO_H__
21
22#include <linux/kvm_host.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_arm.h>
25
26struct kvm_decode {
27 unsigned long rt;
28 bool sign_extend;
29};
30
31/*
32 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
33 * which is an anonymous type. Use our own type instead.
34 */
35struct kvm_exit_mmio {
36 phys_addr_t phys_addr;
37 u8 data[8];
38 u32 len;
39 bool is_write;
40};
41
42static inline void kvm_prepare_mmio(struct kvm_run *run,
43 struct kvm_exit_mmio *mmio)
44{
45 run->mmio.phys_addr = mmio->phys_addr;
46 run->mmio.len = mmio->len;
47 run->mmio.is_write = mmio->is_write;
48 memcpy(run->mmio.data, mmio->data, mmio->len);
49 run->exit_reason = KVM_EXIT_MMIO;
50}
51
52int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
53int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
54 phys_addr_t fault_ipa);
55
56#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..421a20b34874
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_MMU_H__
20#define __ARM_KVM_MMU_H__
21
22int create_hyp_mappings(void *from, void *to);
23int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
24void free_hyp_pmds(void);
25
26int kvm_alloc_stage2_pgd(struct kvm *kvm);
27void kvm_free_stage2_pgd(struct kvm *kvm);
28int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
29 phys_addr_t pa, unsigned long size);
30
31int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
32
33void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
34
35phys_addr_t kvm_mmu_get_httbr(void);
36int kvm_mmu_init(void);
37void kvm_clear_hyp_idmap(void);
38
39static inline bool kvm_is_write_fault(unsigned long hsr)
40{
41 unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
42 if (hsr_ec == HSR_EC_IABT)
43 return false;
44 else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
45 return false;
46 else
47 return true;
48}
49
50#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..9a83d98bf170
--- /dev/null
+++ b/arch/arm/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2012 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ARM_KVM_PSCI_H__
19#define __ARM_KVM_PSCI_H__
20
21bool kvm_psci_call(struct kvm_vcpu *vcpu);
22
23#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
23#endif 23#endif
24 struct pci_ops *ops; 24 struct pci_ops *ops;
25 int nr_controllers; 25 int nr_controllers;
26 void **private_data;
26 int (*setup)(int nr, struct pci_sys_data *); 27 int (*setup)(int nr, struct pci_sys_data *);
27 struct pci_bus *(*scan)(int nr, struct pci_sys_data *); 28 struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
28 void (*preinit)(void); 29 void (*preinit)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981e..64c770d24198 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
36 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area 36 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
37 */ 37 */
38#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 38#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
39#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) 39#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
40#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) 40#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
41 41
42/* 42/*
43 * The maximum size of a 26-bit user space task. 43 * The maximum size of a 26-bit user space task.
44 */ 44 */
45#define TASK_SIZE_26 UL(0x04000000) 45#define TASK_SIZE_26 (UL(1) << 26)
46 46
47/* 47/*
48 * The module space lives between the addresses given by TASK_SIZE 48 * The module space lives between the addresses given by TASK_SIZE
49 * and PAGE_OFFSET - it must be within 32MB of the kernel text. 49 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
50 */ 50 */
51#ifndef CONFIG_THUMB2_KERNEL 51#ifndef CONFIG_THUMB2_KERNEL
52#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) 52#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
53#else 53#else
54/* smaller range for Thumb-2 symbols relocation (2^24)*/ 54/* smaller range for Thumb-2 symbols relocation (2^24)*/
55#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) 55#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
56#endif 56#endif
57 57
58#if TASK_SIZE > MODULES_VADDR 58#if TASK_SIZE > MODULES_VADDR
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 000000000000..bc3a9174417c
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#ifndef __ASM_ARM_OPCODES_SEC_H
15#define __ASM_ARM_OPCODES_SEC_H
16
17#include <asm/opcodes.h>
18
19#define __SMC(imm4) __inst_arm_thumb32( \
20 0xE1600070 | (((imm4) & 0xF) << 0), \
21 0xF7F08000 | (((imm4) & 0xF) << 16) \
22)
23
24#endif /* __ASM_ARM_OPCODES_SEC_H */
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a6fb24..e796c598513b 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -10,6 +10,7 @@
10#define __ASM_ARM_OPCODES_H 10#define __ASM_ARM_OPCODES_H
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#include <linux/linkage.h>
13extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); 14extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
14#endif 15#endif
15 16
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
92static inline void outer_flush_all(void) { } 92static inline void outer_flush_all(void) { }
93static inline void outer_inv_all(void) { } 93static inline void outer_inv_all(void) { }
94static inline void outer_disable(void) { } 94static inline void outer_disable(void) { }
95static inline void outer_resume(void) { }
95 96
96#endif 97#endif
97 98
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c4..18f5cef82ad5 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -32,6 +32,9 @@
32#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) 32#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
33#define PMD_BIT4 (_AT(pmdval_t, 0)) 33#define PMD_BIT4 (_AT(pmdval_t, 0))
34#define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) 34#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
35#define PMD_APTABLE_SHIFT (61)
36#define PMD_APTABLE (_AT(pgdval_t, 3) << PGD_APTABLE_SHIFT)
37#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
35 38
36/* 39/*
37 * - section 40 * - section
@@ -41,9 +44,11 @@
41#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 44#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
42#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) 45#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
43#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) 46#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
47#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
44#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) 48#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
45#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) 49#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
46#define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) 50#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
51#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
47#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) 52#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
48 53
49/* 54/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940a..6ef8afd1b64c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,11 +104,29 @@
104 */ 104 */
105#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ 105#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
106 106
107/*
108 * 2nd stage PTE definitions for LPAE.
109 */
110#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
111#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
112#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
113#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
114#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
115
116/*
117 * Hyp-mode PL2 PTE definitions for LPAE.
118 */
119#define L_PTE_HYP L_PTE_USER
120
107#ifndef __ASSEMBLY__ 121#ifndef __ASSEMBLY__
108 122
109#define pud_none(pud) (!pud_val(pud)) 123#define pud_none(pud) (!pud_val(pud))
110#define pud_bad(pud) (!(pud_val(pud) & 2)) 124#define pud_bad(pud) (!(pud_val(pud) & 2))
111#define pud_present(pud) (pud_val(pud)) 125#define pud_present(pud) (pud_val(pud))
126#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
127 PMD_TYPE_TABLE)
128#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
129 PMD_TYPE_SECT)
112 130
113#define pud_clear(pudp) \ 131#define pud_clear(pudp) \
114 do { \ 132 do { \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e3..f30ac3b55ba9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
70 70
71extern pgprot_t pgprot_user; 71extern pgprot_t pgprot_user;
72extern pgprot_t pgprot_kernel; 72extern pgprot_t pgprot_kernel;
73extern pgprot_t pgprot_hyp_device;
74extern pgprot_t pgprot_s2;
75extern pgprot_t pgprot_s2_device;
73 76
74#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) 77#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
75 78
@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel;
82#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) 85#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
83#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) 86#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
84#define PAGE_KERNEL_EXEC pgprot_kernel 87#define PAGE_KERNEL_EXEC pgprot_kernel
88#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
89#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
90#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
91#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
85 92
86#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) 93#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
87#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) 94#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 000000000000..ce0dbe7c1625
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,36 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#ifndef __ASM_ARM_PSCI_H
15#define __ASM_ARM_PSCI_H
16
17#define PSCI_POWER_STATE_TYPE_STANDBY 0
18#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
19
20struct psci_power_state {
21 u16 id;
22 u8 type;
23 u8 affinity_level;
24};
25
26struct psci_operations {
27 int (*cpu_suspend)(struct psci_power_state state,
28 unsigned long entry_point);
29 int (*cpu_off)(struct psci_power_state state);
30 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
31 int (*migrate)(unsigned long cpuid);
32};
33
34extern struct psci_operations psci_ops;
35
36#endif /* __ASM_ARM_PSCI_H */
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..86dff32a0737 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -7,8 +7,14 @@
7 7
8#ifndef __ASSEMBLER__ 8#ifndef __ASSEMBLER__
9unsigned int scu_get_core_count(void __iomem *); 9unsigned int scu_get_core_count(void __iomem *);
10void scu_enable(void __iomem *);
11int scu_power_mode(void __iomem *, unsigned int); 10int scu_power_mode(void __iomem *, unsigned int);
11
12#ifdef CONFIG_SMP
13void scu_enable(void __iomem *scu_base);
14#else
15static inline void scu_enable(void __iomem *scu_base) {}
16#endif
17
12#endif 18#endif
13 19
14#endif 20#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
119 119
120static inline void arch_spin_unlock(arch_spinlock_t *lock) 120static inline void arch_spin_unlock(arch_spinlock_t *lock)
121{ 121{
122 unsigned long tmp;
123 u32 slock;
124
125 smp_mb(); 122 smp_mb();
126 123 lock->tickets.owner++;
127 __asm__ __volatile__(
128" mov %1, #1\n"
129"1: ldrex %0, [%2]\n"
130" uadd16 %0, %0, %1\n"
131" strex %1, %0, [%2]\n"
132" teq %1, #0\n"
133" bne 1b"
134 : "=&r" (slock), "=&r" (tmp)
135 : "r" (&lock->slock)
136 : "cc");
137
138 dsb_sev(); 124 dsb_sev();
139} 125}
140 126
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 86164df86cb4..50af92bac737 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -24,9 +24,9 @@
24/* 24/*
25 * Flag indicating that the kernel was not entered in the same mode on every 25 * Flag indicating that the kernel was not entered in the same mode on every
26 * CPU. The zImage loader stashes this value in an SPSR, so we need an 26 * CPU. The zImage loader stashes this value in an SPSR, so we need an
27 * architecturally defined flag bit here (the N flag, as it happens) 27 * architecturally defined flag bit here.
28 */ 28 */
29#define BOOT_CPU_MODE_MISMATCH (1<<31) 29#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32 32
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..3303ff5adbf3
--- /dev/null
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_H__
20#define __ARM_KVM_H__
21
22#include <linux/types.h>
23#include <asm/ptrace.h>
24
25#define __KVM_HAVE_GUEST_DEBUG
26#define __KVM_HAVE_IRQ_LINE
27
28#define KVM_REG_SIZE(id) \
29 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
30
31/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
32#define KVM_ARM_SVC_sp svc_regs[0]
33#define KVM_ARM_SVC_lr svc_regs[1]
34#define KVM_ARM_SVC_spsr svc_regs[2]
35#define KVM_ARM_ABT_sp abt_regs[0]
36#define KVM_ARM_ABT_lr abt_regs[1]
37#define KVM_ARM_ABT_spsr abt_regs[2]
38#define KVM_ARM_UND_sp und_regs[0]
39#define KVM_ARM_UND_lr und_regs[1]
40#define KVM_ARM_UND_spsr und_regs[2]
41#define KVM_ARM_IRQ_sp irq_regs[0]
42#define KVM_ARM_IRQ_lr irq_regs[1]
43#define KVM_ARM_IRQ_spsr irq_regs[2]
44
45/* Valid only for fiq_regs in struct kvm_regs */
46#define KVM_ARM_FIQ_r8 fiq_regs[0]
47#define KVM_ARM_FIQ_r9 fiq_regs[1]
48#define KVM_ARM_FIQ_r10 fiq_regs[2]
49#define KVM_ARM_FIQ_fp fiq_regs[3]
50#define KVM_ARM_FIQ_ip fiq_regs[4]
51#define KVM_ARM_FIQ_sp fiq_regs[5]
52#define KVM_ARM_FIQ_lr fiq_regs[6]
53#define KVM_ARM_FIQ_spsr fiq_regs[7]
54
55struct kvm_regs {
56 struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
57 __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
58 __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
59 __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
60 __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
61 __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
62};
63
64/* Supported Processor Types */
65#define KVM_ARM_TARGET_CORTEX_A15 0
66#define KVM_ARM_NUM_TARGETS 1
67
68#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
69
70struct kvm_vcpu_init {
71 __u32 target;
72 __u32 features[7];
73};
74
75struct kvm_sregs {
76};
77
78struct kvm_fpu {
79};
80
81struct kvm_guest_debug_arch {
82};
83
84struct kvm_debug_exit_arch {
85};
86
87struct kvm_sync_regs {
88};
89
90struct kvm_arch_memory_slot {
91};
92
93/* If you need to interpret the index values, here is the key: */
94#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
95#define KVM_REG_ARM_COPROC_SHIFT 16
96#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
97#define KVM_REG_ARM_32_OPC2_SHIFT 0
98#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
99#define KVM_REG_ARM_OPC1_SHIFT 3
100#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
101#define KVM_REG_ARM_CRM_SHIFT 7
102#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
103#define KVM_REG_ARM_32_CRN_SHIFT 11
104
105/* Normal registers are mapped as coprocessor 16. */
106#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
107#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
108
109/* Some registers need more space to represent values. */
110#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
111#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
112#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
113#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
114#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
115#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
116
117/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
118#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
119#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
120#define KVM_REG_ARM_VFP_BASE_REG 0x0
121#define KVM_REG_ARM_VFP_FPSID 0x1000
122#define KVM_REG_ARM_VFP_FPSCR 0x1001
123#define KVM_REG_ARM_VFP_MVFR1 0x1006
124#define KVM_REG_ARM_VFP_MVFR0 0x1007
125#define KVM_REG_ARM_VFP_FPEXC 0x1008
126#define KVM_REG_ARM_VFP_FPINST 0x1009
127#define KVM_REG_ARM_VFP_FPINST2 0x100A
128
129
130/* KVM_IRQ_LINE irq field index values */
131#define KVM_ARM_IRQ_TYPE_SHIFT 24
132#define KVM_ARM_IRQ_TYPE_MASK 0xff
133#define KVM_ARM_IRQ_VCPU_SHIFT 16
134#define KVM_ARM_IRQ_VCPU_MASK 0xff
135#define KVM_ARM_IRQ_NUM_SHIFT 0
136#define KVM_ARM_IRQ_NUM_MASK 0xffff
137
138/* irq_type field */
139#define KVM_ARM_IRQ_TYPE_CPU 0
140#define KVM_ARM_IRQ_TYPE_SPI 1
141#define KVM_ARM_IRQ_TYPE_PPI 2
142
143/* out-of-kernel GIC cpu interrupt injection irq_number field */
144#define KVM_ARM_IRQ_CPU_IRQ 0
145#define KVM_ARM_IRQ_CPU_FIQ 1
146
147/* Highest supported SPI, from VGIC_NR_IRQS */
148#define KVM_ARM_IRQ_GIC_MAX 127
149
150/* PSCI interface */
151#define KVM_PSCI_FN_BASE 0x95c1ba5e
152#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
153
154#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
155#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
156#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
157#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
158
159#define KVM_PSCI_RET_SUCCESS 0
160#define KVM_PSCI_RET_NI ((unsigned long)-1)
161#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
162#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
163
164#endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5bbec7b8183e..5f3338eacad2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
82obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 82obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
83 83
84obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 84obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
85obj-$(CONFIG_ARM_PSCI) += psci.o
85 86
86extra-y := $(head-y) vmlinux.lds 87extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c985b481192c..c8b3272dfed1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#ifdef CONFIG_KVM_ARM_HOST
17#include <linux/kvm_host.h>
18#endif
16#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
17#include <asm/glue-df.h> 20#include <asm/glue-df.h>
18#include <asm/glue-pf.h> 21#include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
146 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
147 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
148 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); 151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
152#ifdef CONFIG_KVM_ARM_HOST
153 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
154 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
155 DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
156 DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
157 DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host));
158 DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
159 DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
160 DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
161 DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
162 DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
163 DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
164 DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
165 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
166 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
167 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
168 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
169 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
170 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
171 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
172 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
173#endif
149 return 0; 174 return 0;
150} 175}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
413 return irq; 413 return irq;
414} 414}
415 415
416static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) 416static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
417{ 417{
418 int ret; 418 int ret;
419 struct pci_host_bridge_window *window; 419 struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
445 return 0; 445 return 0;
446} 446}
447 447
448static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) 448static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
449{ 449{
450 struct pci_sys_data *sys = NULL; 450 struct pci_sys_data *sys = NULL;
451 int ret; 451 int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
464 sys->map_irq = hw->map_irq; 464 sys->map_irq = hw->map_irq;
465 INIT_LIST_HEAD(&sys->resources); 465 INIT_LIST_HEAD(&sys->resources);
466 466
467 if (hw->private_data)
468 sys->private_data = hw->private_data[nr];
469
467 ret = hw->setup(nr, sys); 470 ret = hw->setup(nr, sys);
468 471
469 if (ret > 0) { 472 if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
493 } 496 }
494} 497}
495 498
496void __init pci_common_init(struct hw_pci *hw) 499void pci_common_init(struct hw_pci *hw)
497{ 500{
498 struct pci_sys_data *sys; 501 struct pci_sys_data *sys;
499 LIST_HEAD(head); 502 LIST_HEAD(head);
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 6809200c31fb..14f7c3b14632 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -100,12 +100,14 @@ ENTRY(printch)
100 b 1b 100 b 1b
101ENDPROC(printch) 101ENDPROC(printch)
102 102
103#ifdef CONFIG_MMU
103ENTRY(debug_ll_addr) 104ENTRY(debug_ll_addr)
104 addruart r2, r3, ip 105 addruart r2, r3, ip
105 str r2, [r0] 106 str r2, [r0]
106 str r3, [r1] 107 str r3, [r1]
107 mov pc, lr 108 mov pc, lr
108ENDPROC(debug_ll_addr) 109ENDPROC(debug_ll_addr)
110#endif
109 111
110#else 112#else
111 113
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..486a15ae9011 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
246 246
247 /* 247 /*
248 * Then map boot params address in r2 if specified. 248 * Then map boot params address in r2 if specified.
249 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
249 */ 250 */
250 mov r0, r2, lsr #SECTION_SHIFT 251 mov r0, r2, lsr #SECTION_SHIFT
251 movs r0, r0, lsl #SECTION_SHIFT 252 movs r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
253 addne r3, r3, #PAGE_OFFSET 254 addne r3, r3, #PAGE_OFFSET
254 addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) 255 addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
255 orrne r6, r7, r0 256 orrne r6, r7, r0
257 strne r6, [r3], #1 << PMD_ORDER
258 addne r6, r6, #1 << SECTION_SHIFT
256 strne r6, [r3] 259 strne r6, [r3]
257 260
258#ifdef CONFIG_DEBUG_LL 261#ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
331 * as it has already been validated by the primary processor. 334 * as it has already been validated by the primary processor.
332 */ 335 */
333#ifdef CONFIG_ARM_VIRT_EXT 336#ifdef CONFIG_ARM_VIRT_EXT
334 bl __hyp_stub_install 337 bl __hyp_stub_install_secondary
335#endif 338#endif
336 safe_svcmode_maskall r9 339 safe_svcmode_maskall r9
337 340
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5ff2e77782b1..5eae53e7a2e1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
28#include <linux/perf_event.h> 28#include <linux/perf_event.h>
29#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/cpu_pm.h>
31 32
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/cputype.h> 34#include <asm/cputype.h>
@@ -35,6 +36,7 @@
35#include <asm/hw_breakpoint.h> 36#include <asm/hw_breakpoint.h>
36#include <asm/kdebug.h> 37#include <asm/kdebug.h>
37#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/hardware/coresight.h>
38 40
39/* Breakpoint currently in use for each BRP. */ 41/* Breakpoint currently in use for each BRP. */
40static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); 42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -49,6 +51,9 @@ static int core_num_wrps;
49/* Debug architecture version. */ 51/* Debug architecture version. */
50static u8 debug_arch; 52static u8 debug_arch;
51 53
54/* Does debug architecture support OS Save and Restore? */
55static bool has_ossr;
56
52/* Maximum supported watchpoint length. */ 57/* Maximum supported watchpoint length. */
53static u8 max_watchpoint_len; 58static u8 max_watchpoint_len;
54 59
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
903 .fn = debug_reg_trap, 908 .fn = debug_reg_trap,
904}; 909};
905 910
911/* Does this core support OS Save and Restore? */
912static bool core_has_os_save_restore(void)
913{
914 u32 oslsr;
915
916 switch (get_debug_arch()) {
917 case ARM_DEBUG_ARCH_V7_1:
918 return true;
919 case ARM_DEBUG_ARCH_V7_ECP14:
920 ARM_DBG_READ(c1, c1, 4, oslsr);
921 if (oslsr & ARM_OSLSR_OSLM0)
922 return true;
923 default:
924 return false;
925 }
926}
927
906static void reset_ctrl_regs(void *unused) 928static void reset_ctrl_regs(void *unused)
907{ 929{
908 int i, raw_num_brps, err = 0, cpu = smp_processor_id(); 930 int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
930 if ((val & 0x1) == 0) 952 if ((val & 0x1) == 0)
931 err = -EPERM; 953 err = -EPERM;
932 954
933 /* 955 if (!has_ossr)
934 * Check whether we implement OS save and restore.
935 */
936 ARM_DBG_READ(c1, c1, 4, val);
937 if ((val & 0x9) == 0)
938 goto clear_vcr; 956 goto clear_vcr;
939 break; 957 break;
940 case ARM_DEBUG_ARCH_V7_1: 958 case ARM_DEBUG_ARCH_V7_1:
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)
955 973
956 /* 974 /*
957 * Unconditionally clear the OS lock by writing a value 975 * Unconditionally clear the OS lock by writing a value
958 * other than 0xC5ACCE55 to the access register. 976 * other than CS_LAR_KEY to the access register.
959 */ 977 */
960 ARM_DBG_WRITE(c1, c0, 4, 0); 978 ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
961 isb(); 979 isb();
962 980
963 /* 981 /*
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
1015 .notifier_call = dbg_reset_notify, 1033 .notifier_call = dbg_reset_notify,
1016}; 1034};
1017 1035
1036#ifdef CONFIG_CPU_PM
1037static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
1038 void *v)
1039{
1040 if (action == CPU_PM_EXIT)
1041 reset_ctrl_regs(NULL);
1042
1043 return NOTIFY_OK;
1044}
1045
1046static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
1047 .notifier_call = dbg_cpu_pm_notify,
1048};
1049
1050static void __init pm_init(void)
1051{
1052 cpu_pm_register_notifier(&dbg_cpu_pm_nb);
1053}
1054#else
1055static inline void pm_init(void)
1056{
1057}
1058#endif
1059
1018static int __init arch_hw_breakpoint_init(void) 1060static int __init arch_hw_breakpoint_init(void)
1019{ 1061{
1020 debug_arch = get_debug_arch(); 1062 debug_arch = get_debug_arch();
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
1024 return 0; 1066 return 0;
1025 } 1067 }
1026 1068
1069 has_ossr = core_has_os_save_restore();
1070
1027 /* Determine how many BRPs/WRPs are available. */ 1071 /* Determine how many BRPs/WRPs are available. */
1028 core_num_brps = get_num_brps(); 1072 core_num_brps = get_num_brps();
1029 core_num_wrps = get_num_wrps(); 1073 core_num_wrps = get_num_wrps();
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
1062 hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, 1106 hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
1063 TRAP_HWBKPT, "breakpoint debug exception"); 1107 TRAP_HWBKPT, "breakpoint debug exception");
1064 1108
1065 /* Register hotplug notifier. */ 1109 /* Register hotplug and PM notifiers. */
1066 register_cpu_notifier(&dbg_reset_nb); 1110 register_cpu_notifier(&dbg_reset_nb);
1111 pm_init();
1067 return 0; 1112 return 0;
1068} 1113}
1069arch_initcall(arch_hw_breakpoint_init); 1114arch_initcall(arch_hw_breakpoint_init);
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebce..1315c4ccfa56 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
99 * immediately. 99 * immediately.
100 */ 100 */
101 compare_cpu_mode_with_primary r4, r5, r6, r7 101 compare_cpu_mode_with_primary r4, r5, r6, r7
102 bxne lr 102 movne pc, lr
103 103
104 /* 104 /*
105 * Once we have given up on one CPU, we do not try to install the 105 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
111 */ 111 */
112 112
113 cmp r4, #HYP_MODE 113 cmp r4, #HYP_MODE
114 bxne lr @ give up if the CPU is not in HYP mode 114 movne pc, lr @ give up if the CPU is not in HYP mode
115 115
116/* 116/*
117 * Configure HSCTLR to set correct exception endianness/instruction set 117 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
120 * Eventually, CPU-specific code might be needed -- assume not for now 120 * Eventually, CPU-specific code might be needed -- assume not for now
121 * 121 *
122 * This code relies on the "eret" instruction to synchronize the 122 * This code relies on the "eret" instruction to synchronize the
123 * various coprocessor accesses. 123 * various coprocessor accesses. This is done when we switch to SVC
124 * (see safe_svcmode_maskall).
124 */ 125 */
125 @ Now install the hypervisor stub: 126 @ Now install the hypervisor stub:
126 adr r7, __hyp_stub_vectors 127 adr r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
1551: 1561:
156#endif 157#endif
157 158
158 bic r7, r4, #MODE_MASK 159 bx lr @ The boot CPU mode is left in r4.
159 orr r7, r7, #SVC_MODE
160THUMB( orr r7, r7, #PSR_T_BIT )
161 msr spsr_cxsf, r7 @ This is SPSR_hyp.
162
163 __MSR_ELR_HYP(14) @ msr elr_hyp, lr
164 __ERET @ return, switching to SVC mode
165 @ The boot CPU mode is left in r4.
166ENDPROC(__hyp_stub_install_secondary) 160ENDPROC(__hyp_stub_install_secondary)
167 161
168__hyp_stub_do_trap: 162__hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
200 @ fall through 194 @ fall through
201ENTRY(__hyp_set_vectors) 195ENTRY(__hyp_set_vectors)
202 __HVC(0) 196 __HVC(0)
203 bx lr 197 mov pc, lr
204ENDPROC(__hyp_set_vectors) 198ENDPROC(__hyp_set_vectors)
205 199
206#ifndef ZIMAGE 200#ifndef ZIMAGE
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657dd241..31e0eb353cd8 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -149,12 +149,6 @@ again:
149static void 149static void
150armpmu_read(struct perf_event *event) 150armpmu_read(struct perf_event *event)
151{ 151{
152 struct hw_perf_event *hwc = &event->hw;
153
154 /* Don't read disabled counters! */
155 if (hwc->idx < 0)
156 return;
157
158 armpmu_event_update(event); 152 armpmu_event_update(event);
159} 153}
160 154
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
207 struct hw_perf_event *hwc = &event->hw; 201 struct hw_perf_event *hwc = &event->hw;
208 int idx = hwc->idx; 202 int idx = hwc->idx;
209 203
210 WARN_ON(idx < 0);
211
212 armpmu_stop(event, PERF_EF_UPDATE); 204 armpmu_stop(event, PERF_EF_UPDATE);
213 hw_events->events[idx] = NULL; 205 hw_events->events[idx] = NULL;
214 clear_bit(idx, hw_events->used_mask); 206 clear_bit(idx, hw_events->used_mask);
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
358{ 350{
359 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 351 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
360 struct hw_perf_event *hwc = &event->hw; 352 struct hw_perf_event *hwc = &event->hw;
361 int mapping, err; 353 int mapping;
362 354
363 mapping = armpmu->map_event(event); 355 mapping = armpmu->map_event(event);
364 356
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
407 local64_set(&hwc->period_left, hwc->sample_period); 399 local64_set(&hwc->period_left, hwc->sample_period);
408 } 400 }
409 401
410 err = 0;
411 if (event->group_leader != event) { 402 if (event->group_leader != event) {
412 err = validate_group(event); 403 if (validate_group(event) != 0);
413 if (err)
414 return -EINVAL; 404 return -EINVAL;
415 } 405 }
416 406
417 return err; 407 return 0;
418} 408}
419 409
420static int armpmu_event_init(struct perf_event *event) 410static int armpmu_event_init(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e25..1f2740e3dbc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
147 cpu_pmu->free_irq = cpu_pmu_free_irq; 147 cpu_pmu->free_irq = cpu_pmu_free_irq;
148 148
149 /* Ensure the PMU has sane values out of reset. */ 149 /* Ensure the PMU has sane values out of reset. */
150 if (cpu_pmu && cpu_pmu->reset) 150 if (cpu_pmu->reset)
151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); 151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
152} 152}
153 153
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
201static int probe_current_pmu(struct arm_pmu *pmu) 201static int probe_current_pmu(struct arm_pmu *pmu)
202{ 202{
203 int cpu = get_cpu(); 203 int cpu = get_cpu();
204 unsigned long cpuid = read_cpuid_id(); 204 unsigned long implementor = read_cpuid_implementor();
205 unsigned long implementor = (cpuid & 0xFF000000) >> 24; 205 unsigned long part_number = read_cpuid_part_number();
206 unsigned long part_number = (cpuid & 0xFFF0);
207 int ret = -ENODEV; 206 int ret = -ENODEV;
208 207
209 pr_info("probing PMU on CPU %d\n", cpu); 208 pr_info("probing PMU on CPU %d\n", cpu);
210 209
211 /* ARM Ltd CPUs. */ 210 /* ARM Ltd CPUs. */
212 if (0x41 == implementor) { 211 if (implementor == ARM_CPU_IMP_ARM) {
213 switch (part_number) { 212 switch (part_number) {
214 case 0xB360: /* ARM1136 */ 213 case ARM_CPU_PART_ARM1136:
215 case 0xB560: /* ARM1156 */ 214 case ARM_CPU_PART_ARM1156:
216 case 0xB760: /* ARM1176 */ 215 case ARM_CPU_PART_ARM1176:
217 ret = armv6pmu_init(pmu); 216 ret = armv6pmu_init(pmu);
218 break; 217 break;
219 case 0xB020: /* ARM11mpcore */ 218 case ARM_CPU_PART_ARM11MPCORE:
220 ret = armv6mpcore_pmu_init(pmu); 219 ret = armv6mpcore_pmu_init(pmu);
221 break; 220 break;
222 case 0xC080: /* Cortex-A8 */ 221 case ARM_CPU_PART_CORTEX_A8:
223 ret = armv7_a8_pmu_init(pmu); 222 ret = armv7_a8_pmu_init(pmu);
224 break; 223 break;
225 case 0xC090: /* Cortex-A9 */ 224 case ARM_CPU_PART_CORTEX_A9:
226 ret = armv7_a9_pmu_init(pmu); 225 ret = armv7_a9_pmu_init(pmu);
227 break; 226 break;
228 case 0xC050: /* Cortex-A5 */ 227 case ARM_CPU_PART_CORTEX_A5:
229 ret = armv7_a5_pmu_init(pmu); 228 ret = armv7_a5_pmu_init(pmu);
230 break; 229 break;
231 case 0xC0F0: /* Cortex-A15 */ 230 case ARM_CPU_PART_CORTEX_A15:
232 ret = armv7_a15_pmu_init(pmu); 231 ret = armv7_a15_pmu_init(pmu);
233 break; 232 break;
234 case 0xC070: /* Cortex-A7 */ 233 case ARM_CPU_PART_CORTEX_A7:
235 ret = armv7_a7_pmu_init(pmu); 234 ret = armv7_a7_pmu_init(pmu);
236 break; 235 break;
237 } 236 }
238 /* Intel CPUs [xscale]. */ 237 /* Intel CPUs [xscale]. */
239 } else if (0x69 == implementor) { 238 } else if (implementor == ARM_CPU_IMP_INTEL) {
240 part_number = (cpuid >> 13) & 0x7; 239 switch (xscale_cpu_arch_version()) {
241 switch (part_number) { 240 case ARM_CPU_XSCALE_ARCH_V1:
242 case 1:
243 ret = xscale1pmu_init(pmu); 241 ret = xscale1pmu_init(pmu);
244 break; 242 break;
245 case 2: 243 case ARM_CPU_XSCALE_ARCH_V2:
246 ret = xscale2pmu_init(pmu); 244 ret = xscale2pmu_init(pmu);
247 break; 245 break;
248 } 246 }
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
279 } 277 }
280 278
281 if (ret) { 279 if (ret) {
282 pr_info("failed to register PMU devices!"); 280 pr_info("failed to probe PMU!");
283 kfree(pmu); 281 goto out_free;
284 return ret;
285 } 282 }
286 283
287 cpu_pmu = pmu; 284 cpu_pmu = pmu;
288 cpu_pmu->plat_device = pdev; 285 cpu_pmu->plat_device = pdev;
289 cpu_pmu_init(cpu_pmu); 286 cpu_pmu_init(cpu_pmu);
290 armpmu_register(cpu_pmu, PERF_TYPE_RAW); 287 ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
291 288
292 return 0; 289 if (!ret)
290 return 0;
291
292out_free:
293 pr_info("failed to register PMU devices!");
294 kfree(pmu);
295 return ret;
293} 296}
294 297
295static struct platform_driver cpu_pmu_driver = { 298static struct platform_driver cpu_pmu_driver = {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 041d0526a288..03664b0e8fa4 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
106 }, 106 },
107 [C(OP_WRITE)] = { 107 [C(OP_WRITE)] = {
108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
109 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, 109 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
110 }, 110 },
111 [C(OP_PREFETCH)] = { 111 [C(OP_PREFETCH)] = {
112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
259 }, 259 },
260 [C(OP_WRITE)] = { 260 [C(OP_WRITE)] = {
261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
262 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, 262 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
263 }, 263 },
264 [C(OP_PREFETCH)] = { 264 [C(OP_PREFETCH)] = {
265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4fbc757d9cff..8c79a9e70b83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
157 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 157 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
158 }, 158 },
159 [C(OP_WRITE)] = { 159 [C(OP_WRITE)] = {
160 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, 160 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
161 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 161 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
162 }, 162 },
163 [C(OP_PREFETCH)] = { 163 [C(OP_PREFETCH)] = {
164 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 164 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
282 }, 282 },
283 [C(OP_WRITE)] = { 283 [C(OP_WRITE)] = {
284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
285 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 285 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
286 }, 286 },
287 [C(OP_PREFETCH)] = { 287 [C(OP_PREFETCH)] = {
288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
399 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 399 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
400 }, 400 },
401 [C(OP_WRITE)] = { 401 [C(OP_WRITE)] = {
402 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 402 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
403 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 403 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
404 }, 404 },
405 /* 405 /*
406 * The prefetch counters don't differentiate between the I 406 * The prefetch counters don't differentiate between the I
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
527 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 527 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
528 }, 528 },
529 [C(OP_WRITE)] = { 529 [C(OP_WRITE)] = {
530 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 530 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
531 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 531 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
532 }, 532 },
533 [C(OP_PREFETCH)] = { 533 [C(OP_PREFETCH)] = {
534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
651 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 651 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
652 }, 652 },
653 [C(OP_WRITE)] = { 653 [C(OP_WRITE)] = {
654 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 654 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
655 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 655 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
656 }, 656 },
657 [C(OP_PREFETCH)] = { 657 [C(OP_PREFETCH)] = {
658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 2b0fe30ec12e..63990c42fac9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
83 }, 83 },
84 [C(OP_WRITE)] = { 84 [C(OP_WRITE)] = {
85 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 85 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
86 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, 86 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
87 }, 87 },
88 [C(OP_PREFETCH)] = { 88 [C(OP_PREFETCH)] = {
89 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 89 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20aa..047d3e40e470 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
172 local_irq_enable(); 172 local_irq_enable();
173} 173}
174 174
175void (*pm_idle)(void) = default_idle;
176EXPORT_SYMBOL(pm_idle);
177
178/* 175/*
179 * The idle thread, has rather strange semantics for calling pm_idle, 176 * The idle thread.
180 * but this is what x86 does and we need to do the same, so that 177 * We always respect 'hlt_counter' to prevent low power idle.
181 * things like cpuidle get called in the same way. The only difference
182 * is that we always respect 'hlt_counter' to prevent low power idle.
183 */ 178 */
184void cpu_idle(void) 179void cpu_idle(void)
185{ 180{
@@ -210,10 +205,10 @@ void cpu_idle(void)
210 } else if (!need_resched()) { 205 } else if (!need_resched()) {
211 stop_critical_timings(); 206 stop_critical_timings();
212 if (cpuidle_idle_call()) 207 if (cpuidle_idle_call())
213 pm_idle(); 208 default_idle();
214 start_critical_timings(); 209 start_critical_timings();
215 /* 210 /*
216 * pm_idle functions must always 211 * default_idle functions must always
217 * return with IRQs enabled. 212 * return with IRQs enabled.
218 */ 213 */
219 WARN_ON(irqs_disabled()); 214 WARN_ON(irqs_disabled());
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
new file mode 100644
index 000000000000..36531643cc2c
--- /dev/null
+++ b/arch/arm/kernel/psci.c
@@ -0,0 +1,211 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#define pr_fmt(fmt) "psci: " fmt
17
18#include <linux/init.h>
19#include <linux/of.h>
20
21#include <asm/compiler.h>
22#include <asm/errno.h>
23#include <asm/opcodes-sec.h>
24#include <asm/opcodes-virt.h>
25#include <asm/psci.h>
26
27struct psci_operations psci_ops;
28
29static int (*invoke_psci_fn)(u32, u32, u32, u32);
30
31enum psci_function {
32 PSCI_FN_CPU_SUSPEND,
33 PSCI_FN_CPU_ON,
34 PSCI_FN_CPU_OFF,
35 PSCI_FN_MIGRATE,
36 PSCI_FN_MAX,
37};
38
39static u32 psci_function_id[PSCI_FN_MAX];
40
41#define PSCI_RET_SUCCESS 0
42#define PSCI_RET_EOPNOTSUPP -1
43#define PSCI_RET_EINVAL -2
44#define PSCI_RET_EPERM -3
45
46static int psci_to_linux_errno(int errno)
47{
48 switch (errno) {
49 case PSCI_RET_SUCCESS:
50 return 0;
51 case PSCI_RET_EOPNOTSUPP:
52 return -EOPNOTSUPP;
53 case PSCI_RET_EINVAL:
54 return -EINVAL;
55 case PSCI_RET_EPERM:
56 return -EPERM;
57 };
58
59 return -EINVAL;
60}
61
62#define PSCI_POWER_STATE_ID_MASK 0xffff
63#define PSCI_POWER_STATE_ID_SHIFT 0
64#define PSCI_POWER_STATE_TYPE_MASK 0x1
65#define PSCI_POWER_STATE_TYPE_SHIFT 16
66#define PSCI_POWER_STATE_AFFL_MASK 0x3
67#define PSCI_POWER_STATE_AFFL_SHIFT 24
68
69static u32 psci_power_state_pack(struct psci_power_state state)
70{
71 return ((state.id & PSCI_POWER_STATE_ID_MASK)
72 << PSCI_POWER_STATE_ID_SHIFT) |
73 ((state.type & PSCI_POWER_STATE_TYPE_MASK)
74 << PSCI_POWER_STATE_TYPE_SHIFT) |
75 ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
76 << PSCI_POWER_STATE_AFFL_SHIFT);
77}
78
79/*
80 * The following two functions are invoked via the invoke_psci_fn pointer
81 * and will not be inlined, allowing us to piggyback on the AAPCS.
82 */
83static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
84 u32 arg2)
85{
86 asm volatile(
87 __asmeq("%0", "r0")
88 __asmeq("%1", "r1")
89 __asmeq("%2", "r2")
90 __asmeq("%3", "r3")
91 __HVC(0)
92 : "+r" (function_id)
93 : "r" (arg0), "r" (arg1), "r" (arg2));
94
95 return function_id;
96}
97
98static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
99 u32 arg2)
100{
101 asm volatile(
102 __asmeq("%0", "r0")
103 __asmeq("%1", "r1")
104 __asmeq("%2", "r2")
105 __asmeq("%3", "r3")
106 __SMC(0)
107 : "+r" (function_id)
108 : "r" (arg0), "r" (arg1), "r" (arg2));
109
110 return function_id;
111}
112
113static int psci_cpu_suspend(struct psci_power_state state,
114 unsigned long entry_point)
115{
116 int err;
117 u32 fn, power_state;
118
119 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
120 power_state = psci_power_state_pack(state);
121 err = invoke_psci_fn(fn, power_state, entry_point, 0);
122 return psci_to_linux_errno(err);
123}
124
125static int psci_cpu_off(struct psci_power_state state)
126{
127 int err;
128 u32 fn, power_state;
129
130 fn = psci_function_id[PSCI_FN_CPU_OFF];
131 power_state = psci_power_state_pack(state);
132 err = invoke_psci_fn(fn, power_state, 0, 0);
133 return psci_to_linux_errno(err);
134}
135
136static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
137{
138 int err;
139 u32 fn;
140
141 fn = psci_function_id[PSCI_FN_CPU_ON];
142 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
143 return psci_to_linux_errno(err);
144}
145
146static int psci_migrate(unsigned long cpuid)
147{
148 int err;
149 u32 fn;
150
151 fn = psci_function_id[PSCI_FN_MIGRATE];
152 err = invoke_psci_fn(fn, cpuid, 0, 0);
153 return psci_to_linux_errno(err);
154}
155
156static const struct of_device_id psci_of_match[] __initconst = {
157 { .compatible = "arm,psci", },
158 {},
159};
160
161static int __init psci_init(void)
162{
163 struct device_node *np;
164 const char *method;
165 u32 id;
166
167 np = of_find_matching_node(NULL, psci_of_match);
168 if (!np)
169 return 0;
170
171 pr_info("probing function IDs from device-tree\n");
172
173 if (of_property_read_string(np, "method", &method)) {
174 pr_warning("missing \"method\" property\n");
175 goto out_put_node;
176 }
177
178 if (!strcmp("hvc", method)) {
179 invoke_psci_fn = __invoke_psci_fn_hvc;
180 } else if (!strcmp("smc", method)) {
181 invoke_psci_fn = __invoke_psci_fn_smc;
182 } else {
183 pr_warning("invalid \"method\" property: %s\n", method);
184 goto out_put_node;
185 }
186
187 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
188 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
189 psci_ops.cpu_suspend = psci_cpu_suspend;
190 }
191
192 if (!of_property_read_u32(np, "cpu_off", &id)) {
193 psci_function_id[PSCI_FN_CPU_OFF] = id;
194 psci_ops.cpu_off = psci_cpu_off;
195 }
196
197 if (!of_property_read_u32(np, "cpu_on", &id)) {
198 psci_function_id[PSCI_FN_CPU_ON] = id;
199 psci_ops.cpu_on = psci_cpu_on;
200 }
201
202 if (!of_property_read_u32(np, "migrate", &id)) {
203 psci_function_id[PSCI_FN_MIGRATE] = id;
204 psci_ops.migrate = psci_migrate;
205 }
206
207out_put_node:
208 of_node_put(np);
209 return 0;
210}
211early_initcall(psci_init);
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index fc6692e2b603..bd6f56b9ec21 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
93 * detectable in cyc_to_fixed_sched_clock(). 93 * detectable in cyc_to_fixed_sched_clock().
94 */ 94 */
95 raw_local_irq_save(flags); 95 raw_local_irq_save(flags);
96 cd.epoch_cyc = cyc; 96 cd.epoch_cyc_copy = cyc;
97 smp_wmb(); 97 smp_wmb();
98 cd.epoch_ns = ns; 98 cd.epoch_ns = ns;
99 smp_wmb(); 99 smp_wmb();
100 cd.epoch_cyc_copy = cyc; 100 cd.epoch_cyc = cyc;
101 raw_local_irq_restore(flags); 101 raw_local_irq_restore(flags);
102} 102}
103 103
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
125 smp_ops.smp_init_cpus(); 125 smp_ops.smp_init_cpus();
126} 126}
127 127
128static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
129{
130 if (smp_ops.smp_prepare_cpus)
131 smp_ops.smp_prepare_cpus(max_cpus);
132}
133
134static void __cpuinit platform_secondary_init(unsigned int cpu)
135{
136 if (smp_ops.smp_secondary_init)
137 smp_ops.smp_secondary_init(cpu);
138}
139
140int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 128int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
141{ 129{
142 if (smp_ops.smp_boot_secondary) 130 if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
154 return 1; 142 return 1;
155} 143}
156 144
157static void platform_cpu_die(unsigned int cpu)
158{
159 if (smp_ops.cpu_die)
160 smp_ops.cpu_die(cpu);
161}
162
163static int platform_cpu_disable(unsigned int cpu) 145static int platform_cpu_disable(unsigned int cpu)
164{ 146{
165 if (smp_ops.cpu_disable) 147 if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
257 * actual CPU shutdown procedure is at least platform (if not 239 * actual CPU shutdown procedure is at least platform (if not
258 * CPU) specific. 240 * CPU) specific.
259 */ 241 */
260 platform_cpu_die(cpu); 242 if (smp_ops.cpu_die)
243 smp_ops.cpu_die(cpu);
261 244
262 /* 245 /*
263 * Do not return to the idle loop - jump back to the secondary 246 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
324 /* 307 /*
325 * Give the platform a chance to do its own initialisation. 308 * Give the platform a chance to do its own initialisation.
326 */ 309 */
327 platform_secondary_init(cpu); 310 if (smp_ops.smp_secondary_init)
311 smp_ops.smp_secondary_init(cpu);
328 312
329 notify_cpu_starting(cpu); 313 notify_cpu_starting(cpu);
330 314
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
399 /* 383 /*
400 * Initialise the present map, which describes the set of CPUs 384 * Initialise the present map, which describes the set of CPUs
401 * actually populated at the present time. A platform should 385 * actually populated at the present time. A platform should
402 * re-initialize the map in platform_smp_prepare_cpus() if 386 * re-initialize the map in the platforms smp_prepare_cpus()
403 * present != possible (e.g. physical hotplug). 387 * if present != possible (e.g. physical hotplug).
404 */ 388 */
405 init_cpu_present(cpu_possible_mask); 389 init_cpu_present(cpu_possible_mask);
406 390
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
408 * Initialise the SCU if there are more than one CPU 392 * Initialise the SCU if there are more than one CPU
409 * and let them know where to start. 393 * and let them know where to start.
410 */ 394 */
411 platform_smp_prepare_cpus(max_cpus); 395 if (smp_ops.smp_prepare_cpus)
396 smp_ops.smp_prepare_cpus(max_cpus);
412 } 397 }
413} 398}
414 399
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
75int scu_power_mode(void __iomem *scu_base, unsigned int mode) 75int scu_power_mode(void __iomem *scu_base, unsigned int mode)
76{ 76{
77 unsigned int val; 77 unsigned int val;
78 int cpu = cpu_logical_map(smp_processor_id()); 78 int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
79 79
80 if (mode > 3 || mode == 1 || cpu > 3) 80 if (mode > 3 || mode == 1 || cpu > 3)
81 return -EINVAL; 81 return -EINVAL;
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301ba..ae0c7bb39ae8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
31 31
32static struct clk *twd_clk; 32static struct clk *twd_clk;
33static unsigned long twd_timer_rate; 33static unsigned long twd_timer_rate;
34static bool common_setup_called;
35static DEFINE_PER_CPU(bool, percpu_setup_called); 34static DEFINE_PER_CPU(bool, percpu_setup_called);
36 35
37static struct clock_event_device __percpu **twd_evt; 36static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
239 return IRQ_NONE; 238 return IRQ_NONE;
240} 239}
241 240
242static struct clk *twd_get_clock(void) 241static void twd_get_clock(struct device_node *np)
243{ 242{
244 struct clk *clk;
245 int err; 243 int err;
246 244
247 clk = clk_get_sys("smp_twd", NULL); 245 if (np)
248 if (IS_ERR(clk)) { 246 twd_clk = of_clk_get(np, 0);
249 pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); 247 else
250 return clk; 248 twd_clk = clk_get_sys("smp_twd", NULL);
249
250 if (IS_ERR(twd_clk)) {
251 pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
252 return;
251 } 253 }
252 254
253 err = clk_prepare_enable(clk); 255 err = clk_prepare_enable(twd_clk);
254 if (err) { 256 if (err) {
255 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); 257 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
256 clk_put(clk); 258 clk_put(twd_clk);
257 return ERR_PTR(err); 259 return;
258 } 260 }
259 261
260 return clk; 262 twd_timer_rate = clk_get_rate(twd_clk);
261} 263}
262 264
263/* 265/*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
280 } 282 }
281 per_cpu(percpu_setup_called, cpu) = true; 283 per_cpu(percpu_setup_called, cpu) = true;
282 284
283 /* 285 twd_calibrate_rate();
284 * This stuff only need to be done once for the entire TWD cluster
285 * during the runtime of the system.
286 */
287 if (!common_setup_called) {
288 twd_clk = twd_get_clock();
289
290 /*
291 * We use IS_ERR_OR_NULL() here, because if the clock stubs
292 * are active we will get a valid clk reference which is
293 * however NULL and will return the rate 0. In that case we
294 * need to calibrate the rate instead.
295 */
296 if (!IS_ERR_OR_NULL(twd_clk))
297 twd_timer_rate = clk_get_rate(twd_clk);
298 else
299 twd_calibrate_rate();
300
301 common_setup_called = true;
302 }
303 286
304 /* 287 /*
305 * The following is done once per CPU the first time .setup() is 288 * The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
330 .stop = twd_timer_stop, 313 .stop = twd_timer_stop,
331}; 314};
332 315
333static int __init twd_local_timer_common_register(void) 316static int __init twd_local_timer_common_register(struct device_node *np)
334{ 317{
335 int err; 318 int err;
336 319
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
350 if (err) 333 if (err)
351 goto out_irq; 334 goto out_irq;
352 335
336 twd_get_clock(np);
337
353 return 0; 338 return 0;
354 339
355out_irq: 340out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
373 if (!twd_base) 358 if (!twd_base)
374 return -ENOMEM; 359 return -ENOMEM;
375 360
376 return twd_local_timer_common_register(); 361 return twd_local_timer_common_register(NULL);
377} 362}
378 363
379#ifdef CONFIG_OF 364#ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
405 goto out; 390 goto out;
406 } 391 }
407 392
408 err = twd_local_timer_common_register(); 393 err = twd_local_timer_common_register(np);
409 394
410out: 395out:
411 WARN(err, "twd_local_timer_of_register failed (%d)\n", err); 396 WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 11c1785bf63e..b571484e9f03 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -19,7 +19,11 @@
19 ALIGN_FUNCTION(); \ 19 ALIGN_FUNCTION(); \
20 VMLINUX_SYMBOL(__idmap_text_start) = .; \ 20 VMLINUX_SYMBOL(__idmap_text_start) = .; \
21 *(.idmap.text) \ 21 *(.idmap.text) \
22 VMLINUX_SYMBOL(__idmap_text_end) = .; 22 VMLINUX_SYMBOL(__idmap_text_end) = .; \
23 ALIGN_FUNCTION(); \
24 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
25 *(.hyp.idmap.text) \
26 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
23 27
24#ifdef CONFIG_HOTPLUG_CPU 28#ifdef CONFIG_HOTPLUG_CPU
25#define ARM_CPU_DISCARD(x) 29#define ARM_CPU_DISCARD(x)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
new file mode 100644
index 000000000000..05227cb57a7b
--- /dev/null
+++ b/arch/arm/kvm/Kconfig
@@ -0,0 +1,56 @@
1#
2# KVM configuration
3#
4
5source "virt/kvm/Kconfig"
6
7menuconfig VIRTUALIZATION
8 bool "Virtualization"
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and
15 disabled.
16
17if VIRTUALIZATION
18
19config KVM
20 bool "Kernel-based Virtual Machine (KVM) support"
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 select KVM_ARM_HOST
25 depends on ARM_VIRT_EXT && ARM_LPAE
26 ---help---
27 Support hosting virtualized guest machines. You will also
28 need to select one or more of the processor modules below.
29
30 This module provides access to the hardware capabilities through
31 a character device node named /dev/kvm.
32
33 If unsure, say N.
34
35config KVM_ARM_HOST
36 bool "KVM host support for ARM cpus."
37 depends on KVM
38 depends on MMU
39 select MMU_NOTIFIER
40 ---help---
41 Provides host support for ARM processors.
42
43config KVM_ARM_MAX_VCPUS
44 int "Number maximum supported virtual CPUs per VM"
45 depends on KVM_ARM_HOST
46 default 4
47 help
48 Static number of max supported virtual CPUs per VM.
49
50 If you choose a high number, the vcpu structures will be quite
51 large, so only choose a reasonable number that you expect to
52 actually use.
53
54source drivers/virtio/Kconfig
55
56endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
new file mode 100644
index 000000000000..ea27987bd07f
--- /dev/null
+++ b/arch/arm/kvm/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5plus_virt := $(call as-instr,.arch_extension virt,+virt)
6ifeq ($(plus_virt),+virt)
7 plus_virt_def := -DREQUIRES_VIRT=1
8endif
9
10ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
11CFLAGS_arm.o := -I. $(plus_virt_def)
12CFLAGS_mmu.o := -I.
13
14AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
15AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
16
17kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
18
19obj-y += kvm-arm.o init.o interrupts.o
20obj-y += arm.o guest.o mmu.o emulate.o reset.o
21obj-y += coproc.o coproc_a15.o mmio.o psci.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
new file mode 100644
index 000000000000..2d30e3afdaf9
--- /dev/null
+++ b/arch/arm/kvm/arm.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <linux/vmalloc.h>
24#include <linux/fs.h>
25#include <linux/mman.h>
26#include <linux/sched.h>
27#include <linux/kvm.h>
28#include <trace/events/kvm.h>
29
30#define CREATE_TRACE_POINTS
31#include "trace.h"
32
33#include <asm/unified.h>
34#include <asm/uaccess.h>
35#include <asm/ptrace.h>
36#include <asm/mman.h>
37#include <asm/cputype.h>
38#include <asm/tlbflush.h>
39#include <asm/cacheflush.h>
40#include <asm/virt.h>
41#include <asm/kvm_arm.h>
42#include <asm/kvm_asm.h>
43#include <asm/kvm_mmu.h>
44#include <asm/kvm_emulate.h>
45#include <asm/kvm_coproc.h>
46#include <asm/kvm_psci.h>
47#include <asm/opcodes.h>
48
49#ifdef REQUIRES_VIRT
50__asm__(".arch_extension virt");
51#endif
52
53static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
55static unsigned long hyp_default_vectors;
56
57/* The VMID used in the VTTBR */
58static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
59static u8 kvm_next_vmid;
60static DEFINE_SPINLOCK(kvm_vmid_lock);
61
62int kvm_arch_hardware_enable(void *garbage)
63{
64 return 0;
65}
66
67int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
68{
69 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
70}
71
72void kvm_arch_hardware_disable(void *garbage)
73{
74}
75
76int kvm_arch_hardware_setup(void)
77{
78 return 0;
79}
80
81void kvm_arch_hardware_unsetup(void)
82{
83}
84
85void kvm_arch_check_processor_compat(void *rtn)
86{
87 *(int *)rtn = 0;
88}
89
90void kvm_arch_sync_events(struct kvm *kvm)
91{
92}
93
94/**
95 * kvm_arch_init_vm - initializes a VM data structure
96 * @kvm: pointer to the KVM struct
97 */
98int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
99{
100 int ret = 0;
101
102 if (type)
103 return -EINVAL;
104
105 ret = kvm_alloc_stage2_pgd(kvm);
106 if (ret)
107 goto out_fail_alloc;
108
109 ret = create_hyp_mappings(kvm, kvm + 1);
110 if (ret)
111 goto out_free_stage2_pgd;
112
113 /* Mark the initial VMID generation invalid */
114 kvm->arch.vmid_gen = 0;
115
116 return ret;
117out_free_stage2_pgd:
118 kvm_free_stage2_pgd(kvm);
119out_fail_alloc:
120 return ret;
121}
122
123int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
124{
125 return VM_FAULT_SIGBUS;
126}
127
128void kvm_arch_free_memslot(struct kvm_memory_slot *free,
129 struct kvm_memory_slot *dont)
130{
131}
132
133int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
134{
135 return 0;
136}
137
138/**
139 * kvm_arch_destroy_vm - destroy the VM data structure
140 * @kvm: pointer to the KVM struct
141 */
142void kvm_arch_destroy_vm(struct kvm *kvm)
143{
144 int i;
145
146 kvm_free_stage2_pgd(kvm);
147
148 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
149 if (kvm->vcpus[i]) {
150 kvm_arch_vcpu_free(kvm->vcpus[i]);
151 kvm->vcpus[i] = NULL;
152 }
153 }
154}
155
156int kvm_dev_ioctl_check_extension(long ext)
157{
158 int r;
159 switch (ext) {
160 case KVM_CAP_USER_MEMORY:
161 case KVM_CAP_SYNC_MMU:
162 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
163 case KVM_CAP_ONE_REG:
164 case KVM_CAP_ARM_PSCI:
165 r = 1;
166 break;
167 case KVM_CAP_COALESCED_MMIO:
168 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
169 break;
170 case KVM_CAP_NR_VCPUS:
171 r = num_online_cpus();
172 break;
173 case KVM_CAP_MAX_VCPUS:
174 r = KVM_MAX_VCPUS;
175 break;
176 default:
177 r = 0;
178 break;
179 }
180 return r;
181}
182
183long kvm_arch_dev_ioctl(struct file *filp,
184 unsigned int ioctl, unsigned long arg)
185{
186 return -EINVAL;
187}
188
189int kvm_arch_set_memory_region(struct kvm *kvm,
190 struct kvm_userspace_memory_region *mem,
191 struct kvm_memory_slot old,
192 int user_alloc)
193{
194 return 0;
195}
196
197int kvm_arch_prepare_memory_region(struct kvm *kvm,
198 struct kvm_memory_slot *memslot,
199 struct kvm_memory_slot old,
200 struct kvm_userspace_memory_region *mem,
201 int user_alloc)
202{
203 return 0;
204}
205
206void kvm_arch_commit_memory_region(struct kvm *kvm,
207 struct kvm_userspace_memory_region *mem,
208 struct kvm_memory_slot old,
209 int user_alloc)
210{
211}
212
213void kvm_arch_flush_shadow_all(struct kvm *kvm)
214{
215}
216
217void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
218 struct kvm_memory_slot *slot)
219{
220}
221
222struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
223{
224 int err;
225 struct kvm_vcpu *vcpu;
226
227 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
228 if (!vcpu) {
229 err = -ENOMEM;
230 goto out;
231 }
232
233 err = kvm_vcpu_init(vcpu, kvm, id);
234 if (err)
235 goto free_vcpu;
236
237 err = create_hyp_mappings(vcpu, vcpu + 1);
238 if (err)
239 goto vcpu_uninit;
240
241 return vcpu;
242vcpu_uninit:
243 kvm_vcpu_uninit(vcpu);
244free_vcpu:
245 kmem_cache_free(kvm_vcpu_cache, vcpu);
246out:
247 return ERR_PTR(err);
248}
249
250int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
251{
252 return 0;
253}
254
255void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
256{
257 kvm_mmu_free_memory_caches(vcpu);
258 kmem_cache_free(kvm_vcpu_cache, vcpu);
259}
260
261void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
262{
263 kvm_arch_vcpu_free(vcpu);
264}
265
266int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
267{
268 return 0;
269}
270
271int __attribute_const__ kvm_target_cpu(void)
272{
273 unsigned long implementor = read_cpuid_implementor();
274 unsigned long part_number = read_cpuid_part_number();
275
276 if (implementor != ARM_CPU_IMP_ARM)
277 return -EINVAL;
278
279 switch (part_number) {
280 case ARM_CPU_PART_CORTEX_A15:
281 return KVM_ARM_TARGET_CORTEX_A15;
282 default:
283 return -EINVAL;
284 }
285}
286
287int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
288{
289 /* Force users to call KVM_ARM_VCPU_INIT */
290 vcpu->arch.target = -1;
291 return 0;
292}
293
294void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
295{
296}
297
298void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
299{
300 vcpu->cpu = cpu;
301 vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
302
303 /*
304 * Check whether this vcpu requires the cache to be flushed on
305 * this physical CPU. This is a consequence of doing dcache
306 * operations by set/way on this vcpu. We do it here to be in
307 * a non-preemptible section.
308 */
309 if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
310 flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
311}
312
313void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
314{
315}
316
317int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
318 struct kvm_guest_debug *dbg)
319{
320 return -EINVAL;
321}
322
323
324int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
325 struct kvm_mp_state *mp_state)
326{
327 return -EINVAL;
328}
329
330int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
331 struct kvm_mp_state *mp_state)
332{
333 return -EINVAL;
334}
335
336/**
337 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
338 * @v: The VCPU pointer
339 *
340 * If the guest CPU is not waiting for interrupts or an interrupt line is
341 * asserted, the CPU is by definition runnable.
342 */
343int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
344{
345 return !!v->arch.irq_lines;
346}
347
348/* Just ensure a guest exit from a particular CPU */
349static void exit_vm_noop(void *info)
350{
351}
352
353void force_vm_exit(const cpumask_t *mask)
354{
355 smp_call_function_many(mask, exit_vm_noop, NULL, true);
356}
357
358/**
359 * need_new_vmid_gen - check that the VMID is still valid
 360 * @kvm: The VM's VMID to check
361 *
362 * return true if there is a new generation of VMIDs being used
363 *
364 * The hardware supports only 256 values with the value zero reserved for the
365 * host, so we check if an assigned value belongs to a previous generation,
 366 * which requires us to assign a new value. If we're the first to use a
367 * VMID for the new generation, we must flush necessary caches and TLBs on all
368 * CPUs.
369 */
370static bool need_new_vmid_gen(struct kvm *kvm)
371{
372 return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
373}
374
375/**
376 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
377 * @kvm The guest that we are about to run
378 *
379 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
380 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
381 * caches and TLBs.
382 */
383static void update_vttbr(struct kvm *kvm)
384{
385 phys_addr_t pgd_phys;
386 u64 vmid;
387
388 if (!need_new_vmid_gen(kvm))
389 return;
390
391 spin_lock(&kvm_vmid_lock);
392
393 /*
394 * We need to re-check the vmid_gen here to ensure that if another vcpu
395 * already allocated a valid vmid for this vm, then this vcpu should
396 * use the same vmid.
397 */
398 if (!need_new_vmid_gen(kvm)) {
399 spin_unlock(&kvm_vmid_lock);
400 return;
401 }
402
403 /* First user of a new VMID generation? */
404 if (unlikely(kvm_next_vmid == 0)) {
405 atomic64_inc(&kvm_vmid_gen);
406 kvm_next_vmid = 1;
407
408 /*
409 * On SMP we know no other CPUs can use this CPU's or each
410 * other's VMID after force_vm_exit returns since the
411 * kvm_vmid_lock blocks them from reentry to the guest.
412 */
413 force_vm_exit(cpu_all_mask);
414 /*
415 * Now broadcast TLB + ICACHE invalidation over the inner
416 * shareable domain to make sure all data structures are
417 * clean.
418 */
419 kvm_call_hyp(__kvm_flush_vm_context);
420 }
421
422 kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
423 kvm->arch.vmid = kvm_next_vmid;
424 kvm_next_vmid++;
425
426 /* update vttbr to be used with the new vmid */
427 pgd_phys = virt_to_phys(kvm->arch.pgd);
428 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
429 kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
430 kvm->arch.vttbr |= vmid;
431
432 spin_unlock(&kvm_vmid_lock);
433}
434
435static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
436{
437 /* SVC called from Hyp mode should never get here */
438 kvm_debug("SVC called from Hyp mode shouldn't go here\n");
439 BUG();
440 return -EINVAL; /* Squash warning */
441}
442
443static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
444{
445 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
446 vcpu->arch.hsr & HSR_HVC_IMM_MASK);
447
448 if (kvm_psci_call(vcpu))
449 return 1;
450
451 kvm_inject_undefined(vcpu);
452 return 1;
453}
454
455static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
456{
457 if (kvm_psci_call(vcpu))
458 return 1;
459
460 kvm_inject_undefined(vcpu);
461 return 1;
462}
463
464static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
465{
466 /* The hypervisor should never cause aborts */
467 kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
468 vcpu->arch.hxfar, vcpu->arch.hsr);
469 return -EFAULT;
470}
471
472static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
473{
474 /* This is either an error in the ws. code or an external abort */
475 kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
476 vcpu->arch.hxfar, vcpu->arch.hsr);
477 return -EFAULT;
478}
479
480typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
481static exit_handle_fn arm_exit_handlers[] = {
482 [HSR_EC_WFI] = kvm_handle_wfi,
483 [HSR_EC_CP15_32] = kvm_handle_cp15_32,
484 [HSR_EC_CP15_64] = kvm_handle_cp15_64,
485 [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
486 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
487 [HSR_EC_CP14_64] = kvm_handle_cp14_access,
488 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
489 [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
490 [HSR_EC_SVC_HYP] = handle_svc_hyp,
491 [HSR_EC_HVC] = handle_hvc,
492 [HSR_EC_SMC] = handle_smc,
493 [HSR_EC_IABT] = kvm_handle_guest_abort,
494 [HSR_EC_IABT_HYP] = handle_pabt_hyp,
495 [HSR_EC_DABT] = kvm_handle_guest_abort,
496 [HSR_EC_DABT_HYP] = handle_dabt_hyp,
497};
498
499/*
500 * A conditional instruction is allowed to trap, even though it
501 * wouldn't be executed. So let's re-implement the hardware, in
502 * software!
503 */
504static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
505{
506 unsigned long cpsr, cond, insn;
507
508 /*
509 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
510 * catch undefined instructions, and then we won't get past
511 * the arm_exit_handlers test anyway.
512 */
513 BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
514
515 /* Top two bits non-zero? Unconditional. */
516 if (vcpu->arch.hsr >> 30)
517 return true;
518
519 cpsr = *vcpu_cpsr(vcpu);
520
521 /* Is condition field valid? */
522 if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
523 cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
524 else {
525 /* This can happen in Thumb mode: examine IT state. */
526 unsigned long it;
527
528 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
529
530 /* it == 0 => unconditional. */
531 if (it == 0)
532 return true;
533
534 /* The cond for this insn works out as the top 4 bits. */
535 cond = (it >> 4);
536 }
537
538 /* Shift makes it look like an ARM-mode instruction */
539 insn = cond << 28;
540 return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
541}
542
543/*
544 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
545 * proper exit to QEMU.
546 */
547static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
548 int exception_index)
549{
550 unsigned long hsr_ec;
551
552 switch (exception_index) {
553 case ARM_EXCEPTION_IRQ:
554 return 1;
555 case ARM_EXCEPTION_UNDEFINED:
556 kvm_err("Undefined exception in Hyp mode at: %#08x\n",
557 vcpu->arch.hyp_pc);
558 BUG();
559 panic("KVM: Hypervisor undefined exception!\n");
560 case ARM_EXCEPTION_DATA_ABORT:
561 case ARM_EXCEPTION_PREF_ABORT:
562 case ARM_EXCEPTION_HVC:
563 hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
564
565 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
566 || !arm_exit_handlers[hsr_ec]) {
567 kvm_err("Unkown exception class: %#08lx, "
568 "hsr: %#08x\n", hsr_ec,
569 (unsigned int)vcpu->arch.hsr);
570 BUG();
571 }
572
573 /*
574 * See ARM ARM B1.14.1: "Hyp traps on instructions
575 * that fail their condition code check"
576 */
577 if (!kvm_condition_valid(vcpu)) {
578 bool is_wide = vcpu->arch.hsr & HSR_IL;
579 kvm_skip_instr(vcpu, is_wide);
580 return 1;
581 }
582
583 return arm_exit_handlers[hsr_ec](vcpu, run);
584 default:
585 kvm_pr_unimpl("Unsupported exception type: %d",
586 exception_index);
587 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
588 return 0;
589 }
590}
591
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	/* One-time setup: only runs on the very first KVM_RUN. */
	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Handle the "start in power-off" case by calling into the
	 * PSCI code. The emulated PSCI function ID goes in r0.
	 */
	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
		kvm_psci_call(vcpu);
	}

	return 0;
}
610
611static void vcpu_pause(struct kvm_vcpu *vcpu)
612{
613 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
614
615 wait_event_interruptible(*wq, !vcpu->arch.pause);
616}
617
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 * @run: The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
	if (unlikely(vcpu->arch.target < 0))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	/* Complete an MMIO emulation that userspace finished for us. */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	/* Honour the signal mask userspace asked us to run with. */
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.pause)
			vcpu_pause(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/* Bail out (with IRQs back on) if we must not enter. */
		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
			local_irq_enable();
			continue;
		}

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * Back from guest
		 *************************************************************/

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}
715
716static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
717{
718 int bit_index;
719 bool set;
720 unsigned long *ptr;
721
722 if (number == KVM_ARM_IRQ_CPU_IRQ)
723 bit_index = __ffs(HCR_VI);
724 else /* KVM_ARM_IRQ_CPU_FIQ */
725 bit_index = __ffs(HCR_VF);
726
727 ptr = (unsigned long *)&vcpu->arch.irq_lines;
728 if (level)
729 set = test_and_set_bit(bit_index, ptr);
730 else
731 set = test_and_clear_bit(bit_index, ptr);
732
733 /*
734 * If we didn't change anything, no need to wake up or kick other CPUs
735 */
736 if (set == level)
737 return 0;
738
739 /*
740 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
741 * trigger a world-switch round on the running physical CPU to set the
742 * virtual IRQ/FIQ fields in the HCR appropriately.
743 */
744 kvm_vcpu_kick(vcpu);
745
746 return 0;
747}
748
749int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
750{
751 u32 irq = irq_level->irq;
752 unsigned int irq_type, vcpu_idx, irq_num;
753 int nrcpus = atomic_read(&kvm->online_vcpus);
754 struct kvm_vcpu *vcpu = NULL;
755 bool level = irq_level->level;
756
757 irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
758 vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
759 irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
760
761 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
762
763 if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
764 return -EINVAL;
765
766 if (vcpu_idx >= nrcpus)
767 return -EINVAL;
768
769 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
770 if (!vcpu)
771 return -EINVAL;
772
773 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
774 return -EINVAL;
775
776 return vcpu_interrupt_line(vcpu, irq_num, level);
777}
778
779long kvm_arch_vcpu_ioctl(struct file *filp,
780 unsigned int ioctl, unsigned long arg)
781{
782 struct kvm_vcpu *vcpu = filp->private_data;
783 void __user *argp = (void __user *)arg;
784
785 switch (ioctl) {
786 case KVM_ARM_VCPU_INIT: {
787 struct kvm_vcpu_init init;
788
789 if (copy_from_user(&init, argp, sizeof(init)))
790 return -EFAULT;
791
792 return kvm_vcpu_set_target(vcpu, &init);
793
794 }
795 case KVM_SET_ONE_REG:
796 case KVM_GET_ONE_REG: {
797 struct kvm_one_reg reg;
798 if (copy_from_user(&reg, argp, sizeof(reg)))
799 return -EFAULT;
800 if (ioctl == KVM_SET_ONE_REG)
801 return kvm_arm_set_reg(vcpu, &reg);
802 else
803 return kvm_arm_get_reg(vcpu, &reg);
804 }
805 case KVM_GET_REG_LIST: {
806 struct kvm_reg_list __user *user_list = argp;
807 struct kvm_reg_list reg_list;
808 unsigned n;
809
810 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
811 return -EFAULT;
812 n = reg_list.n;
813 reg_list.n = kvm_arm_num_regs(vcpu);
814 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
815 return -EFAULT;
816 if (n < reg_list.n)
817 return -E2BIG;
818 return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
819 }
820 default:
821 return -EINVAL;
822 }
823}
824
/* Dirty page logging is not supported: unconditionally fail. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}
829
/* No ARM-specific VM ioctls are implemented: unconditionally fail. */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
835
/*
 * Per-CPU HYP-mode bring-up, run via smp_call_function_single().
 * @vector: physical address of the __kvm_hyp_init code.
 */
static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	/* Split the 64-bit HTTBR value into two 32-bit halves. */
	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE; /* point at end of the page */
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call initialization code, and switch to the full blown
	 * HYP code. The init code doesn't need to preserve these registers as
	 * r1-r3 and r12 are already callee save according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting the pgd_low to
	 * a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}
863
/**
 * Inits Hyp-mode on all online CPUs
 *
 * Returns 0 on success; on failure, unwinds whatever was set up
 * (VFP state, HYP mappings, stack pages) and returns a negative errno.
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping (only needed while running the
	 * init code above)
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}
976
977/**
978 * Initialize Hyp-mode and memory mappings on all CPUs.
979 */
980int kvm_arch_init(void *opaque)
981{
982 int err;
983
984 if (!is_hyp_mode_available()) {
985 kvm_err("HYP mode not available\n");
986 return -ENODEV;
987 }
988
989 if (kvm_target_cpu() < 0) {
990 kvm_err("Target CPU not supported!\n");
991 return -ENODEV;
992 }
993
994 err = init_hyp_mode();
995 if (err)
996 goto out_err;
997
998 kvm_coproc_table_init();
999 return 0;
1000out_err:
1001 return err;
1002}
1003
/* NOP: Compiling as a module not supported, so there is nothing to tear down */
void kvm_arch_exit(void)
{
}
1008
1009static int arm_init(void)
1010{
1011 int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1012 return rc;
1013}
1014
1015module_init(arm_init);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
new file mode 100644
index 000000000000..d782638c7ec0
--- /dev/null
+++ b/arch/arm/kvm/coproc.c
@@ -0,0 +1,1046 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
4 * Christoffer Dall <c.dall@virtualopensystems.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19#include <linux/mm.h>
20#include <linux/kvm_host.h>
21#include <linux/uaccess.h>
22#include <asm/kvm_arm.h>
23#include <asm/kvm_host.h>
24#include <asm/kvm_emulate.h>
25#include <asm/kvm_coproc.h>
26#include <asm/cacheflush.h>
27#include <asm/cputype.h>
28#include <trace/events/kvm.h>
29#include <asm/vfp.h>
30#include "../vfp/vfpinstr.h"
31
32#include "trace.h"
33#include "coproc.h"
34
35
36/******************************************************************************
37 * Co-processor emulation
38 *****************************************************************************/
39
40/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
41static u32 cache_levels;
42
43/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
44#define CSSELR_MAX 12
45
/* Trapped CP10 ID register access: not emulated, inject #UNDEF. */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
51
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 * Inject #UNDEF so the guest sees the same failure it would on
	 * real VFP-less hardware.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}
61
/* Trapped CP14 load/store: not emulated, inject #UNDEF. */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
67
/* Trapped CP14 register access: not emulated, inject #UNDEF. */
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
73
74/* See note at ARM ARM B1.14.4 */
75static bool access_dcsw(struct kvm_vcpu *vcpu,
76 const struct coproc_params *p,
77 const struct coproc_reg *r)
78{
79 u32 val;
80 int cpu;
81
82 cpu = get_cpu();
83
84 if (!p->is_write)
85 return read_from_write_only(vcpu, p);
86
87 cpumask_setall(&vcpu->arch.require_dcache_flush);
88 cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
89
90 /* If we were already preempted, take the long way around */
91 if (cpu != vcpu->arch.last_pcpu) {
92 flush_cache_all();
93 goto done;
94 }
95
96 val = *vcpu_reg(vcpu, p->Rt1);
97
98 switch (p->CRm) {
99 case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
100 case 14: /* DCCISW */
101 asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
102 break;
103
104 case 10: /* DCCSW */
105 asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
106 break;
107 }
108
109done:
110 put_cpu();
111
112 return true;
113}
114
115/*
116 * We could trap ID_DFR0 and tell the guest we don't support performance
117 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
118 * NAKed, so it will read the PMCR anyway.
119 *
120 * Therefore we tell the guest we have 0 counters. Unfortunately, we
121 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
122 * all PM registers, which doesn't crash the guest kernel at least.
123 */
124static bool pm_fake(struct kvm_vcpu *vcpu,
125 const struct coproc_params *p,
126 const struct coproc_reg *r)
127{
128 if (p->is_write)
129 return ignore_write(vcpu, p);
130 else
131 return read_zero(vcpu, p);
132}
133
134#define access_pmcr pm_fake
135#define access_pmcntenset pm_fake
136#define access_pmcntenclr pm_fake
137#define access_pmovsr pm_fake
138#define access_pmselr pm_fake
139#define access_pmceid0 pm_fake
140#define access_pmceid1 pm_fake
141#define access_pmccntr pm_fake
142#define access_pmxevtyper pm_fake
143#define access_pmxevcntr pm_fake
144#define access_pmuserenr pm_fake
145#define access_pmintenset pm_fake
146#define access_pmintenclr pm_fake
147
148/* Architected CP15 registers.
149 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
150 */
151static const struct coproc_reg cp15_regs[] = {
152 /* CSSELR: swapped by interrupt.S. */
153 { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
154 NULL, reset_unknown, c0_CSSELR },
155
156 /* TTBR0/TTBR1: swapped by interrupt.S. */
157 { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
158 { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
159
160 /* TTBCR: swapped by interrupt.S. */
161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
162 NULL, reset_val, c2_TTBCR, 0x00000000 },
163
164 /* DACR: swapped by interrupt.S. */
165 { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
166 NULL, reset_unknown, c3_DACR },
167
168 /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
169 { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
170 NULL, reset_unknown, c5_DFSR },
171 { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
172 NULL, reset_unknown, c5_IFSR },
173 { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
174 NULL, reset_unknown, c5_ADFSR },
175 { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
176 NULL, reset_unknown, c5_AIFSR },
177
178 /* DFAR/IFAR: swapped by interrupt.S. */
179 { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
180 NULL, reset_unknown, c6_DFAR },
181 { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
182 NULL, reset_unknown, c6_IFAR },
183 /*
184 * DC{C,I,CI}SW operations:
185 */
186 { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
187 { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
188 { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
189 /*
190 * Dummy performance monitor implementation.
191 */
192 { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
193 { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
194 { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
195 { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
196 { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
197 { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
198 { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
199 { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
200 { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
201 { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
202 { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
203 { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
204 { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
205
206 /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
207 { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
208 NULL, reset_unknown, c10_PRRR},
209 { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
210 NULL, reset_unknown, c10_NMRR},
211
212 /* VBAR: swapped by interrupt.S. */
213 { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
214 NULL, reset_val, c12_VBAR, 0x00000000 },
215
216 /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
217 { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
218 NULL, reset_val, c13_CID, 0x00000000 },
219 { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
220 NULL, reset_unknown, c13_TID_URW },
221 { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
222 NULL, reset_unknown, c13_TID_URO },
223 { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
224 NULL, reset_unknown, c13_TID_PRIV },
225};
226
227/* Target specific emulation tables */
228static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
229
/* Install the emulation table for the CPU target named in @table. */
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	target_tables[table->target] = table;
}
234
/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	/*
	 * NOTE(review): assumes a table was registered for this target
	 * (no NULL check) — presumably guaranteed at init time; verify.
	 */
	table = target_tables[target];
	*num = table->num;
	return table->table;
}
244
245static const struct coproc_reg *find_reg(const struct coproc_params *params,
246 const struct coproc_reg table[],
247 unsigned int num)
248{
249 unsigned int i;
250
251 for (i = 0; i < num; i++) {
252 const struct coproc_reg *r = &table[i];
253
254 if (params->is_64bit != r->is_64)
255 continue;
256 if (params->CRn != r->CRn)
257 continue;
258 if (params->CRm != r->CRm)
259 continue;
260 if (params->Op1 != r->Op1)
261 continue;
262 if (params->Op2 != r->Op2)
263 continue;
264
265 return r;
266 }
267 return NULL;
268}
269
/*
 * Emulate one trapped CP15 access described by @params. Always returns 1
 * (resume the guest): either the access was emulated and the instruction
 * skipped, or #UNDEF was injected into the guest.
 */
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			/* NOTE(review): HSR bit 25 looks like the IL
			 * (32-bit instruction) flag — confirm. */
			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08x\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	/* Unknown register or failed access: let the guest see #UNDEF. */
	kvm_inject_undefined(vcpu);
	return 1;
}
304
305/**
306 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
307 * @vcpu: The VCPU pointer
308 * @run: The kvm_run struct
309 */
310int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
311{
312 struct coproc_params params;
313
314 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
315 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
316 params.is_write = ((vcpu->arch.hsr & 1) == 0);
317 params.is_64bit = true;
318
319 params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
320 params.Op2 = 0;
321 params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
322 params.CRn = 0;
323
324 return emulate_cp15(vcpu, &params);
325}
326
327static void reset_coproc_regs(struct kvm_vcpu *vcpu,
328 const struct coproc_reg *table, size_t num)
329{
330 unsigned long i;
331
332 for (i = 0; i < num; i++)
333 if (table[i].reset)
334 table[i].reset(vcpu, &table[i]);
335}
336
337/**
338 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
339 * @vcpu: The VCPU pointer
340 * @run: The kvm_run struct
341 */
342int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
343{
344 struct coproc_params params;
345
346 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
347 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
348 params.is_write = ((vcpu->arch.hsr & 1) == 0);
349 params.is_64bit = false;
350
351 params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
352 params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
353 params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
354 params.Rt2 = 0;
355
356 return emulate_cp15(vcpu, &params);
357}
358
359/******************************************************************************
360 * Userspace API
361 *****************************************************************************/
362
/*
 * Decode a ONE_REG index into coproc_params. Returns false for
 * malformed ids (wrong size, or stray bits outside the defined fields).
 */
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK))
			return false;
		/* 64-bit registers have no CRn/Op2 fields. */
		params->is_64bit = true;
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRn = 0;
		return true;
	default:
		return false;
	}
}
405
406/* Decode an index value, and find the cp15 coproc_reg entry. */
407static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
408 u64 id)
409{
410 size_t num;
411 const struct coproc_reg *table, *r;
412 struct coproc_params params;
413
414 /* We only do cp15 for now. */
415 if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
416 return NULL;
417
418 if (!index_to_params(id, &params))
419 return NULL;
420
421 table = get_target_table(vcpu->arch.target, &num);
422 r = find_reg(&params, table, num);
423 if (!r)
424 r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
425
426 /* Not saved in the cp15 array? */
427 if (r && !r->reg)
428 r = NULL;
429
430 return r;
431}
432
433/*
434 * These are the invariant cp15 registers: we let the guest see the host
435 * versions of these, so they're part of the guest state.
436 *
437 * A future CPU may provide a mechanism to present different values to
438 * the guest, or a future kvm may trap them.
439 */
440/* Unfortunately, there's no register-argument for mrc, so generate. */
441#define FUNCTION_FOR32(crn, crm, op1, op2, name) \
442 static void get_##name(struct kvm_vcpu *v, \
443 const struct coproc_reg *r) \
444 { \
445 u32 val; \
446 \
447 asm volatile("mrc p15, " __stringify(op1) \
448 ", %0, c" __stringify(crn) \
449 ", c" __stringify(crm) \
450 ", " __stringify(op2) "\n" : "=r" (val)); \
451 ((struct coproc_reg *)r)->val = val; \
452 }
453
454FUNCTION_FOR32(0, 0, 0, 0, MIDR)
455FUNCTION_FOR32(0, 0, 0, 1, CTR)
456FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
457FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
458FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
459FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
460FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
461FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
462FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
463FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
464FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
465FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
466FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
467FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
468FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
469FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
470FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
471FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
472FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
473FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
474FUNCTION_FOR32(0, 0, 1, 7, AIDR)
475
476/* ->val is filled in by kvm_invariant_coproc_table_init() */
477static struct coproc_reg invariant_cp15[] = {
478 { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
479 { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
480 { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
481 { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
482 { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
483
484 { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
485 { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
486 { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
487 { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
488 { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
489 { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
490 { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
491 { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
492
493 { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
494 { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
495 { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
496 { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
497 { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
498 { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
499
500 { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
501 { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
502};
503
504static int reg_from_user(void *val, const void __user *uaddr, u64 id)
505{
506 /* This Just Works because we are little endian. */
507 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
508 return -EFAULT;
509 return 0;
510}
511
512static int reg_to_user(void __user *uaddr, const void *val, u64 id)
513{
514 /* This Just Works because we are little endian. */
515 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
516 return -EFAULT;
517 return 0;
518}
519
520static int get_invariant_cp15(u64 id, void __user *uaddr)
521{
522 struct coproc_params params;
523 const struct coproc_reg *r;
524
525 if (!index_to_params(id, &params))
526 return -ENOENT;
527
528 r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
529 if (!r)
530 return -ENOENT;
531
532 return reg_to_user(uaddr, &r->val, id);
533}
534
535static int set_invariant_cp15(u64 id, void __user *uaddr)
536{
537 struct coproc_params params;
538 const struct coproc_reg *r;
539 int err;
540 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
541
542 if (!index_to_params(id, &params))
543 return -ENOENT;
544 r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
545 if (!r)
546 return -ENOENT;
547
548 err = reg_from_user(&val, uaddr, id);
549 if (err)
550 return err;
551
552 /* This is what we mean by invariant: you can't change it. */
553 if (r->val != val)
554 return -EINVAL;
555
556 return 0;
557}
558
559static bool is_valid_cache(u32 val)
560{
561 u32 level, ctype;
562
563 if (val >= CSSELR_MAX)
564 return -ENOENT;
565
566 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
567 level = (val >> 1);
568 ctype = (cache_levels >> (level * 3)) & 7;
569
570 switch (ctype) {
571 case 0: /* No cache */
572 return false;
573 case 1: /* Instruction cache only */
574 return (val & 1);
575 case 2: /* Data cache only */
576 case 4: /* Unified cache */
577 return !(val & 1);
578 case 3: /* Separate instruction and data caches */
579 return true;
580 default: /* Reserved: we can't know instruction or data. */
581 return false;
582 }
583}
584
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	/* Ensure the CSSELR write completes before the CCSIDR read. */
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
601
602static int demux_c15_get(u64 id, void __user *uaddr)
603{
604 u32 val;
605 u32 __user *uval = uaddr;
606
607 /* Fail if we have unknown bits set. */
608 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
609 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
610 return -ENOENT;
611
612 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
613 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
614 if (KVM_REG_SIZE(id) != 4)
615 return -ENOENT;
616 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
617 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
618 if (!is_valid_cache(val))
619 return -ENOENT;
620
621 return put_user(get_ccsidr(val), uval);
622 default:
623 return -ENOENT;
624 }
625}
626
627static int demux_c15_set(u64 id, void __user *uaddr)
628{
629 u32 val, newval;
630 u32 __user *uval = uaddr;
631
632 /* Fail if we have unknown bits set. */
633 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
634 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
635 return -ENOENT;
636
637 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
638 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
639 if (KVM_REG_SIZE(id) != 4)
640 return -ENOENT;
641 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
642 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
643 if (!is_valid_cache(val))
644 return -ENOENT;
645
646 if (get_user(newval, uval))
647 return -EFAULT;
648
649 /* This is also invariant: you can't change it. */
650 if (newval != get_ccsidr(val))
651 return -EINVAL;
652 return 0;
653 default:
654 return -ENOENT;
655 }
656}
657
658#ifdef CONFIG_VFPv3
/* Ids of the 32-bit VFP control/status registers exposed to userspace. */
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };
666
667static unsigned int num_fp_regs(void)
668{
669 if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
670 return 32;
671 else
672 return 16;
673}
674
675static unsigned int num_vfp_regs(void)
676{
677 /* Normal FP regs + control regs. */
678 return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
679}
680
/*
 * Write the register ids of all VFP registers (d0..dN then the
 * control/status registers) to the user array @uindices.
 *
 * Returns the number of ids written (num_vfp_regs()), or -EFAULT if a
 * copy to userspace fails.
 */
static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	/* The double-precision data registers: 64 bit ids. */
	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	/* The 32-bit control/status registers (FPEXC, FPSCR, ...). */
	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}
702
/*
 * Read one VFP register for KVM_GET_ONE_REG.
 *
 * The low bits of @id select either a 64-bit data register (index below
 * num_fp_regs()) or one of the 32-bit control/status registers.  MVFR0,
 * MVFR1 and FPSID are read live from the hardware.  Returns -ENOENT for
 * malformed or unknown ids.
 */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Data registers (d0..dN) are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
	/* ID registers: read straight from the host hardware. */
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}
746
/*
 * Write one VFP register for KVM_SET_ONE_REG.
 *
 * Data registers and the writable control registers are stored in the
 * guest's VFP state; MVFR0/MVFR1/FPSID are invariant, so a "write" only
 * succeeds if it matches the hardware value.  Returns -ENOENT for
 * malformed or unknown ids, -EFAULT on copy failure, -EINVAL on an
 * attempt to change an invariant register.
 */
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Data registers (d0..dN) are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
800#else /* !CONFIG_VFPv3 */
/* Stubs used when the kernel is built without VFPv3 support. */
static unsigned int num_vfp_regs(void)
{
	/* No VFP registers are exposed. */
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	/* Nothing to copy; report zero ids written. */
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	/* No VFP ids exist, so every lookup misses. */
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
820#endif /* !CONFIG_VFPv3 */
821
822int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
823{
824 const struct coproc_reg *r;
825 void __user *uaddr = (void __user *)(long)reg->addr;
826
827 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
828 return demux_c15_get(reg->id, uaddr);
829
830 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
831 return vfp_get_reg(vcpu, reg->id, uaddr);
832
833 r = index_to_coproc_reg(vcpu, reg->id);
834 if (!r)
835 return get_invariant_cp15(reg->id, uaddr);
836
837 /* Note: copies two regs if size is 64 bit. */
838 return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
839}
840
841int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
842{
843 const struct coproc_reg *r;
844 void __user *uaddr = (void __user *)(long)reg->addr;
845
846 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
847 return demux_c15_set(reg->id, uaddr);
848
849 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
850 return vfp_set_reg(vcpu, reg->id, uaddr);
851
852 r = index_to_coproc_reg(vcpu, reg->id);
853 if (!r)
854 return set_invariant_cp15(reg->id, uaddr);
855
856 /* Note: copies two regs if size is 64 bit */
857 return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
858}
859
860static unsigned int num_demux_regs(void)
861{
862 unsigned int i, count = 0;
863
864 for (i = 0; i < CSSELR_MAX; i++)
865 if (is_valid_cache(i))
866 count++;
867
868 return count;
869}
870
871static int write_demux_regids(u64 __user *uindices)
872{
873 u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
874 unsigned int i;
875
876 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
877 for (i = 0; i < CSSELR_MAX; i++) {
878 if (!is_valid_cache(i))
879 continue;
880 if (put_user(val | i, uindices))
881 return -EFAULT;
882 uindices++;
883 }
884 return 0;
885}
886
887static u64 cp15_to_index(const struct coproc_reg *reg)
888{
889 u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
890 if (reg->is_64) {
891 val |= KVM_REG_SIZE_U64;
892 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
893 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
894 } else {
895 val |= KVM_REG_SIZE_U32;
896 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
897 val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
898 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
899 val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
900 }
901 return val;
902}
903
904static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
905{
906 if (!*uind)
907 return true;
908
909 if (put_user(cp15_to_index(reg), *uind))
910 return false;
911
912 (*uind)++;
913 return true;
914}
915
/*
 * Assumed ordered tables, see kvm_coproc_table_init.
 *
 * Merge-walk the target-specific and generic cp15 tables in sorted
 * order, emitting one id per distinct register via copy_reg_to_user()
 * (which counts without copying when @uind is NULL).  When both tables
 * contain the same register (cmp == 0), the target-specific entry wins
 * and both iterators advance so the register is emitted only once.
 *
 * Returns the number of ids emitted, or -EFAULT on copy failure.
 */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		/* cmp_reg() treats a NULL (exhausted) side as "greater". */
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* Advance whichever side(s) produced the smaller key. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
958
/*
 * Total number of ids kvm_arm_copy_coproc_indices() will produce:
 * invariant cp15 + demuxed (cache) + VFP + trapped-and-saved cp15.
 * Passing NULL makes walk_cp15() count without copying to userspace.
 */
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}
966
/*
 * Write all coprocessor register ids for this vcpu to @uindices, in the
 * same order kvm_arm_num_coproc_regs() counts them: invariant cp15,
 * then the merged cp15 tables, then VFP, then the demuxed cache regs.
 */
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;	/* walk_cp15() returned the count written */

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
991
/*
 * One-time init: sanity-check table ordering, snapshot the invariant
 * register values, and cache a cleaned-up copy of CLIDR.
 */
void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	/* Find the first 3-bit Ctype field that reads 0b000 ... */
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
1021
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	/*
	 * Any slot still holding the 0x42 poison was never reset.
	 * Slot 0 is skipped: reg == 0 means "not saved" (see coproc.h).
	 */
	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
new file mode 100644
index 000000000000..992adfafa2ff
--- /dev/null
+++ b/arch/arm/kvm/coproc.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#ifndef __ARM_KVM_COPROC_LOCAL_H__
20#define __ARM_KVM_COPROC_LOCAL_H__
21
/* Decoded fields of a trapped coprocessor access. */
struct coproc_params {
	unsigned long CRn;	/* primary coproc register (32-bit ops) */
	unsigned long CRm;	/* secondary coproc register */
	unsigned long Op1;	/* opcode 1 */
	unsigned long Op2;	/* opcode 2 (32-bit ops only) */
	unsigned long Rt1;	/* first guest GPR in the transfer */
	unsigned long Rt2;	/* second guest GPR (64-bit ops only) */
	bool is_64bit;		/* MRRC/MCRR rather than MRC/MCR */
	bool is_write;		/* guest writes the coproc register */
};
32
/* One entry in a cp15 trap/save table (must be sorted, see cmp_reg()). */
struct coproc_reg {
	/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
	unsigned long CRn;
	unsigned long CRm;
	unsigned long Op1;
	unsigned long Op2;

	/* true for 64-bit (MRRC/MCRR) registers. */
	bool is_64;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       const struct coproc_params *,
		       const struct coproc_reg *);

	/* Initialization for vcpu. */
	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);

	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
	unsigned long reg;

	/* Value (usually reset value) */
	u64 val;
};
56
/* Log an unimplemented coprocessor access as a ready-made table entry. */
static inline void print_cp_instr(const struct coproc_params *p)
{
	/* Look, we even formatted it for you to paste into the table! */
	if (p->is_64bit) {
		kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
			      p->CRm, p->Op1, p->is_write ? "write" : "read");
	} else {
		kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
			      " func_%s },\n",
			      p->CRn, p->CRm, p->Op1, p->Op2,
			      p->is_write ? "write" : "read");
	}
}
70
/* Access handler helper: silently discard a guest write (WI behaviour). */
static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct coproc_params *p)
{
	return true;
}
76
/* Access handler helper: a guest read returns zero (RAZ behaviour). */
static inline bool read_zero(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p)
{
	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}
83
/*
 * Reject (and log) a guest write to a read-only register.  Returning
 * false tells the caller the emulated access failed.
 */
static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
				      const struct coproc_params *params)
{
	kvm_debug("CP15 write to read-only register at: %08x\n",
		  *vcpu_pc(vcpu));
	print_cp_instr(params);
	return false;
}
92
/*
 * Reject (and log) a guest read from a write-only register.  Returning
 * false tells the caller the emulated access failed.
 */
static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
					const struct coproc_params *params)
{
	kvm_debug("CP15 read to write-only register at: %08x\n",
		  *vcpu_pc(vcpu));
	print_cp_instr(params);
	return false;
}
101
/* Reset functions */

/* Poison a 32-bit register whose architectural reset value is UNKNOWN. */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
				 const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
	vcpu->arch.cp15[r->reg] = 0xdecafbad;
}

/* Reset a register to the fixed value carried in its table entry. */
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
	vcpu->arch.cp15[r->reg] = r->val;
}

/* As reset_unknown(), but for a register occupying two cp15[] slots. */
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
				   const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));

	vcpu->arch.cp15[r->reg] = 0xdecafbad;
	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
}
127
/*
 * Total order on table entries by (CRn, CRm, Op1, Op2).  A NULL entry
 * sorts after everything, which lets walk_cp15() treat an exhausted
 * table as "always greater".  Exactly one argument may be NULL.
 */
static inline int cmp_reg(const struct coproc_reg *i1,
			  const struct coproc_reg *i2)
{
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	return i1->Op2 - i2->Op2;
}
144
145
146#define CRn(_x) .CRn = _x
147#define CRm(_x) .CRm = _x
148#define Op1(_x) .Op1 = _x
149#define Op2(_x) .Op2 = _x
150#define is64 .is_64 = true
151#define is32 .is_64 = false
152
153#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
new file mode 100644
index 000000000000..685063a6d0cf
--- /dev/null
+++ b/arch/arm/kvm/coproc_a15.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Rusty Russell <rusty@rustcorp.au>
4 * Christoffer Dall <c.dall@virtualopensystems.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19#include <linux/kvm_host.h>
20#include <asm/cputype.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_host.h>
23#include <asm/kvm_emulate.h>
24#include <asm/kvm_coproc.h>
25#include <linux/init.h>
26
/* Reset c0_MPIDR: host MPIDR with the affinity bits replaced by vcpu_id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR:
	 * (Even if we present only one VCPU to the guest on an SMP
	 * host we don't set the U bit in the MPIDR, or vice versa, as
	 * revealing the underlying hardware properties is likely to
	 * be the best choice).
	 */
	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
				     | (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
}
39
40#include "coproc.h"
41
/* A15 TRM 4.3.28: RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	/* Writes are ignored; reads return the saved guest copy. */
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
	return true;
}
53
/* A15 TRM 4.3.60: R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	/* Writes fault; reads return zero (no peripherals advertised). */
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}
63
/* A15 TRM 4.3.48: R/O WI. */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	/* Writes are ignored; reads return the value set by reset_l2ctlr(). */
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
	return true;
}
75
/* Reset c9_L2CTLR: host value, with the CPU count reflecting the guest. */
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	/* Read the host L2CTLR ... */
	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	/* ... and replace bits [25:24] with (number of vcpus - 1). */
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	l2ctlr |= (ncores & 3) << 24;

	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}
87
/* Reset c1_ACTLR: host value with the SMP bit matching the vcpu count. */
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu->arch.cp15[c1_ACTLR] = actlr;
}
102
/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	/* Writes are ignored; reads always return zero. */
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}
114
/*
 * A15-specific CP15 registers.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
 * (checked at boot by coproc_a15_init()).
 */
static const struct coproc_reg a15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* SCTLR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c1_SCTLR, 0x00C50078 },
	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },
	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	/* No ->reg slot: trapped but not saved (see walk_cp15()). */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
144
/* Registered with the generic coproc code for Cortex-A15 targets. */
static struct kvm_coproc_target_table a15_target_table = {
	.target = KVM_ARM_TARGET_CORTEX_A15,
	.table = a15_regs,
	.num = ARRAY_SIZE(a15_regs),
};
150
/* Verify a15_regs ordering and register the A15 target table. */
static int __init coproc_a15_init(void)
{
	unsigned int i;

	/* The table must be strictly ascending (required by walk_cp15()). */
	for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
		BUG_ON(cmp_reg(&a15_regs[i-1],
			       &a15_regs[i]) >= 0);

	kvm_register_target_coproc_table(&a15_target_table);
	return 0;
}
late_initcall(coproc_a15_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
new file mode 100644
index 000000000000..d61450ac6665
--- /dev/null
+++ b/arch/arm/kvm/emulate.c
@@ -0,0 +1,373 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/mm.h>
20#include <linux/kvm_host.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_emulate.h>
23#include <trace/events/kvm.h>
24
25#include "trace.h"
26
27#define VCPU_NR_MODES 6
28#define VCPU_REG_OFFSET_USR 0
29#define VCPU_REG_OFFSET_FIQ 1
30#define VCPU_REG_OFFSET_IRQ 2
31#define VCPU_REG_OFFSET_SVC 3
32#define VCPU_REG_OFFSET_ABT 4
33#define VCPU_REG_OFFSET_UND 5
34#define REG_OFFSET(_reg) \
35 (offsetof(struct kvm_regs, _reg) / sizeof(u32))
36
37#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
38
/*
 * Per-mode map from register number (r0-r14) to the u32 offset of its
 * backing storage inside struct kvm_regs.  r0-r7 always live in the
 * USR copies; the banked higher registers depend on the mode row.
 */
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
107
/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	u32 *reg_array = (u32 *)&vcpu->arch.regs;
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE...SVC_MODE:
		/*
		 * NOTE(review): relies on USR/FIQ/IRQ/SVC mode encodings
		 * mapping to table rows 0..3 once MODE32_BIT is cleared —
		 * confirm against the MODE_* definitions.
		 */
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		/* SYS shares its register file with USR. */
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
140
/*
 * Return the SPSR for the current mode of the virtual CPU.
 * BUG()s for modes that have no SPSR (USR/SYS).
 */
u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
162
/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu: the vcpu pointer
 * @run: the kvm_run structure pointer
 *
 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
 * halt execution of world-switches and schedule other host processes until
 * there is an incoming IRQ or FIQ to the VM.
 */
int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	/* NOTE(review): presumably 1 means "resume the guest" — confirm
	 * against the exit-handler convention. */
	return 1;
}
178
/**
 * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu: The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	/* ARM state must never carry IT bits. */
	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	/* Not inside an IT block: nothing to advance. */
	if (!(cpsr & PSR_IT_MASK))
		return;

	/* Reassemble IT[7:5] (cond) and IT[4:0] from the split CPSR fields. */
	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;	/* IT block finished */
	else
		itbits = (itbits << 1) & 0x1f;

	/* Scatter the updated fields back into the CPSR. */
	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
216
/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 * @is_wide_instr: true if the trapped instruction was a 32-bit (wide) one
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	/* Narrow Thumb instructions are 2 bytes; everything else is 4. */
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
232
233
234/******************************************************************************
235 * Inject exceptions into the guest
236 */
237
/*
 * Base address of the guest's exception vectors: the hivecs region
 * (0xffff0000) when SCTLR.V is set, otherwise the guest's VBAR.
 */
static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}
248
/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;	/* undef vector slot */
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	/* Enter UND mode with IRQs masked and a clean execution state. */
	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	/* SCTLR.TE/EE choose the instruction set and endianness on entry. */
	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
287
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 *
 * Switches the vcpu to ABT mode, records the return address and SPSR,
 * branches to the appropriate vector, and fills in the fault address and
 * status registers (IFAR/IFSR for a prefetch abort, DFAR/DFSR for a
 * data abort).
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	/* Enter ABT mode with IRQs/aborts masked, clean execution state. */
	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	/* SCTLR.TE/EE choose the instruction set and endianness on entry. */
	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	if (is_pabt)
		vect_offset = 12;	/* prefetch abort vector */
	else
		vect_offset = 16;	/* data abort vector */

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}

}
348
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}
361
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
new file mode 100644
index 000000000000..2339d9609d36
--- /dev/null
+++ b/arch/arm/kvm/guest.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/kvm_host.h>
22#include <linux/module.h>
23#include <linux/vmalloc.h>
24#include <linux/fs.h>
25#include <asm/uaccess.h>
26#include <asm/kvm.h>
27#include <asm/kvm_asm.h>
28#include <asm/kvm_emulate.h>
29#include <asm/kvm_coproc.h>
30
31#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
32#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
33
/* No ARM-specific VM/VCPU stats are exported via debugfs yet. */
34struct kvm_stats_debugfs_item debugfs_entries[] = {
35 { NULL }
36};
37
/* Nothing arch-specific to do at VCPU setup time on ARM. */
38int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
39{
40 return 0;
41}
42
/*
 * Strip the architecture/size/coproc-group fields from a ONE_REG id,
 * leaving the index (in u32 units) into struct kvm_regs.
 */
43static u64 core_reg_offset_from_id(u64 id)
44{
45 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
46}
47
/*
 * Read one 32-bit core register (indexed into struct kvm_regs) and copy
 * it to userspace.  Returns -ENOENT for a non-32-bit size or an
 * out-of-range index, otherwise the result of put_user() (0 or -EFAULT).
 */
48static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
49{
50 u32 __user *uaddr = (u32 __user *)(long)reg->addr;
51 struct kvm_regs *regs = &vcpu->arch.regs;
52 u64 off;
53
54 if (KVM_REG_SIZE(reg->id) != 4)
55 return -ENOENT;
56
57 /* Our ID is an index into the kvm_regs struct. */
58 off = core_reg_offset_from_id(reg->id);
59 if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
60 return -ENOENT;
61
62 return put_user(((u32 *)regs)[off], uaddr);
63}
64
/*
 * Write one 32-bit core register from userspace.  Returns -ENOENT for a
 * non-32-bit size or out-of-range index, -EFAULT if the copy from
 * userspace fails, and -EINVAL if the value would set an illegal CPSR
 * mode.
 */
65static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
66{
67 u32 __user *uaddr = (u32 __user *)(long)reg->addr;
68 struct kvm_regs *regs = &vcpu->arch.regs;
69 u64 off, val;
70
71 if (KVM_REG_SIZE(reg->id) != 4)
72 return -ENOENT;
73
74 /* Our ID is an index into the kvm_regs struct. */
75 off = core_reg_offset_from_id(reg->id);
76 if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
77 return -ENOENT;
78
79 if (get_user(val, uaddr) != 0)
80 return -EFAULT;
81
/* Only allow the CPSR to select a mode the guest is permitted to run in. */
82 if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
83 unsigned long mode = val & MODE_MASK;
84 switch (mode) {
85 case USR_MODE:
86 case FIQ_MODE:
87 case IRQ_MODE:
88 case SVC_MODE:
89 case ABT_MODE:
90 case UND_MODE:
91 break;
92 default:
93 return -EINVAL;
94 }
95 }
96
97 ((u32 *)regs)[off] = val;
98 return 0;
99}
100
/* Bulk register access is not supported; use KVM_GET_ONE_REG per register. */
101int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
102{
103 return -EINVAL;
104}
105
/* Bulk register access is not supported; use KVM_SET_ONE_REG per register. */
106int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
107{
108 return -EINVAL;
109}
110
/* Number of 32-bit words in struct kvm_regs, i.e. the count of core regs. */
111static unsigned long num_core_regs(void)
112{
113 return sizeof(struct kvm_regs) / sizeof(u32);
114}
115
116/**
117 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
118 *
119 * This is for all registers: the core registers plus the coprocessor registers.
120 */
121unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
122{
123 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
124}
125
126/**
127 * kvm_arm_copy_reg_indices - get indices of all registers.
128 *
129 * We do core registers right here, then we append coproc regs.
130 */
131int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
132{
133 unsigned int i;
134 const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
135
136 for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
137 if (put_user(core_reg | i, uindices))
138 return -EFAULT;
139 uindices++;
140 }
141
142 return kvm_arm_copy_coproc_indices(vcpu, uindices);
143}
144
/*
 * kvm_arm_get_reg - handle KVM_GET_ONE_REG: core registers are read here,
 * anything else is forwarded to the coprocessor emulation code.
 */
145int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
146{
147 /* We currently use nothing arch-specific in upper 32 bits */
148 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
149 return -EINVAL;
150
151 /* Register group 16 means we want a core register. */
152 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
153 return get_core_reg(vcpu, reg);
154
155 return kvm_arm_coproc_get_reg(vcpu, reg);
156}
157
/*
 * kvm_arm_set_reg - handle KVM_SET_ONE_REG: core registers are written here,
 * anything else is forwarded to the coprocessor emulation code.
 */
158int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
159{
160 /* We currently use nothing arch-specific in upper 32 bits */
161 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
162 return -EINVAL;
163
164 /* Register group 16 means we set a core register. */
165 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
166 return set_core_reg(vcpu, reg);
167
168 return kvm_arm_coproc_set_reg(vcpu, reg);
169}
170
/* Not supported on ARM. */
171int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
172 struct kvm_sregs *sregs)
173{
174 return -EINVAL;
175}
176
/* Not supported on ARM. */
177int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
178 struct kvm_sregs *sregs)
179{
180 return -EINVAL;
181}
182
/*
 * kvm_vcpu_set_target - set the target CPU type and feature bits for a VCPU
 *
 * Validates the requested target against the host's supported target and
 * copies the requested feature bits, then resets the VCPU.  Returns 0 on
 * success, -EINVAL for an unsupported target, -ENOENT for an unknown
 * feature bit, or the result of kvm_reset_vcpu().
 */
183int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
184 const struct kvm_vcpu_init *init)
185{
186 unsigned int i;
187
188 /* We can only do a cortex A15 for now. */
189 if (init->target != kvm_target_cpu())
190 return -EINVAL;
191
192 vcpu->arch.target = init->target;
193 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
194
195 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
196 for (i = 0; i < sizeof(init->features) * 8; i++) {
197 if (test_bit(i, (void *)init->features)) {
198 if (i >= KVM_VCPU_MAX_FEATURES)
199 return -ENOENT;
200 set_bit(i, vcpu->arch.features);
201 }
202 }
203
204 /* Now we know what it is, we can reset it. */
205 return kvm_reset_vcpu(vcpu);
206}
207
/* Not supported on ARM. */
208int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
209{
210 return -EINVAL;
211}
212
/* Not supported on ARM. */
213int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
214{
215 return -EINVAL;
216}
217
/* Not supported on ARM. */
218int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
219 struct kvm_translation *tr)
220{
221 return -EINVAL;
222}
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
new file mode 100644
index 000000000000..9f37a79b880b
--- /dev/null
+++ b/arch/arm/kvm/init.S
@@ -0,0 +1,114 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/linkage.h>
20#include <asm/unified.h>
21#include <asm/asm-offsets.h>
22#include <asm/kvm_asm.h>
23#include <asm/kvm_arm.h>
24
25/********************************************************************
26 * Hypervisor initialization
27 * - should be called with:
28 * r0,r1 = Hypervisor pgd pointer
29 * r2 = top of Hyp stack (kernel VA)
30 * r3 = pointer to hyp vectors
31 */
32
33 .text
34 .pushsection .hyp.idmap.text,"ax"
35 .align 5
36__kvm_hyp_init:
37 .globl __kvm_hyp_init
38
39 @ Hyp-mode exception vector
40 W(b) .
41 W(b) .
42 W(b) .
43 W(b) .
44 W(b) .
45 W(b) __do_hyp_init
46 W(b) .
47 W(b) .
48
49__do_hyp_init:
50 @ Set the HTTBR to point to the hypervisor PGD pointer passed
51 mcrr p15, 4, r0, r1, c2
52
53 @ Set the HTCR and VTCR to the same shareability and cacheability
54 @ settings as the non-secure TTBCR and with T0SZ == 0.
55 mrc p15, 4, r0, c2, c0, 2 @ HTCR
56 ldr r12, =HTCR_MASK
57 bic r0, r0, r12
58 mrc p15, 0, r1, c2, c0, 2 @ TTBCR
59 and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
60 orr r0, r0, r1
61 mcr p15, 4, r0, c2, c0, 2 @ HTCR
62
63 mrc p15, 4, r1, c2, c1, 2 @ VTCR
64 ldr r12, =VTCR_MASK
65 bic r1, r1, r12
66 bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
67 orr r1, r0, r1
68 orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
69 mcr p15, 4, r1, c2, c1, 2 @ VTCR
70
71 @ Use the same memory attributes for hyp. accesses as the kernel
72 @ (copy MAIRx to HMAIRx).
73 mrc p15, 0, r0, c10, c2, 0
74 mcr p15, 4, r0, c10, c2, 0
75 mrc p15, 0, r0, c10, c2, 1
76 mcr p15, 4, r0, c10, c2, 1
77
78 @ Set the HSCTLR to:
79 @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
80 @ - Endianness: Kernel config
81 @ - Fast Interrupt Features: Kernel config
82 @ - Write permission implies XN: disabled
83 @ - Instruction cache: enabled
84 @ - Data/Unified cache: enabled
85 @ - Memory alignment checks: enabled
86 @ - MMU: enabled (this code must be run from an identity mapping)
87 mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
88 ldr r12, =HSCTLR_MASK
89 bic r0, r0, r12
90 mrc p15, 0, r1, c1, c0, 0 @ SCTLR
91 ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
92 and r1, r1, r12
93 ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) )
94 THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
95 orr r1, r1, r12
96 orr r0, r0, r1
97 isb
98 mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
99 isb
100
101 @ Set stack pointer and return to the kernel
102 mov sp, r2
103
104 @ Set HVBAR to point to the HYP vectors
105 mcr p15, 4, r3, c12, c0, 0 @ HVBAR
106
107 eret
108
109 .ltorg
110
111 .globl __kvm_hyp_init_end
112__kvm_hyp_init_end:
113
114 .popsection
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
new file mode 100644
index 000000000000..c5400d2e97ca
--- /dev/null
+++ b/arch/arm/kvm/interrupts.S
@@ -0,0 +1,478 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/linkage.h>
20#include <linux/const.h>
21#include <asm/unified.h>
22#include <asm/page.h>
23#include <asm/ptrace.h>
24#include <asm/asm-offsets.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/vfpmacros.h>
28#include "interrupts_head.S"
29
30 .text
31
32__kvm_hyp_code_start:
33 .globl __kvm_hyp_code_start
34
35/********************************************************************
36 * Flush per-VMID TLBs
37 *
38 * void __kvm_tlb_flush_vmid(struct kvm *kvm);
39 *
40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
41 * inside the inner-shareable domain (which is the case for all v7
42 * implementations). If we come across a non-IS SMP implementation, we'll
43 * have to use an IPI based mechanism. Until then, we stick to the simple
44 * hardware assisted version.
45 */
46ENTRY(__kvm_tlb_flush_vmid)
47 push {r2, r3}
48
@ Load this VM's VTTBR and make its VMID current so the TLB
@ maintenance below applies to this VM's translations.
49 add r0, r0, #KVM_VTTBR
50 ldrd r2, r3, [r0]
51 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
52 isb
53 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
54 dsb
55 isb
56 mov r2, #0
57 mov r3, #0
58 mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
59 isb @ Not necessary if followed by eret
60
61 pop {r2, r3}
62 bx lr
63ENDPROC(__kvm_tlb_flush_vmid)
64
65/********************************************************************
66 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
67 * domain, for all VMIDs
68 *
69 * void __kvm_flush_vm_context(void);
70 */
71ENTRY(__kvm_flush_vm_context)
72 mov r0, #0 @ rn parameter for c15 flushes is SBZ
73
74 /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
75 mcr p15, 4, r0, c8, c3, 4
76 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
77 mcr p15, 0, r0, c7, c1, 0
78 dsb @ complete the invalidations before returning
79 isb @ Not necessary if followed by eret
80
81 bx lr
82ENDPROC(__kvm_flush_vm_context)
83
84
85/********************************************************************
86 * Hypervisor world-switch code
87 *
88 *
89 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
90 */
91ENTRY(__kvm_vcpu_run)
92 @ Save the vcpu pointer
93 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
94
95 save_host_regs
96
97 @ Store hardware CP15 state and load guest state
98 read_cp15_state store_to_vcpu = 0
99 write_cp15_state read_from_vcpu = 1
100
101 @ If the host kernel has not been configured with VFPv3 support,
102 @ then it is safer if we deny guests from using it as well.
103#ifdef CONFIG_VFPv3
104 @ Set FPEXC_EN so the guest doesn't trap floating point instructions
105 VFPFMRX r2, FPEXC @ VMRS
106 push {r2}
107 orr r2, r2, #FPEXC_EN
108 VFPFMXR FPEXC, r2 @ VMSR
109#endif
110
111 @ Configure Hyp-role
112 configure_hyp_role vmentry
113
114 @ Trap coprocessor CRx accesses
115 set_hstr vmentry
116 set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
117 set_hdcr vmentry
118
119 @ Write configured ID register into MIDR alias
120 ldr r1, [vcpu, #VCPU_MIDR]
121 mcr p15, 4, r1, c0, c0, 0
122
123 @ Write guest view of MPIDR into VMPIDR
124 ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
125 mcr p15, 4, r1, c0, c0, 5
126
127 @ Set up guest memory translation
128 ldr r1, [vcpu, #VCPU_KVM]
129 add r1, r1, #KVM_VTTBR
130 ldrd r2, r3, [r1]
131 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
132
133 @ We're all done, just restore the GPRs and go to the guest
134 restore_guest_regs
135 clrex @ Clear exclusive monitor
136 eret
137
138__kvm_vcpu_return:
139 /*
140 * return convention:
141 * guest r0, r1, r2 saved on the stack
142 * r0: vcpu pointer
143 * r1: exception code
144 */
145 save_guest_regs
146
147 @ Set VMID == 0
148 mov r2, #0
149 mov r3, #0
150 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
151
152 @ Don't trap coprocessor accesses for host kernel
153 set_hstr vmexit
154 set_hdcr vmexit
155 set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
156
157#ifdef CONFIG_VFPv3
158 @ Save floating point registers if we let the guest use them.
159 tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
160 bne after_vfp_restore
161
162 @ Switch VFP/NEON hardware state to the host's
163 add r7, vcpu, #VCPU_VFP_GUEST
164 store_vfp_state r7
165 add r7, vcpu, #VCPU_VFP_HOST
166 ldr r7, [r7]
167 restore_vfp_state r7
168
169after_vfp_restore:
170 @ Restore FPEXC_EN which we clobbered on entry
171 pop {r2}
172 VFPFMXR FPEXC, r2
173#endif
174
175 @ Reset Hyp-role
176 configure_hyp_role vmexit
177
178 @ Let host read hardware MIDR
179 mrc p15, 0, r2, c0, c0, 0
180 mcr p15, 4, r2, c0, c0, 0
181
182 @ Back to hardware MPIDR
183 mrc p15, 0, r2, c0, c0, 5
184 mcr p15, 4, r2, c0, c0, 5
185
186 @ Store guest CP15 state and restore host state
187 read_cp15_state store_to_vcpu = 1
188 write_cp15_state read_from_vcpu = 0
189
190 restore_host_regs
191 clrex @ Clear exclusive monitor
192 mov r0, r1 @ Return the return code
193 mov r1, #0 @ Clear upper bits in return value
194 bx lr @ return to IOCTL
195
196/********************************************************************
197 * Call function in Hyp mode
198 *
199 *
200 * u64 kvm_call_hyp(void *hypfn, ...);
201 *
202 * This is not really a variadic function in the classic C-way and care must
203 * be taken when calling this to ensure parameters are passed in registers
204 * only, since the stack will change between the caller and the callee.
205 *
206 * Call the function with the first argument containing a pointer to the
207 * function you wish to call in Hyp mode, and subsequent arguments will be
208 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
209 * function pointer can be passed). The function being called must be mapped
210 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
211 * passed in r0 and r1.
212 *
213 * The calling convention follows the standard AAPCS:
214 * r0 - r3: caller save
215 * r12: caller save
216 * rest: callee save
217 */
218ENTRY(kvm_call_hyp)
@ Trap into Hyp mode.  The HVC handler (host_switch_to_hyp, below)
@ shuffles r1-r3 into r0-r2 and branches to the function passed in r0.
219 hvc #0
220 bx lr
221
222/********************************************************************
223 * Hypervisor exception vector and handlers
224 *
225 *
226 * The KVM/ARM Hypervisor ABI is defined as follows:
227 *
228 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
229 * instruction is issued since all traps are disabled when running the host
230 * kernel as per the Hyp-mode initialization at boot time.
231 *
232 * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
233 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
234 * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
235 * instructions are called from within Hyp-mode.
236 *
237 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
238 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
239 * exception vector code will check that the HVC comes from VMID==0 and if
240 * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
241 * - r0 contains a pointer to a HYP function
242 * - r1, r2, and r3 contain arguments to the above function.
243 * - The HYP function will be called with its arguments in r0, r1 and r2.
244 * On HYP function return, we return directly to SVC.
245 *
246 * Note that the above is used to execute code in Hyp-mode from a host-kernel
247 * point of view, and is a different concept from performing a world-switch and
248 * executing guest code SVC mode (with a VMID != 0).
249 */
250
251/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
252.macro bad_exception exception_code, panic_str
253 push {r0-r2}
254 mrrc p15, 6, r0, r1, c2 @ Read VTTBR
255 lsr r1, r1, #16
256 ands r1, r1, #0xff
257 beq 99f
258
259 load_vcpu @ Load VCPU pointer
260 .if \exception_code == ARM_EXCEPTION_DATA_ABORT
261 mrc p15, 4, r2, c5, c2, 0 @ HSR
262 mrc p15, 4, r1, c6, c0, 0 @ HDFAR
263 str r2, [vcpu, #VCPU_HSR]
264 str r1, [vcpu, #VCPU_HxFAR]
265 .endif
266 .if \exception_code == ARM_EXCEPTION_PREF_ABORT
267 mrc p15, 4, r2, c5, c2, 0 @ HSR
268 mrc p15, 4, r1, c6, c0, 2 @ HIFAR
269 str r2, [vcpu, #VCPU_HSR]
270 str r1, [vcpu, #VCPU_HxFAR]
271 .endif
272 mov r1, #\exception_code
273 b __kvm_vcpu_return
274
275 @ We were in the host already. Let's craft a panic-ing return to SVC.
27699: mrs r2, cpsr
277 bic r2, r2, #MODE_MASK
278 orr r2, r2, #SVC_MODE
279THUMB( orr r2, r2, #PSR_T_BIT )
280 msr spsr_cxsf, r2
281 mrs r1, ELR_hyp
282 ldr r2, =BSYM(panic)
283 msr ELR_hyp, r2
284 ldr r0, =\panic_str
285 eret
286.endm
287
288 .text
289
290 .align 5
291__kvm_hyp_vector:
292 .globl __kvm_hyp_vector
293
294 @ Hyp-mode exception vector
295 W(b) hyp_reset
296 W(b) hyp_undef
297 W(b) hyp_svc
298 W(b) hyp_pabt
299 W(b) hyp_dabt
300 W(b) hyp_hvc
301 W(b) hyp_irq
302 W(b) hyp_fiq
303
304 .align
305hyp_reset:
306 b hyp_reset
307
308 .align
309hyp_undef:
310 bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
311
312 .align
313hyp_svc:
314 bad_exception ARM_EXCEPTION_HVC, svc_die_str
315
316 .align
317hyp_pabt:
318 bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
319
320 .align
321hyp_dabt:
322 bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
323
324 .align
325hyp_hvc:
326 /*
327 * Getting here is either because of a trap from a guest or from calling
328 * HVC from the host kernel, which means "switch to Hyp mode".
329 */
330 push {r0, r1, r2}
331
332 @ Check syndrome register
333 mrc p15, 4, r1, c5, c2, 0 @ HSR
334 lsr r0, r1, #HSR_EC_SHIFT
335#ifdef CONFIG_VFPv3
336 cmp r0, #HSR_EC_CP_0_13
337 beq switch_to_guest_vfp
338#endif
339 cmp r0, #HSR_EC_HVC
340 bne guest_trap @ Not HVC instr.
341
342 /*
343 * Let's check if the HVC came from VMID 0 and allow simple
344 * switch to Hyp mode
345 */
346 mrrc p15, 6, r0, r2, c2
347 lsr r2, r2, #16
348 and r2, r2, #0xff
349 cmp r2, #0
350 bne guest_trap @ Guest called HVC
351
352host_switch_to_hyp:
353 pop {r0, r1, r2}
354
355 push {lr}
356 mrs lr, SPSR
357 push {lr}
358
359 mov lr, r0
360 mov r0, r1
361 mov r1, r2
362 mov r2, r3
363
364THUMB( orr lr, #1)
365 blx lr @ Call the HYP function
366
367 pop {lr}
368 msr SPSR_csxf, lr
369 pop {lr}
370 eret
371
372guest_trap:
373 load_vcpu @ Load VCPU pointer to r0
374 str r1, [vcpu, #VCPU_HSR]
375
376 @ Check if we need the fault information
377 lsr r1, r1, #HSR_EC_SHIFT
378 cmp r1, #HSR_EC_IABT
379 mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
380 beq 2f
381 cmp r1, #HSR_EC_DABT
382 bne 1f
383 mrc p15, 4, r2, c6, c0, 0 @ HDFAR
384
3852: str r2, [vcpu, #VCPU_HxFAR]
386
387 /*
388 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
389 *
390 * Abort on the stage 2 translation for a memory access from a
391 * Non-secure PL1 or PL0 mode:
392 *
393 * For any Access flag fault or Translation fault, and also for any
394 * Permission fault on the stage 2 translation of a memory access
395 * made as part of a translation table walk for a stage 1 translation,
396 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
397 * is UNKNOWN.
398 */
399
400 /* Check for permission fault, and S1PTW */
401 mrc p15, 4, r1, c5, c2, 0 @ HSR
402 and r0, r1, #HSR_FSC_TYPE
403 cmp r0, #FSC_PERM
404 tsteq r1, #(1 << 7) @ S1PTW
405 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
406 bne 3f
407
408 /* Resolve IPA using the xFAR */
409 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
410 isb
411 mrrc p15, 0, r0, r1, c7 @ PAR
412 tst r0, #1
413 bne 4f @ Failed translation
414 ubfx r2, r0, #12, #20
415 lsl r2, r2, #4
416 orr r2, r2, r1, lsl #24
417
4183: load_vcpu @ Load VCPU pointer to r0
419 str r2, [r0, #VCPU_HPFAR]
420
4211: mov r1, #ARM_EXCEPTION_HVC
422 b __kvm_vcpu_return
423
4244: pop {r0, r1, r2} @ Failed translation, return to guest
425 eret
426
427/*
428 * If VFPv3 support is not available, then we will not switch the VFP
429 * registers; however cp10 and cp11 accesses will still trap and fallback
430 * to the regular coprocessor emulation code, which currently will
431 * inject an undefined exception to the guest.
432 */
433#ifdef CONFIG_VFPv3
434switch_to_guest_vfp:
435 load_vcpu @ Load VCPU pointer to r0
436 push {r3-r7}
437
438 @ NEON/VFP used. Turn on VFP access.
439 set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
440
441 @ Switch VFP/NEON hardware state to the guest's
442 add r7, r0, #VCPU_VFP_HOST
443 ldr r7, [r7]
444 store_vfp_state r7
445 add r7, r0, #VCPU_VFP_GUEST
446 restore_vfp_state r7
447
448 pop {r3-r7}
449 pop {r0-r2}
450 eret
451#endif
452
453 .align
454hyp_irq:
455 push {r0, r1, r2}
456 mov r1, #ARM_EXCEPTION_IRQ
457 load_vcpu @ Load VCPU pointer to r0
458 b __kvm_vcpu_return
459
460 .align
461hyp_fiq:
462 b hyp_fiq
463
464 .ltorg
465
466__kvm_hyp_code_end:
467 .globl __kvm_hyp_code_end
468
469 .section ".rodata"
470
471und_die_str:
472 .ascii "unexpected undefined exception in Hyp mode at: %#08x"
473pabt_die_str:
474 .ascii "unexpected prefetch abort in Hyp mode at: %#08x"
475dabt_die_str:
476 .ascii "unexpected data abort in Hyp mode at: %#08x"
477svc_die_str:
478 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
new file mode 100644
index 000000000000..6a95d341e9c5
--- /dev/null
+++ b/arch/arm/kvm/interrupts_head.S
@@ -0,0 +1,441 @@
1#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
2#define VCPU_USR_SP (VCPU_USR_REG(13))
3#define VCPU_USR_LR (VCPU_USR_REG(14))
4#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
5
6/*
7 * Many of these macros need to access the VCPU structure, which is always
8 * held in r0. These macros should never clobber r1, as it is used to hold the
9 * exception code on the return path (except of course the macro that switches
10 * all the registers before the final jump to the VM).
11 */
12vcpu .req r0 @ vcpu pointer always in r0
13
14/* Clobbers {r2-r6} */
15.macro store_vfp_state vfp_base
16 @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
17 VFPFMRX r2, FPEXC
18 @ Make sure VFP is enabled so we can touch the registers.
19 orr r6, r2, #FPEXC_EN
20 VFPFMXR FPEXC, r6
21
22 VFPFMRX r3, FPSCR
23 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
24 beq 1f
25 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
26 @ we only need to save them if FPEXC_EX is set.
27 VFPFMRX r4, FPINST
28 tst r2, #FPEXC_FP2V
29 VFPFMRX r5, FPINST2, ne @ vmrsne
30 bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
31 VFPFMXR FPEXC, r6
321:
33 VFPFSTMIA \vfp_base, r6 @ Save VFP registers
34 stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
35.endm
36
37/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
38.macro restore_vfp_state vfp_base
39 VFPFLDMIA \vfp_base, r6 @ Load VFP registers
40 ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
41
42 VFPFMXR FPSCR, r3
43 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
44 beq 1f
45 VFPFMXR FPINST, r4
46 tst r2, #FPEXC_FP2V
47 VFPFMXR FPINST2, r5, ne
481:
49 VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
50.endm
51
52/* These are simply for the macros to work - values don't have meaning */
53.equ usr, 0
54.equ svc, 1
55.equ abt, 2
56.equ und, 3
57.equ irq, 4
58.equ fiq, 5
59
60.macro push_host_regs_mode mode
61 mrs r2, SP_\mode
62 mrs r3, LR_\mode
63 mrs r4, SPSR_\mode
64 push {r2, r3, r4}
65.endm
66
67/*
68 * Store all host persistent registers on the stack.
69 * Clobbers all registers, in all modes, except r0 and r1.
70 */
71.macro save_host_regs
72 /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
73 mrs r2, ELR_hyp
74 push {r2}
75
76 /* usr regs */
77 push {r4-r12} @ r0-r3 are always clobbered
78 mrs r2, SP_usr
79 mov r3, lr
80 push {r2, r3}
81
82 push_host_regs_mode svc
83 push_host_regs_mode abt
84 push_host_regs_mode und
85 push_host_regs_mode irq
86
87 /* fiq regs */
88 mrs r2, r8_fiq
89 mrs r3, r9_fiq
90 mrs r4, r10_fiq
91 mrs r5, r11_fiq
92 mrs r6, r12_fiq
93 mrs r7, SP_fiq
94 mrs r8, LR_fiq
95 mrs r9, SPSR_fiq
96 push {r2-r9}
97.endm
98
99.macro pop_host_regs_mode mode
100 pop {r2, r3, r4}
101 msr SP_\mode, r2
102 msr LR_\mode, r3
103 msr SPSR_\mode, r4
104.endm
105
106/*
107 * Restore all host registers from the stack.
108 * Clobbers all registers, in all modes, except r0 and r1.
109 */
110.macro restore_host_regs
111 pop {r2-r9}
112 msr r8_fiq, r2
113 msr r9_fiq, r3
114 msr r10_fiq, r4
115 msr r11_fiq, r5
116 msr r12_fiq, r6
117 msr SP_fiq, r7
118 msr LR_fiq, r8
119 msr SPSR_fiq, r9
120
121 pop_host_regs_mode irq
122 pop_host_regs_mode und
123 pop_host_regs_mode abt
124 pop_host_regs_mode svc
125
126 pop {r2, r3}
127 msr SP_usr, r2
128 mov lr, r3
129 pop {r4-r12}
130
131 pop {r2}
132 msr ELR_hyp, r2
133.endm
134
135/*
136 * Restore SP, LR and SPSR for a given mode. offset is the offset of
137 * this mode's registers from the VCPU base.
138 *
139 * Assumes vcpu pointer in vcpu reg
140 *
141 * Clobbers r1, r2, r3, r4.
142 */
143.macro restore_guest_regs_mode mode, offset
144 add r1, vcpu, \offset
145 ldm r1, {r2, r3, r4}
146 msr SP_\mode, r2
147 msr LR_\mode, r3
148 msr SPSR_\mode, r4
149.endm
150
151/*
152 * Restore all guest registers from the vcpu struct.
153 *
154 * Assumes vcpu pointer in vcpu reg
155 *
156 * Clobbers *all* registers.
157 */
158.macro restore_guest_regs
159 restore_guest_regs_mode svc, #VCPU_SVC_REGS
160 restore_guest_regs_mode abt, #VCPU_ABT_REGS
161 restore_guest_regs_mode und, #VCPU_UND_REGS
162 restore_guest_regs_mode irq, #VCPU_IRQ_REGS
163
164 add r1, vcpu, #VCPU_FIQ_REGS
165 ldm r1, {r2-r9}
166 msr r8_fiq, r2
167 msr r9_fiq, r3
168 msr r10_fiq, r4
169 msr r11_fiq, r5
170 msr r12_fiq, r6
171 msr SP_fiq, r7
172 msr LR_fiq, r8
173 msr SPSR_fiq, r9
174
175 @ Load return state
176 ldr r2, [vcpu, #VCPU_PC]
177 ldr r3, [vcpu, #VCPU_CPSR]
178 msr ELR_hyp, r2
179 msr SPSR_cxsf, r3
180
181 @ Load user registers
182 ldr r2, [vcpu, #VCPU_USR_SP]
183 ldr r3, [vcpu, #VCPU_USR_LR]
184 msr SP_usr, r2
185 mov lr, r3
186 add vcpu, vcpu, #(VCPU_USR_REGS)
187 ldm vcpu, {r0-r12}
188.endm
189
190/*
191 * Save SP, LR and SPSR for a given mode. offset is the offset of
192 * this mode's registers from the VCPU base.
193 *
194 * Assumes vcpu pointer in vcpu reg
195 *
196 * Clobbers r2, r3, r4, r5.
197 */
198.macro save_guest_regs_mode mode, offset
199 add r2, vcpu, \offset
200 mrs r3, SP_\mode
201 mrs r4, LR_\mode
202 mrs r5, SPSR_\mode
203 stm r2, {r3, r4, r5}
204.endm
205
206/*
207 * Save all guest registers to the vcpu struct
208 * Expects guest's r0, r1, r2 on the stack.
209 *
210 * Assumes vcpu pointer in vcpu reg
211 *
212 * Clobbers r2, r3, r4, r5.
213 */
214.macro save_guest_regs
215 @ Store usr registers
216 add r2, vcpu, #VCPU_USR_REG(3)
217 stm r2, {r3-r12}
218 add r2, vcpu, #VCPU_USR_REG(0)
219 pop {r3, r4, r5} @ r0, r1, r2
220 stm r2, {r3, r4, r5}
221 mrs r2, SP_usr
222 mov r3, lr
223 str r2, [vcpu, #VCPU_USR_SP]
224 str r3, [vcpu, #VCPU_USR_LR]
225
226 @ Store return state
227 mrs r2, ELR_hyp
228 mrs r3, spsr
229 str r2, [vcpu, #VCPU_PC]
230 str r3, [vcpu, #VCPU_CPSR]
231
232 @ Store other guest registers
233 save_guest_regs_mode svc, #VCPU_SVC_REGS
234 save_guest_regs_mode abt, #VCPU_ABT_REGS
235 save_guest_regs_mode und, #VCPU_UND_REGS
236 save_guest_regs_mode irq, #VCPU_IRQ_REGS
237.endm
238
239/* Reads cp15 registers from hardware and stores them in memory
240 * @store_to_vcpu: If 0, registers are written in-order to the stack,
241 * otherwise to the VCPU struct pointed to by vcpup
242 *
243 * Assumes vcpu pointer in vcpu reg
244 *
245 * Clobbers r2 - r12
246 */
247.macro read_cp15_state store_to_vcpu
248 mrc p15, 0, r2, c1, c0, 0 @ SCTLR
249 mrc p15, 0, r3, c1, c0, 2 @ CPACR
250 mrc p15, 0, r4, c2, c0, 2 @ TTBCR
251 mrc p15, 0, r5, c3, c0, 0 @ DACR
252 mrrc p15, 0, r6, r7, c2 @ TTBR 0
253 mrrc p15, 1, r8, r9, c2 @ TTBR 1
254 mrc p15, 0, r10, c10, c2, 0 @ PRRR
255 mrc p15, 0, r11, c10, c2, 1 @ NMRR
256 mrc p15, 2, r12, c0, c0, 0 @ CSSELR
257
258 .if \store_to_vcpu == 0
259 push {r2-r12} @ Push CP15 registers
260 .else
261 str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
262 str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
263 str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
264 str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
265 add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
266 strd r6, r7, [r2]
267 add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
268 strd r8, r9, [r2]
269 str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
270 str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
271 str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
272 .endif
273
274 mrc p15, 0, r2, c13, c0, 1 @ CID
275 mrc p15, 0, r3, c13, c0, 2 @ TID_URW
276 mrc p15, 0, r4, c13, c0, 3 @ TID_URO
277 mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
278 mrc p15, 0, r6, c5, c0, 0 @ DFSR
279 mrc p15, 0, r7, c5, c0, 1 @ IFSR
280 mrc p15, 0, r8, c5, c1, 0 @ ADFSR
281 mrc p15, 0, r9, c5, c1, 1 @ AIFSR
282 mrc p15, 0, r10, c6, c0, 0 @ DFAR
283 mrc p15, 0, r11, c6, c0, 2 @ IFAR
284 mrc p15, 0, r12, c12, c0, 0 @ VBAR
285
286 .if \store_to_vcpu == 0
287 push {r2-r12} @ Push CP15 registers
288 .else
289 str r2, [vcpu, #CP15_OFFSET(c13_CID)]
290 str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
291 str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
292 str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
293 str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
294 str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
295 str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
296 str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
297 str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
298 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
299 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
300 .endif
301.endm
302
303/*
304 * Reads cp15 registers from memory and writes them to hardware
305 * @read_from_vcpu: If 0, registers are read in-order from the stack
306 *		  (matching the push order of read_cp15_state),
307 *		  otherwise from the VCPU struct pointed to by the vcpu reg
308 *
309 * Assumes vcpu pointer in vcpu reg
310 */
310.macro write_cp15_state read_from_vcpu
	@ The two register batches are restored in the reverse order of
	@ read_cp15_state, so pops line up with the earlier pushes.
311	.if \read_from_vcpu == 0
312	pop	{r2-r12}
313	.else
314	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
315	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
316	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
317	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
318	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
319	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
320	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
321	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
322	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
323	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
324	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
325	.endif
326
327	mcr	p15, 0, r2, c13, c0, 1	@ CID
328	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW
329	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO
330	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV
331	mcr	p15, 0, r6, c5, c0, 0	@ DFSR
332	mcr	p15, 0, r7, c5, c0, 1	@ IFSR
333	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR
334	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
335	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
336	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
337	mcr	p15, 0, r12, c12, c0, 0	@ VBAR
338
339	.if \read_from_vcpu == 0
340	pop	{r2-r12}
341	.else
342	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
343	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
344	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
345	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	@ r12 is used as a scratch address register here; its final value
	@ (CSSELR) is only loaded afterwards.
346	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0)
347	ldrd	r6, r7, [r12]
348	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1)
349	ldrd	r8, r9, [r12]
350	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
351	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
352	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
353	.endif
354
355	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
356	mcr	p15, 0, r3, c1, c0, 2	@ CPACR
357	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR
358	mcr	p15, 0, r5, c3, c0, 0	@ DACR
359	mcrr	p15, 0, r6, r7, c2	@ TTBR 0
360	mcrr	p15, 1, r8, r9, c2	@ TTBR 1
361	mcr	p15, 0, r10, c10, c2, 0	@ PRRR
362	mcr	p15, 0, r11, c10, c2, 1	@ NMRR
363	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR
364.endm
365
366/*
367 * Save the VGIC CPU state into memory
368 *
369 * Assumes vcpu pointer in vcpu reg
370 *
371 * Currently an intentionally empty placeholder: no VGIC state is
372 * saved in this configuration.
373 */
371.macro save_vgic_state
372.endm
373
374/*
375 * Restore the VGIC CPU state from memory
376 *
377 * Assumes vcpu pointer in vcpu reg
378 *
379 * Currently an intentionally empty placeholder: no VGIC state is
380 * restored in this configuration.
381 */
379.macro restore_vgic_state
380.endm
381
@ Symbolic \operation values used by the entry/exit configuration macros
@ below (set_hstr, set_hcptr, set_hdcr, configure_hyp_role).
382.equ vmentry,	0
383.equ vmexit,	1
384
385/* Configures the HSTR (Hyp System Trap Register) on entry/return
386 * (hardware reset value is 0).  Clobbers r2 and r3. */
387.macro set_hstr operation
388	mrc	p15, 4, r2, c1, c1, 3
389	ldr	r3, =HSTR_T(15)
390	.if \operation == vmentry
391	orr	r2, r2, r3		@ Trap CR{15}
392	.else
393	bic	r2, r2, r3		@ Don't trap any CRx accesses
394	.endif
395	mcr	p15, 4, r2, c1, c1, 3
396.endm
397
398/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
399 * (hardware reset value is 0). Keep previous value in r2 so the caller
400 * can restore it later; the updated value is built in r3. */
400.macro set_hcptr operation, mask
401	mrc	p15, 4, r2, c1, c1, 2
402	ldr	r3, =\mask
403	.if \operation == vmentry
404	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
405	.else
406	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
407	.endif
408	mcr	p15, 4, r3, c1, c1, 2
409.endm
410
411/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
412 * (hardware reset value is 0).  Only the performance-monitor trap bits
413 * (HDCR_TPM, HDCR_TPMCR) are toggled.  Clobbers r2 and r3. */
413.macro set_hdcr operation
414	mrc	p15, 4, r2, c1, c1, 1
415	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
416	.if \operation == vmentry
417	orr	r2, r2, r3		@ Trap some perfmon accesses
418	.else
419	bic	r2, r2, r3		@ Don't trap any perfmon accesses
420	.endif
421	mcr	p15, 4, r2, c1, c1, 1
422.endm
423
424/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc
425 * by setting/clearing HCR_GUEST_MASK in the HCR.  Clobbers r2 and r3. */
425.macro configure_hyp_role operation
426	mrc	p15, 4, r2, c1, c1, 0	@ HCR
427	bic	r2, r2, #HCR_VIRT_EXCP_MASK
428	ldr	r3, =HCR_GUEST_MASK
429	.if \operation == vmentry
430	orr	r2, r2, r3
	@ Fold in the per-vcpu pending interrupt lines; presumably these are
	@ the virtual IRQ/FIQ bits of the HCR — confirm against VCPU_IRQ_LINES.
431	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
432	orr	r2, r2, r3
433	.else
434	bic	r2, r2, r3
435	.endif
436	mcr	p15, 4, r2, c1, c1, 0
437.endm
438
@ Load the vcpu pointer into the vcpu register from HTPIDR (Hyp software
@ thread ID register); presumably stashed there on world switch — see the
@ entry path.
439.macro load_vcpu
440	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
441.endm
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
new file mode 100644
index 000000000000..0144baf82904
--- /dev/null
+++ b/arch/arm/kvm/mmio.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/kvm_host.h>
20#include <asm/kvm_mmio.h>
21#include <asm/kvm_emulate.h>
22#include <trace/events/kvm.h>
23
24#include "trace.h"
25
26/**
27 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
28 * @vcpu: The VCPU pointer
29 * @run: The VCPU run struct containing the mmio data
30 *
31 * This should only be called after returning from userspace for MMIO load
32 * emulation.
33 */
34int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
35{
36 __u32 *dest;
37 unsigned int len;
38 int mask;
39
40 if (!run->mmio.is_write) {
41 dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
42 memset(dest, 0, sizeof(int));
43
44 len = run->mmio.len;
45 if (len > 4)
46 return -EINVAL;
47
48 memcpy(dest, run->mmio.data, len);
49
50 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
51 *((u64 *)run->mmio.data));
52
53 if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
54 mask = 1U << ((len * 8) - 1);
55 *dest = (*dest ^ mask) - mask;
56 }
57 }
58
59 return 0;
60}
61
62static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
63 struct kvm_exit_mmio *mmio)
64{
65 unsigned long rt, len;
66 bool is_write, sign_extend;
67
68 if ((vcpu->arch.hsr >> 8) & 1) {
69 /* cache operation on I/O addr, tell guest unsupported */
70 kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
71 return 1;
72 }
73
74 if ((vcpu->arch.hsr >> 7) & 1) {
75 /* page table accesses IO mem: tell guest to fix its TTBR */
76 kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
77 return 1;
78 }
79
80 switch ((vcpu->arch.hsr >> 22) & 0x3) {
81 case 0:
82 len = 1;
83 break;
84 case 1:
85 len = 2;
86 break;
87 case 2:
88 len = 4;
89 break;
90 default:
91 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
92 return -EFAULT;
93 }
94
95 is_write = vcpu->arch.hsr & HSR_WNR;
96 sign_extend = vcpu->arch.hsr & HSR_SSE;
97 rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
98
99 if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
100 /* IO memory trying to read/write pc */
101 kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
102 return 1;
103 }
104
105 mmio->is_write = is_write;
106 mmio->phys_addr = fault_ipa;
107 mmio->len = len;
108 vcpu->arch.mmio_decode.sign_extend = sign_extend;
109 vcpu->arch.mmio_decode.rt = rt;
110
111 /*
112 * The MMIO instruction is emulated and should not be re-executed
113 * in the guest.
114 */
115 kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
116 return 0;
117}
118
/*
 * Handle a guest abort on I/O memory: decode the access from the HSR and
 * hand it to user space via run->mmio.  Returns decode_hsr()'s verdict on
 * failure, otherwise 0 so the caller exits to user space for emulation.
 */
119int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
120		 phys_addr_t fault_ipa)
121{
122	struct kvm_exit_mmio mmio;
123	unsigned long rt;
124	int ret;
125
126	/*
127	 * Prepare MMIO operation. First stash it in a private
128	 * structure that we can use for in-kernel emulation. If the
129	 * kernel can't handle it, copy it into run->mmio and let user
130	 * space do its magic.
131	 */
132
133	if (vcpu->arch.hsr & HSR_ISV) {
134		ret = decode_hsr(vcpu, fault_ipa, &mmio);
135		if (ret)
136			return ret;
137	} else {
		/* No valid syndrome: would need a full instruction decoder. */
138		kvm_err("load/store instruction decoding not implemented\n");
139		return -ENOSYS;
140	}
141
	/* rt was recorded by decode_hsr() just above. */
142	rt = vcpu->arch.mmio_decode.rt;
143	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
144					 KVM_TRACE_MMIO_READ_UNSATISFIED,
145			mmio.len, fault_ipa,
146			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
147
148	if (mmio.is_write)
149		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
150
151	kvm_prepare_mmio(run, &mmio);
152	return 0;
153}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
new file mode 100644
index 000000000000..f30e13163a96
--- /dev/null
+++ b/arch/arm/kvm/mmu.c
@@ -0,0 +1,787 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/mman.h>
20#include <linux/kvm_host.h>
21#include <linux/io.h>
22#include <trace/events/kvm.h>
23#include <asm/idmap.h>
24#include <asm/pgalloc.h>
25#include <asm/cacheflush.h>
26#include <asm/kvm_arm.h>
27#include <asm/kvm_mmu.h>
28#include <asm/kvm_mmio.h>
29#include <asm/kvm_asm.h>
30#include <asm/kvm_emulate.h>
31#include <asm/mach/map.h>
32#include <trace/events/kvm.h>
33
34#include "trace.h"
35
36extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
37
38static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39
/* Invalidate all stage-2 TLB entries for @kvm's VMID via a Hyp call. */
40static void kvm_tlb_flush_vmid(struct kvm *kvm)
41{
42	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
43}
44
/* Install @new_pte and clean the entry so the table walker sees it. */
45static void kvm_set_pte(pte_t *pte, pte_t new_pte)
46{
47	pte_val(*pte) = new_pte;
48	/*
49	 * flush_pmd_entry just takes a void pointer and cleans the necessary
50	 * cache entries, so we can reuse the function for ptes.
51	 */
52	flush_pmd_entry(pte);
53}
54
55static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
56 int min, int max)
57{
58 void *page;
59
60 BUG_ON(max > KVM_NR_MEM_OBJS);
61 if (cache->nobjs >= min)
62 return 0;
63 while (cache->nobjs < max) {
64 page = (void *)__get_free_page(PGALLOC_GFP);
65 if (!page)
66 return -ENOMEM;
67 cache->objects[cache->nobjs++] = page;
68 }
69 return 0;
70}
71
/* Release every page still held in the memory cache @mc. */
72static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
73{
74	while (mc->nobjs)
75		free_page((unsigned long)mc->objects[--mc->nobjs]);
76}
77
/*
 * Pop one pre-allocated page from @mc.  The cache must have been topped
 * up beforehand (BUG otherwise) — this lets page-table allocation happen
 * without sleeping while holding the mmu_lock.
 */
78static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
79{
80	void *p;
81
82	BUG_ON(!mc || !mc->nobjs);
83	p = mc->objects[--mc->nobjs];
84	return p;
85}
86
/*
 * Free the level-3 (pte) tables linked from every present table entry of
 * @pmd.  @addr starts at the pmd's base address and advances by PMD_SIZE
 * per entry, matching each entry's coverage.
 */
87static void free_ptes(pmd_t *pmd, unsigned long addr)
88{
89	pte_t *pte;
90	unsigned int i;
91
92	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
93		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
94			pte = pte_offset_kernel(pmd, addr);
95			pte_free_kernel(NULL, pte);
96		}
97		pmd++;
98	}
99}
100
101/**
102 * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
103 *
104 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
105 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
106 */
107void free_hyp_pmds(void)
108{
109	pgd_t *pgd;
110	pud_t *pud;
111	pmd_t *pmd;
112	unsigned long addr;
113
114	mutex_lock(&kvm_hyp_pgd_mutex);
	/*
	 * Walk [PAGE_OFFSET, top of address space); the loop terminates when
	 * addr wraps around to 0 after the last PGDIR_SIZE step.
	 */
115	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
116		pgd = hyp_pgd + pgd_index(addr);
117		pud = pud_offset(pgd, addr);
118
119		if (pud_none(*pud))
120			continue;
121		BUG_ON(pud_bad(*pud));
122
		/* Free the child pte tables first, then the pmd table itself. */
123		pmd = pmd_offset(pud, addr);
124		free_ptes(pmd, addr);
125		pmd_free(NULL, pmd);
126		pud_clear(pud);
127	}
128	mutex_unlock(&kvm_hyp_pgd_mutex);
129}
130
/*
 * Populate the pte entries under @pmd so that the kernel virtual range
 * [start, end) is mapped into Hyp at the same virtual address.
 * NOTE(review): BUG_ON(!virt_addr_valid(addr)) means only directly-mapped
 * (lowmem) kernel addresses are accepted here — vmalloc addresses would
 * trip this; confirm against the callers of create_hyp_mappings().
 */
131static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
132				    unsigned long end)
133{
134	pte_t *pte;
135	unsigned long addr;
136	struct page *page;
137
138	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
139		pte = pte_offset_kernel(pmd, addr);
140		BUG_ON(!virt_addr_valid(addr));
141		page = virt_to_page(addr);
142		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
143	}
144}
145
/*
 * Populate the pte entries under @pmd with device mappings for the Hyp
 * virtual range [start, end), consuming consecutive physical frames from
 * *pfn_base (which is advanced past the frames used).  The frames must
 * be I/O memory, not RAM (BUG_ON(pfn_valid())).
 */
146static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
147				       unsigned long end,
148				       unsigned long *pfn_base)
149{
150	pte_t *pte;
151	unsigned long addr;
152
153	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
154		pte = pte_offset_kernel(pmd, addr);
155		BUG_ON(pfn_valid(*pfn_base));
156		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
157		(*pfn_base)++;
158	}
159}
160
/*
 * Walk the pmd entries under @pud for [start, end), allocating level-3
 * tables on demand, and fill them in via the pte helpers above.
 * Returns 0 on success or -ENOMEM if a pte table cannot be allocated.
 */
161static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
162				   unsigned long end, unsigned long *pfn_base)
163{
164	pmd_t *pmd;
165	pte_t *pte;
166	unsigned long addr, next;
167
168	for (addr = start; addr < end; addr = next) {
169		pmd = pmd_offset(pud, addr);
170
		/* Hyp tables never use section mappings here. */
171		BUG_ON(pmd_sect(*pmd));
172
173		if (pmd_none(*pmd)) {
174			pte = pte_alloc_one_kernel(NULL, addr);
175			if (!pte) {
176				kvm_err("Cannot allocate Hyp pte\n");
177				return -ENOMEM;
178			}
179			pmd_populate_kernel(NULL, pmd, pte);
180		}
181
182		next = pmd_addr_end(addr, end);
183
184		/*
185		 * If pfn_base is NULL, we map kernel pages into HYP with the
186		 * virtual address. Otherwise, this is considered an I/O
187		 * mapping and we map the physical region starting at
188		 * *pfn_base to [start, end[.
189		 */
190		if (!pfn_base)
191			create_hyp_pte_mappings(pmd, addr, next);
192		else
193			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
194	}
195
196	return 0;
197}
198
/*
 * Map [from, to) into the Hyp page tables, allocating pmd tables as
 * needed.  @pfn_base selects between normal kernel mappings (NULL) and
 * device mappings (see create_hyp_pmd_mappings).  Serialized by
 * kvm_hyp_pgd_mutex; rejects ranges below PAGE_OFFSET with -EINVAL.
 */
199static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
200{
201	unsigned long start = (unsigned long)from;
202	unsigned long end = (unsigned long)to;
203	pgd_t *pgd;
204	pud_t *pud;
205	pmd_t *pmd;
206	unsigned long addr, next;
207	int err = 0;
208
209	BUG_ON(start > end);
210	if (start < PAGE_OFFSET)
211		return -EINVAL;
212
213	mutex_lock(&kvm_hyp_pgd_mutex);
214	for (addr = start; addr < end; addr = next) {
215		pgd = hyp_pgd + pgd_index(addr);
216		pud = pud_offset(pgd, addr);
217
218		if (pud_none_or_clear_bad(pud)) {
219			pmd = pmd_alloc_one(NULL, addr);
220			if (!pmd) {
221				kvm_err("Cannot allocate Hyp pmd\n");
222				err = -ENOMEM;
223				goto out;
224			}
225			pud_populate(NULL, pud, pmd);
226		}
227
228		next = pgd_addr_end(addr, end);
229		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
230		if (err)
231			goto out;
232	}
233out:
234	mutex_unlock(&kvm_hyp_pgd_mutex);
235	return err;
236}
237
238/**
239 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
240 * @from:	The virtual kernel start address of the range
241 * @to:	The virtual kernel end address of the range (exclusive)
242 *
243 * The same virtual address as the kernel virtual address is also used in
244 * Hyp-mode mapping to the same underlying physical pages.
245 *
246 * Note: Wrapping around zero in the "to" address is not supported.
247 *
248 * Returns 0 on success, or -EINVAL/-ENOMEM from __create_hyp_mappings().
249 */
248int create_hyp_mappings(void *from, void *to)
249{
250	return __create_hyp_mappings(from, to, NULL);
251}
252
253/**
254 * create_hyp_io_mappings - map a physical IO range in Hyp mode
255 * @from:	The virtual HYP start address of the range
256 * @to:	The virtual HYP end address of the range (exclusive)
257 * @addr:	The physical start address which gets mapped
258 *
259 * Returns 0 on success, or -EINVAL/-ENOMEM from __create_hyp_mappings().
260 */
259int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
260{
261	unsigned long pfn = __phys_to_pfn(addr);
262	return __create_hyp_mappings(from, to, &pfn);
263}
264
265/**
266 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
267 * @kvm:	The KVM struct pointer for the VM.
268 *
269 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
270 * support either full 40-bit input addresses or limited to 32-bit input
271 * addresses). Clears the allocated pages.
272 *
273 * Note we don't need locking here as this is only called when the VM is
274 * created, which can only be done once.
275 *
276 * Returns 0 on success, -EINVAL if a pgd already exists, -ENOMEM on
277 * allocation failure.
278 */
276int kvm_alloc_stage2_pgd(struct kvm *kvm)
277{
278	pgd_t *pgd;
279
280	if (kvm->arch.pgd != NULL) {
281		kvm_err("kvm_arch already initialized?\n");
282		return -EINVAL;
283	}
284
285	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
286	if (!pgd)
287		return -ENOMEM;
288
289	/* stage-2 pgd must be aligned to its size */
290	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
291
	/* Zero it and clean to memory so the hardware walker sees zeros. */
292	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
293	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
294	kvm->arch.pgd = pgd;
295
296	return 0;
297}
298
/*
 * Unlink and free the pmd table beneath @pud, then drop the reference the
 * table held on the pud page (see the get_page in stage2_set_pte).
 */
299static void clear_pud_entry(pud_t *pud)
300{
301	pmd_t *pmd_table = pmd_offset(pud, 0);
302	pud_clear(pud);
303	pmd_free(NULL, pmd_table);
304	put_page(virt_to_page(pud));
305}
306
/*
 * Unlink and free the pte table beneath @pmd, then drop the reference the
 * table held on the pmd page.
 */
307static void clear_pmd_entry(pmd_t *pmd)
308{
309	pte_t *pte_table = pte_offset_kernel(pmd, 0);
310	pmd_clear(pmd);
311	pte_free_kernel(NULL, pte_table);
312	put_page(virt_to_page(pmd));
313}
314
/*
 * True when only the initial reference on the pmd page remains, i.e. no
 * live entries (each holding a get_page reference) are left beneath it.
 */
315static bool pmd_empty(pmd_t *pmd)
316{
317	struct page *pmd_page = virt_to_page(pmd);
318	return page_count(pmd_page) == 1;
319}
320
/* Zap a present pte and drop the reference it held on its pte-table page. */
321static void clear_pte_entry(pte_t *pte)
322{
323	if (pte_present(*pte)) {
324		kvm_set_pte(pte, __pte(0));
325		put_page(virt_to_page(pte));
326	}
327}
328
/*
 * True when only the initial reference on the pte-table page remains,
 * i.e. every pte in the table has been cleared.
 */
329static bool pte_empty(pte_t *pte)
330{
331	struct page *pte_page = virt_to_page(pte);
332	return page_count(pte_page) == 1;
333}
334
335/**
336 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
337 * @kvm: The VM pointer
338 * @start: The intermediate physical base address of the range to unmap
339 * @size: The size of the area to unmap
340 *
341 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
342 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
343 * destroying the VM), otherwise another faulting VCPU may come in and mess
344 * with things behind our backs.
345 */
346static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
347{
348 pgd_t *pgd;
349 pud_t *pud;
350 pmd_t *pmd;
351 pte_t *pte;
352 phys_addr_t addr = start, end = start + size;
353 u64 range;
354
355 while (addr < end) {
356 pgd = kvm->arch.pgd + pgd_index(addr);
357 pud = pud_offset(pgd, addr);
358 if (pud_none(*pud)) {
359 addr += PUD_SIZE;
360 continue;
361 }
362
363 pmd = pmd_offset(pud, addr);
364 if (pmd_none(*pmd)) {
365 addr += PMD_SIZE;
366 continue;
367 }
368
369 pte = pte_offset_kernel(pmd, addr);
370 clear_pte_entry(pte);
371 range = PAGE_SIZE;
372
373 /* If we emptied the pte, walk back up the ladder */
374 if (pte_empty(pte)) {
375 clear_pmd_entry(pmd);
376 range = PMD_SIZE;
377 if (pmd_empty(pmd)) {
378 clear_pud_entry(pud);
379 range = PUD_SIZE;
380 }
381 }
382
383 addr += range;
384 }
385}
386
387/**
388 * kvm_free_stage2_pgd - free all stage-2 tables
389 * @kvm:	The KVM struct pointer for the VM.
390 *
391 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
392 * underlying level-2 and level-3 tables before freeing the actual level-1 table
393 * and setting the struct pointer to NULL.
394 *
395 * Note we don't need locking here as this is only called when the VM is
396 * destroyed, which can only be done once.
397 */
398void kvm_free_stage2_pgd(struct kvm *kvm)
399{
400	if (kvm->arch.pgd == NULL)
401		return;
402
	/* Unmap the whole IPA space; this tears down all child tables. */
403	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
404	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
405	kvm->arch.pgd = NULL;
406}
407
408
/*
 * Install @new_pte in @kvm's stage-2 tables at @addr, allocating missing
 * intermediate tables from @cache (pre-filled, so no sleeping under the
 * mmu_lock).  With @iomap set, an already-present pte is an error.  The
 * get_page calls make the table pages' refcounts track the number of
 * live entries beneath them (see pte_empty/pmd_empty).
 */
409static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
410			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
411{
412	pgd_t *pgd;
413	pud_t *pud;
414	pmd_t *pmd;
415	pte_t *pte, old_pte;
416
417	/* Create 2nd stage page table mapping - Level 1 */
418	pgd = kvm->arch.pgd + pgd_index(addr);
419	pud = pud_offset(pgd, addr);
420	if (pud_none(*pud)) {
421		if (!cache)
422			return 0; /* ignore calls from kvm_set_spte_hva */
423		pmd = mmu_memory_cache_alloc(cache);
424		pud_populate(NULL, pud, pmd);
425		pmd += pmd_index(addr);
426		get_page(virt_to_page(pud));
427	} else
428		pmd = pmd_offset(pud, addr);
429
430	/* Create 2nd stage page table mapping - Level 2 */
431	if (pmd_none(*pmd)) {
432		if (!cache)
433			return 0; /* ignore calls from kvm_set_spte_hva */
434		pte = mmu_memory_cache_alloc(cache);
435		clean_pte_table(pte);
436		pmd_populate_kernel(NULL, pmd, pte);
437		pte += pte_index(addr);
438		get_page(virt_to_page(pmd));
439	} else
440		pte = pte_offset_kernel(pmd, addr);
441
442	if (iomap && pte_present(*pte))
443		return -EFAULT;
444
445	/* Create 2nd stage page table mapping - Level 3 */
446	old_pte = *pte;
447	kvm_set_pte(pte, *new_pte);
448	if (pte_present(old_pte))
		/* Replacing a live mapping: stale translations must go. */
449		kvm_tlb_flush_vmid(kvm);
450	else
		/* Fresh entry: account it on the pte-table page. */
451		get_page(virt_to_page(pte));
452
453	return 0;
454}
455
456/**
457 * kvm_phys_addr_ioremap - map a device range to guest IPA
458 *
459 * @kvm:	The KVM pointer
460 * @guest_ipa:	The IPA at which to insert the mapping
461 * @pa:	The physical address of the device
462 * @size:	The size of the mapping
463 *
464 * Returns 0 on success, or a negative errno from cache top-up or
465 * stage2_set_pte() (-EFAULT if a page is already mapped there).
466 */
464int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
465			  phys_addr_t pa, unsigned long size)
466{
467	phys_addr_t addr, end;
468	int ret = 0;
469	unsigned long pfn;
470	struct kvm_mmu_memory_cache cache = { 0, };
471
	/* Round the end of the range up to a page boundary. */
472	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
473	pfn = __phys_to_pfn(pa);
474
475	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
476		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
477
		/* Top up outside the lock; stage2_set_pte must not sleep. */
478		ret = mmu_topup_memory_cache(&cache, 2, 2);
479		if (ret)
480			goto out;
481		spin_lock(&kvm->mmu_lock);
482		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
483		spin_unlock(&kvm->mmu_lock);
484		if (ret)
485			goto out;
486
487		pfn++;
488	}
489
490out:
491	mmu_free_memory_cache(&cache);
492	return ret;
493}
494
/* Ensure the icache cannot serve stale lines for the guest page at @gfn. */
495static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
496{
497	/*
498	 * If we are going to insert an instruction page and the icache is
499	 * either VIPT or PIPT, there is a potential problem where the host
500	 * (or another VM) may have used the same page as this guest, and we
501	 * read incorrect data from the icache.  If we're using a PIPT cache,
502	 * we can invalidate just that page, but if we are using a VIPT cache
503	 * we need to invalidate the entire icache - damn shame - as written
504	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
505	 *
506	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
507	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
508	 */
509	if (icache_is_pipt()) {
510		unsigned long hva = gfn_to_hva(kvm, gfn);
511		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
512	} else if (!icache_is_vivt_asid_tagged()) {
513		/* any kind of VIPT cache */
514		__flush_icache_all();
515	}
516}
517
/*
 * Handle a stage-2 fault on normal RAM: pin the backing user page with
 * gfn_to_pfn_prot() and install a stage-2 mapping for it, retrying (by
 * returning 0 so the guest simply re-faults) if an MMU notifier
 * invalidated the page in the meantime.
 */
518static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
519			  gfn_t gfn, struct kvm_memory_slot *memslot,
520			  unsigned long fault_status)
521{
522	pte_t new_pte;
523	pfn_t pfn;
524	int ret;
525	bool write_fault, writable;
526	unsigned long mmu_seq;
527	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
528
529	write_fault = kvm_is_write_fault(vcpu->arch.hsr);
530	if (fault_status == FSC_PERM && !write_fault) {
531		kvm_err("Unexpected L2 read permission error\n");
532		return -EFAULT;
533	}
534
535	/* We need minimum second+third level pages */
536	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
537	if (ret)
538		return ret;
539
540	mmu_seq = vcpu->kvm->mmu_notifier_seq;
541	/*
542	 * Ensure the read of mmu_notifier_seq happens before we call
543	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
544	 * the page we just got a reference to gets unmapped before we have a
545	 * chance to grab the mmu_lock, which ensure that if the page gets
546	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
547	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
548	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
549	 */
550	smp_rmb();
551
552	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
553	if (is_error_pfn(pfn))
554		return -EFAULT;
555
556	new_pte = pfn_pte(pfn, PAGE_S2);
557	coherent_icache_guest_page(vcpu->kvm, gfn);
558
559	spin_lock(&vcpu->kvm->mmu_lock);
	/* A notifier invalidation raced with us: drop the pfn and refault. */
560	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
561		goto out_unlock;
562	if (writable) {
563		pte_val(new_pte) |= L_PTE_S2_RDWR;
564		kvm_set_pfn_dirty(pfn);
565	}
566	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
567
568out_unlock:
569	spin_unlock(&vcpu->kvm->mmu_lock);
570	kvm_release_pfn_clean(pfn);
571	return 0;
572}
573
574/**
575 * kvm_handle_guest_abort - handles all 2nd stage aborts
576 * @vcpu:	the VCPU pointer
577 * @run:	the kvm_run structure
578 *
579 * Any abort that gets to the host is almost guaranteed to be caused by a
580 * missing second stage translation table entry, which can mean that either the
581 * guest simply needs more memory and we must allocate an appropriate page or it
582 * can mean that the guest tried to access I/O memory, which is emulated by user
583 * space. The distinction is based on the IPA causing the fault and whether this
584 * memory region has been registered as standard RAM by user space.
585 *
586 * Returns > 0 to resume the guest, 0 to exit to user space (MMIO), or a
587 * negative error code.
588 */
586int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
587{
588	unsigned long hsr_ec;
589	unsigned long fault_status;
590	phys_addr_t fault_ipa;
591	struct kvm_memory_slot *memslot;
592	bool is_iabt;
593	gfn_t gfn;
594	int ret, idx;
595
596	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
597	is_iabt = (hsr_ec == HSR_EC_IABT);
	/* HPFAR holds IPA[39:12] >> 4; shift left 8 to rebuild the IPA. */
598	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
599
600	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
601			      vcpu->arch.hxfar, fault_ipa);
602
603	/* Check the stage-2 fault is trans. fault or write fault */
604	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
605	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
606		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
607			hsr_ec, fault_status);
608		return -EFAULT;
609	}
610
	/* Memslots are protected by SRCU. */
611	idx = srcu_read_lock(&vcpu->kvm->srcu);
612
613	gfn = fault_ipa >> PAGE_SHIFT;
	/* Outside any memslot: I/O access (emulated) or an injected abort. */
614	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
615		if (is_iabt) {
616			/* Prefetch Abort on I/O address */
617			kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
618			ret = 1;
619			goto out_unlock;
620		}
621
622		if (fault_status != FSC_FAULT) {
623			kvm_err("Unsupported fault status on io memory: %#lx\n",
624				fault_status);
625			ret = -EFAULT;
626			goto out_unlock;
627		}
628
629		/* Adjust page offset */
630		fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
631		ret = io_mem_abort(vcpu, run, fault_ipa);
632		goto out_unlock;
633	}
634
635	memslot = gfn_to_memslot(vcpu->kvm, gfn);
636	if (!memslot->user_alloc) {
637		kvm_err("non user-alloc memslots not supported\n");
638		ret = -EINVAL;
639		goto out_unlock;
640	}
641
642	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	/* 0 from user_mem_abort means "handled, resume the guest". */
643	if (ret == 0)
644		ret = 1;
645out_unlock:
646	srcu_read_unlock(&vcpu->kvm->srcu, idx);
647	return ret;
648}
649
/*
 * Apply @handler to every guest physical page backed by a host virtual
 * address in [start, end), walking all memslots that intersect the range.
 * @data is passed through to @handler unchanged.
 */
650static void handle_hva_to_gpa(struct kvm *kvm,
651			      unsigned long start,
652			      unsigned long end,
653			      void (*handler)(struct kvm *kvm,
654					      gpa_t gpa, void *data),
655			      void *data)
656{
657	struct kvm_memslots *slots;
658	struct kvm_memory_slot *memslot;
659
660	slots = kvm_memslots(kvm);
661
662	/* we only care about the pages that the guest sees */
663	kvm_for_each_memslot(memslot, slots) {
664		unsigned long hva_start, hva_end;
665		gfn_t gfn, gfn_end;
666
		/* Clip [start, end) to this memslot's hva range. */
667		hva_start = max(start, memslot->userspace_addr);
668		hva_end = min(end, memslot->userspace_addr +
669					(memslot->npages << PAGE_SHIFT));
670		if (hva_start >= hva_end)
671			continue;
672
673		/*
674		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
675		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
676		 */
677		gfn = hva_to_gfn_memslot(hva_start, memslot);
678		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
679
680		for (; gfn < gfn_end; ++gfn) {
681			gpa_t gpa = gfn << PAGE_SHIFT;
682			handler(kvm, gpa, data);
683		}
684	}
685}
686
/* Unmap a single guest page and flush the VM's stage-2 TLB entries. */
687static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
688{
689	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
690	kvm_tlb_flush_vmid(kvm);
691}
692
/*
 * MMU notifier callback: a single host page is going away; drop any
 * stage-2 mapping of it.  A NULL pgd means no stage-2 tables exist yet.
 */
693int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
694{
695	unsigned long end = hva + PAGE_SIZE;
696
697	if (!kvm->arch.pgd)
698		return 0;
699
700	trace_kvm_unmap_hva(hva);
701	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
702	return 0;
703}
704
/*
 * MMU notifier callback: a range of host pages is going away; drop any
 * stage-2 mappings that back it.
 */
705int kvm_unmap_hva_range(struct kvm *kvm,
706			unsigned long start, unsigned long end)
707{
708	if (!kvm->arch.pgd)
709		return 0;
710
711	trace_kvm_unmap_hva_range(start, end);
712	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
713	return 0;
714}
715
/*
 * handle_hva_to_gpa() callback: install the stage-2 pte passed through
 * @data.  A NULL cache makes stage2_set_pte skip missing tables instead
 * of allocating (we may not sleep here).
 */
716static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
717{
718	pte_t *pte = (pte_t *)data;
719
720	stage2_set_pte(kvm, NULL, gpa, pte, false);
721}
722
723
/*
 * MMU notifier callback: the host pte for @hva changed; refresh any
 * stage-2 mapping with a new pte built from the updated pfn and the base
 * PAGE_S2 attributes.
 */
724void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
725{
726	unsigned long end = hva + PAGE_SIZE;
727	pte_t stage2_pte;
728
729	if (!kvm->arch.pgd)
730		return;
731
732	trace_kvm_set_spte_hva(hva);
733	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
734	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
735}
736
/* Release the per-vcpu cache of pre-allocated page-table pages. */
737void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
738{
739	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
740}
741
/* Physical address of the Hyp pgd, suitable for programming HTTBR. */
742phys_addr_t kvm_mmu_get_httbr(void)
743{
744	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
745	return virt_to_phys(hyp_pgd);
746}
747
/*
 * Sanity-check that the Hyp pgd was allocated earlier (by the idmap
 * code).  Returns 0 when present, -ENOMEM otherwise.
 */
748int kvm_mmu_init(void)
749{
750	if (!hyp_pgd) {
751		kvm_err("Hyp mode PGD not allocated\n");
752		return -ENOMEM;
753	}
754
755	return 0;
756}
757
758/**
759 * kvm_clear_idmap - remove all idmaps from the hyp pgd
760 *
761 * Free the underlying pmds for all pgds in range and clear the pgds (but
762 * don't free them) afterwards.
763 *
764 * The walk uses physical addresses of the idmap text section, which is
765 * valid because an idmap maps VA == PA.
766 */
764void kvm_clear_hyp_idmap(void)
765{
766	unsigned long addr, end;
767	unsigned long next;
768	pgd_t *pgd = hyp_pgd;
769	pud_t *pud;
770	pmd_t *pmd;
771
772	addr = virt_to_phys(__hyp_idmap_text_start);
773	end = virt_to_phys(__hyp_idmap_text_end);
774
775	pgd += pgd_index(addr);
776	do {
777		next = pgd_addr_end(addr, end);
778		if (pgd_none_or_clear_bad(pgd))
779			continue;
780		pud = pud_offset(pgd, addr);
781		pmd = pmd_offset(pud, addr);
782
783		pud_clear(pud);
		/* Clean the cleared entry, then free the (page-aligned) table. */
784		clean_pmd_entry(pmd);
785		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
786	} while (pgd++, addr = next, addr < end);
787}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644
index 000000000000..7ee5bb7a3667
--- /dev/null
+++ b/arch/arm/kvm/psci.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2012 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/kvm_host.h>
19#include <linux/wait.h>
20
21#include <asm/kvm_emulate.h>
22#include <asm/kvm_psci.h>
23
24/*
25 * This is an implementation of the Power State Coordination Interface
26 * as described in ARM document number ARM DEN 0022A.
27 */
28
/*
 * PSCI CPU_OFF: mark the calling vcpu paused; kvm_psci_vcpu_on() clears
 * this flag when another vcpu powers this one back on.
 */
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}
33
/*
 * PSCI CPU_ON: power on the vcpu whose index is in the caller's r1,
 * starting execution at the address in the caller's r2.
 * Returns KVM_PSCI_RET_SUCCESS or KVM_PSCI_RET_INVAL.
 */
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	phys_addr_t target_pc;

	/* r1 holds the target CPU id; keep only the low 32 bits for AArch32. */
	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	if (cpu_id >= atomic_read(&kvm->online_vcpus))
		return KVM_PSCI_RET_INVAL;

	/* r2 holds the entry point for the target vcpu. */
	target_pc = *vcpu_reg(source_vcpu, 2);

	vcpu = kvm_get_vcpu(kvm, cpu_id);

	/*
	 * The target must be sleeping on its wait queue (i.e. powered off);
	 * an empty queue is treated as "already on" and rejected.
	 * NOTE(review): confirm this cannot race with a vcpu that is about
	 * to block on the queue.
	 */
	wq = kvm_arch_vcpu_wq(vcpu);
	if (!waitqueue_active(wq))
		return KVM_PSCI_RET_INVAL;

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Publish PC and un-pause before waking the target. */
	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wake_up_interruptible(wq);

	return KVM_PSCI_RET_SUCCESS;
}
73
74/**
75 * kvm_psci_call - handle PSCI call if r0 value is in range
76 * @vcpu: Pointer to the VCPU struct
77 *
78 * Handle PSCI calls from guests through traps from HVC or SMC instructions.
79 * The calling convention is similar to SMC calls to the secure world where
80 * the function number is placed in r0 and this function returns true if the
81 * function number specified in r0 is withing the PSCI range, and false
82 * otherwise.
83 */
84bool kvm_psci_call(struct kvm_vcpu *vcpu)
85{
86 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
87 unsigned long val;
88
89 switch (psci_fn) {
90 case KVM_PSCI_FN_CPU_OFF:
91 kvm_psci_vcpu_off(vcpu);
92 val = KVM_PSCI_RET_SUCCESS;
93 break;
94 case KVM_PSCI_FN_CPU_ON:
95 val = kvm_psci_vcpu_on(vcpu);
96 break;
97 case KVM_PSCI_FN_CPU_SUSPEND:
98 case KVM_PSCI_FN_MIGRATE:
99 val = KVM_PSCI_RET_NI;
100 break;
101
102 default:
103 return false;
104 }
105
106 *vcpu_reg(vcpu, 0) = val;
107 return true;
108}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
new file mode 100644
index 000000000000..b80256b554cd
--- /dev/null
+++ b/arch/arm/kvm/reset.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18#include <linux/compiler.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/kvm_host.h>
22#include <linux/kvm.h>
23
24#include <asm/unified.h>
25#include <asm/ptrace.h>
26#include <asm/cputype.h>
27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h>
29
/******************************************************************************
 * Cortex-A15 Reset Values
 */

/* Highest valid vcpu_id for a Cortex-A15 guest (ids 0..3). */
static const int a15_max_cpu_idx = 3;

/*
 * Reset state for a Cortex-A15 core: SVC mode with asynchronous aborts,
 * IRQs and FIQs all masked.
 */
static struct kvm_regs a15_regs_reset = {
	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
39
40
41/*******************************************************************************
42 * Exported reset function
43 */
44
45/**
46 * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
47 * @vcpu: The VCPU pointer
48 *
49 * This function finds the right table above and sets the registers on the
50 * virtual CPU struct to their architectually defined reset values.
51 */
52int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
53{
54 struct kvm_regs *cpu_reset;
55
56 switch (vcpu->arch.target) {
57 case KVM_ARM_TARGET_CORTEX_A15:
58 if (vcpu->vcpu_id > a15_max_cpu_idx)
59 return -EINVAL;
60 cpu_reset = &a15_regs_reset;
61 vcpu->arch.midr = read_cpuid_id();
62 break;
63 default:
64 return -ENODEV;
65 }
66
67 /* Reset core registers */
68 memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
69
70 /* Reset CP15 registers */
71 kvm_reset_coprocs(vcpu);
72
73 return 0;
74}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644
index 000000000000..a8e73ed5ad5b
--- /dev/null
+++ b/arch/arm/kvm/trace.h
@@ -0,0 +1,235 @@
1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
/*
 * Tracepoints for entry/exit to guest
 */

/* Fired on world switch into the guest; records the guest PC. */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
	),

	TP_fast_assign(
		__entry->vcpu_pc		= vcpu_pc;
	),

	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);
26
/* Fired on world switch back to the host; records the guest PC at exit. */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
	),

	TP_fast_assign(
		__entry->vcpu_pc		= vcpu_pc;
	),

	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);
41
42TRACE_EVENT(kvm_guest_fault,
43 TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
44 unsigned long hxfar,
45 unsigned long long ipa),
46 TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
47
48 TP_STRUCT__entry(
49 __field( unsigned long, vcpu_pc )
50 __field( unsigned long, hsr )
51 __field( unsigned long, hxfar )
52 __field( unsigned long long, ipa )
53 ),
54
55 TP_fast_assign(
56 __entry->vcpu_pc = vcpu_pc;
57 __entry->hsr = hsr;
58 __entry->hxfar = hxfar;
59 __entry->ipa = ipa;
60 ),
61
62 TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
63 "ipa %#16llx, hsr %#08lx",
64 __entry->vcpu_pc, __entry->hxfar,
65 __entry->ipa, __entry->hsr)
66);
67
/*
 * Fired when userspace injects an interrupt; the printk decodes the
 * injection type (per-CPU line, VGIC PPI or VGIC SPI).
 */
TRACE_EVENT(kvm_irq_line,
	TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
	TP_ARGS(type, vcpu_idx, irq_num, level),

	TP_STRUCT__entry(
		__field(	unsigned int,	type		)
		__field(	int,		vcpu_idx	)
		__field(	int,		irq_num		)
		__field(	int,		level		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->vcpu_idx	= vcpu_idx;
		__entry->irq_num	= irq_num;
		__entry->level		= level;
	),

	TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
		  (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);
92
/* Fired when an MMIO-faulting instruction is emulated in the host. */
TRACE_EVENT(kvm_mmio_emulate,
	TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
		 unsigned long cpsr),
	TP_ARGS(vcpu_pc, instr, cpsr),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
		__field(	unsigned long,	instr		)
		__field(	unsigned long,	cpsr		)
	),

	TP_fast_assign(
		__entry->vcpu_pc		= vcpu_pc;
		__entry->instr			= instr;
		__entry->cpsr			= cpsr;
	),

	TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
113
/* Architecturally implementation defined CP15 register access */

/*
 * Fired when a trapped mcr/mrc to an implementation-defined CP15
 * register is emulated; the printk renders the access in mcr/mrc
 * assembler syntax. Note the fields narrow the unsigned long args
 * to unsigned int.
 */
TRACE_EVENT(kvm_emulate_cp15_imp,
	TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
		 unsigned long CRm, unsigned long Op2, bool is_write),
	TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),

	TP_STRUCT__entry(
		__field(	unsigned int,	Op1		)
		__field(	unsigned int,	Rt1		)
		__field(	unsigned int,	CRn		)
		__field(	unsigned int,	CRm		)
		__field(	unsigned int,	Op2		)
		__field(	bool,		is_write	)
	),

	TP_fast_assign(
		__entry->is_write		= is_write;
		__entry->Op1			= Op1;
		__entry->Rt1			= Rt1;
		__entry->CRn			= CRn;
		__entry->CRm			= CRm;
		__entry->Op2			= Op2;
	),

	TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
		  (__entry->is_write) ? "mcr" : "mrc",
		  __entry->Op1, __entry->Rt1, __entry->CRn,
		  __entry->CRm, __entry->Op2)
);
143
/* Fired when the guest executes a trapped WFI instruction. */
TRACE_EVENT(kvm_wfi,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
	),

	TP_fast_assign(
		__entry->vcpu_pc		= vcpu_pc;
	),

	TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
);
158
/* Fired from the MMU-notifier path when a single hva is unmapped. */
TRACE_EVENT(kvm_unmap_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
);
173
/* Fired from the MMU-notifier path when an hva range is unmapped. */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
		  __entry->start, __entry->end)
);
191
/* Fired from the MMU-notifier path when a host pte change is mirrored. */
TRACE_EVENT(kvm_set_spte_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);
206
207TRACE_EVENT(kvm_hvc,
208 TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
209 TP_ARGS(vcpu_pc, r0, imm),
210
211 TP_STRUCT__entry(
212 __field( unsigned long, vcpu_pc )
213 __field( unsigned long, r0 )
214 __field( unsigned long, imm )
215 ),
216
217 TP_fast_assign(
218 __entry->vcpu_pc = vcpu_pc;
219 __entry->r0 = r0;
220 __entry->imm = imm;
221 ),
222
223 TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
224 __entry->vcpu_pc, __entry->r0, __entry->imm)
225);
226
227#endif /* _TRACE_KVM_H */
228
229#undef TRACE_INCLUDE_PATH
230#define TRACE_INCLUDE_PATH arch/arm/kvm
231#undef TRACE_INCLUDE_FILE
232#define TRACE_INCLUDE_FILE trace
233
234/* This part must be outside protection */
235#include <trace/define_trace.h>
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 9ee866ce0478..4b678478cf95 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -105,6 +105,8 @@ static void __init soc_detect(u32 dbgu_base)
105 switch (socid) { 105 switch (socid) {
106 case ARCH_ID_AT91RM9200: 106 case ARCH_ID_AT91RM9200:
107 at91_soc_initdata.type = AT91_SOC_RM9200; 107 at91_soc_initdata.type = AT91_SOC_RM9200;
108 if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE)
109 at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
108 at91_boot_soc = at91rm9200_soc; 110 at91_boot_soc = at91rm9200_soc;
109 break; 111 break;
110 112
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbdb..5ac9e9384b15 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@
25 25
26#define DAVINCI_CPUIDLE_MAX_STATES 2 26#define DAVINCI_CPUIDLE_MAX_STATES 2
27 27
28struct davinci_ops { 28static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
29 void (*enter) (u32 flags); 29static void __iomem *ddr2_reg_base;
30 void (*exit) (u32 flags); 30static bool ddr2_pdown;
31 u32 flags; 31
32}; 32static void davinci_save_ddr_power(int enter, bool pdown)
33{
34 u32 val;
35
36 val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
37
38 if (enter) {
39 if (pdown)
40 val |= DDR2_SRPD_BIT;
41 else
42 val &= ~DDR2_SRPD_BIT;
43 val |= DDR2_LPMODEN_BIT;
44 } else {
45 val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
46 }
47
48 __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
49}
33 50
34/* Actual code that puts the SoC in different idle states */ 51/* Actual code that puts the SoC in different idle states */
35static int davinci_enter_idle(struct cpuidle_device *dev, 52static int davinci_enter_idle(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv, 53 struct cpuidle_driver *drv,
37 int index) 54 int index)
38{ 55{
39 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 56 davinci_save_ddr_power(1, ddr2_pdown);
40 struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
41
42 if (ops && ops->enter)
43 ops->enter(ops->flags);
44 57
45 index = cpuidle_wrap_enter(dev, drv, index, 58 index = cpuidle_wrap_enter(dev, drv, index,
46 arm_cpuidle_simple_enter); 59 arm_cpuidle_simple_enter);
47 60
48 if (ops && ops->exit) 61 davinci_save_ddr_power(0, ddr2_pdown);
49 ops->exit(ops->flags);
50 62
51 return index; 63 return index;
52} 64}
53 65
54/* fields in davinci_ops.flags */
55#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
56
57static struct cpuidle_driver davinci_idle_driver = { 66static struct cpuidle_driver davinci_idle_driver = {
58 .name = "cpuidle-davinci", 67 .name = "cpuidle-davinci",
59 .owner = THIS_MODULE, 68 .owner = THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
70 .state_count = DAVINCI_CPUIDLE_MAX_STATES, 79 .state_count = DAVINCI_CPUIDLE_MAX_STATES,
71}; 80};
72 81
73static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
74static void __iomem *ddr2_reg_base;
75
76static void davinci_save_ddr_power(int enter, bool pdown)
77{
78 u32 val;
79
80 val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
81
82 if (enter) {
83 if (pdown)
84 val |= DDR2_SRPD_BIT;
85 else
86 val &= ~DDR2_SRPD_BIT;
87 val |= DDR2_LPMODEN_BIT;
88 } else {
89 val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
90 }
91
92 __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
93}
94
95static void davinci_c2state_enter(u32 flags)
96{
97 davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
98}
99
100static void davinci_c2state_exit(u32 flags)
101{
102 davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
103}
104
105static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
106 [1] = {
107 .enter = davinci_c2state_enter,
108 .exit = davinci_c2state_exit,
109 },
110};
111
112static int __init davinci_cpuidle_probe(struct platform_device *pdev) 82static int __init davinci_cpuidle_probe(struct platform_device *pdev)
113{ 83{
114 int ret; 84 int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
124 94
125 ddr2_reg_base = pdata->ddr2_ctlr_base; 95 ddr2_reg_base = pdata->ddr2_ctlr_base;
126 96
127 if (pdata->ddr2_pdown) 97 ddr2_pdown = pdata->ddr2_pdown;
128 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
129 cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
130
131 device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
132 98
133 ret = cpuidle_register_driver(&davinci_idle_driver); 99 ret = cpuidle_register_driver(&davinci_idle_driver);
134 if (ret) { 100 if (ret) {
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e103c290bc9e..85afb031b676 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
414 select CPU_EXYNOS4210 414 select CPU_EXYNOS4210
415 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD 415 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
416 select PINCTRL 416 select PINCTRL
417 select PINCTRL_EXYNOS4 417 select PINCTRL_EXYNOS
418 select USE_OF 418 select USE_OF
419 help 419 help
420 Machine support for Samsung Exynos4 machine with device tree enabled. 420 Machine support for Samsung Exynos4 machine with device tree enabled.
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h
index 7517c3f417af..b5d39dd03b2a 100644
--- a/arch/arm/mach-exynos/include/mach/cpufreq.h
+++ b/arch/arm/mach-exynos/include/mach/cpufreq.h
@@ -18,12 +18,25 @@ enum cpufreq_level_index {
18 L20, 18 L20,
19}; 19};
20 20
21#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
22 { \
23 .freq = (f) * 1000, \
24 .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
25 (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
26 .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
27 .mps = ((m) << 16 | (p) << 8 | (s)), \
28 }
29
30struct apll_freq {
31 unsigned int freq;
32 u32 clk_div_cpu0;
33 u32 clk_div_cpu1;
34 u32 mps;
35};
36
21struct exynos_dvfs_info { 37struct exynos_dvfs_info {
22 unsigned long mpll_freq_khz; 38 unsigned long mpll_freq_khz;
23 unsigned int pll_safe_idx; 39 unsigned int pll_safe_idx;
24 unsigned int pm_lock_idx;
25 unsigned int max_support_idx;
26 unsigned int min_support_idx;
27 struct clk *cpu_clk; 40 struct clk *cpu_clk;
28 unsigned int *volt_table; 41 unsigned int *volt_table;
29 struct cpufreq_frequency_table *freq_table; 42 struct cpufreq_frequency_table *freq_table;
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 551c97e87a78..44b12f9c1584 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,5 +1,7 @@
1config ARCH_HIGHBANK 1config ARCH_HIGHBANK
2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 2 bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
3 select ARCH_HAS_CPUFREQ
4 select ARCH_HAS_OPP
3 select ARCH_WANT_OPTIONAL_GPIOLIB 5 select ARCH_WANT_OPTIONAL_GPIOLIB
4 select ARM_AMBA 6 select ARM_AMBA
5 select ARM_GIC 7 select ARM_GIC
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
11 select GENERIC_CLOCKEVENTS 13 select GENERIC_CLOCKEVENTS
12 select HAVE_ARM_SCU 14 select HAVE_ARM_SCU
13 select HAVE_SMP 15 select HAVE_SMP
16 select MAILBOX
17 select PL320_MBOX
14 select SPARSE_IRQ 18 select SPARSE_IRQ
15 select USE_OF 19 select USE_OF
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index 80235b46cb58..3f65206a9b92 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -2,7 +2,6 @@
2#define __HIGHBANK_CORE_H 2#define __HIGHBANK_CORE_H
3 3
4extern void highbank_set_cpu_jump(int cpu, void *jump_addr); 4extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
5extern void highbank_clocks_init(void);
6extern void highbank_restart(char, const char *); 5extern void highbank_restart(char, const char *);
7extern void __iomem *scu_base_addr; 6extern void __iomem *scu_base_addr;
8 7
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 981dc1e1da51..65656ff0eb33 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -25,9 +25,11 @@
25#include <linux/of_address.h> 25#include <linux/of_address.h>
26#include <linux/smp.h> 26#include <linux/smp.h>
27#include <linux/amba/bus.h> 27#include <linux/amba/bus.h>
28#include <linux/clk-provider.h>
28 29
29#include <asm/arch_timer.h> 30#include <asm/arch_timer.h>
30#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <asm/cputype.h>
31#include <asm/smp_plat.h> 33#include <asm/smp_plat.h>
32#include <asm/smp_twd.h> 34#include <asm/smp_twd.h>
33#include <asm/hardware/arm_timer.h> 35#include <asm/hardware/arm_timer.h>
@@ -59,7 +61,7 @@ static void __init highbank_scu_map_io(void)
59 61
60void highbank_set_cpu_jump(int cpu, void *jump_addr) 62void highbank_set_cpu_jump(int cpu, void *jump_addr)
61{ 63{
62 cpu = cpu_logical_map(cpu); 64 cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
63 writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); 65 writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
64 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); 66 __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
65 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), 67 outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
@@ -116,7 +118,7 @@ static void __init highbank_timer_init(void)
116 WARN_ON(!timer_base); 118 WARN_ON(!timer_base);
117 irq = irq_of_parse_and_map(np, 0); 119 irq = irq_of_parse_and_map(np, 0);
118 120
119 highbank_clocks_init(); 121 of_clk_init(NULL);
120 lookup.clk = of_clk_get(np, 0); 122 lookup.clk = of_clk_get(np, 0);
121 clkdev_add(&lookup); 123 clkdev_add(&lookup);
122 124
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h
index 70af9d13fcef..5995df7f2622 100644
--- a/arch/arm/mach-highbank/sysregs.h
+++ b/arch/arm/mach-highbank/sysregs.h
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
37 37
38static inline void highbank_set_core_pwr(void) 38static inline void highbank_set_core_pwr(void)
39{ 39{
40 int cpu = cpu_logical_map(smp_processor_id()); 40 int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
41 if (scu_base_addr) 41 if (scu_base_addr)
42 scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); 42 scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
43 else 43 else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
46 46
47static inline void highbank_clear_core_pwr(void) 47static inline void highbank_clear_core_pwr(void)
48{ 48{
49 int cpu = cpu_logical_map(smp_processor_id()); 49 int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
50 if (scu_base_addr) 50 if (scu_base_addr)
51 scu_power_mode(scu_base_addr, SCU_PM_NORMAL); 51 scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
52 else 52 else
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 3e628fd7a674..0a2349dc7018 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -851,6 +851,7 @@ config SOC_IMX6Q
851 select HAVE_CAN_FLEXCAN if CAN 851 select HAVE_CAN_FLEXCAN if CAN
852 select HAVE_IMX_GPC 852 select HAVE_IMX_GPC
853 select HAVE_IMX_MMDC 853 select HAVE_IMX_MMDC
854 select HAVE_IMX_SRC
854 select HAVE_SMP 855 select HAVE_SMP
855 select MFD_SYSCON 856 select MFD_SYSCON
856 select PINCTRL 857 select PINCTRL
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index b197aa73dc4b..2c570cdaae7b 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -254,9 +254,9 @@ int __init mx25_clocks_init(void)
254 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 254 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
255 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2"); 255 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
256 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); 256 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
257 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 257 clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
258 clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc"); 258 clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27");
259 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 259 clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
260 clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0"); 260 clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0");
261 /* i.mx25 has the i.mx35 type cspi */ 261 /* i.mx25 has the i.mx35 type cspi */
262 clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0"); 262 clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 4c1d1e4efc74..1ffe3b534e51 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -236,9 +236,9 @@ int __init mx27_clocks_init(unsigned long fref)
236 clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0"); 236 clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0");
237 clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0"); 237 clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0");
238 clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0"); 238 clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0");
239 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 239 clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
240 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); 240 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27");
241 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); 241 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27");
242 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0"); 242 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
243 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0"); 243 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
244 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0"); 244 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index 8be64e0a4ace..16ccbd41dea9 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -139,9 +139,9 @@ int __init mx31_clocks_init(unsigned long fref)
139 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2"); 139 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
140 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2"); 140 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
141 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 141 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
142 clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc"); 142 clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27");
143 clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc"); 143 clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27");
144 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 144 clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
145 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 145 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
146 /* i.mx31 has the i.mx21 type uart */ 146 /* i.mx31 has the i.mx21 type uart */
147 clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); 147 clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 66f3d65ea275..f0727e80815d 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -251,9 +251,9 @@ int __init mx35_clocks_init()
251 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); 251 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
252 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 252 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
253 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2"); 253 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
254 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 254 clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
255 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 255 clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
256 clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc"); 256 clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27");
257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index 579023f59dc1..fb7cb841b64c 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -269,9 +269,9 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
269 clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2"); 269 clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
270 clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2"); 270 clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
271 clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2"); 271 clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
272 clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc"); 272 clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51");
273 clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc"); 273 clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51");
274 clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc"); 274 clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51");
275 clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand"); 275 clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand");
276 clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0"); 276 clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
277 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1"); 277 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 7f2c10c7413a..c0c4e723b7f5 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -436,6 +436,9 @@ int __init mx6q_clocks_init(void)
436 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) 436 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
437 clk_prepare_enable(clk[clks_init_on[i]]); 437 clk_prepare_enable(clk[clks_init_on[i]]);
438 438
439 /* Set initial power mode */
440 imx6q_set_lpm(WAIT_CLOCKED);
441
439 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 442 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
440 base = of_iomap(np, 0); 443 base = of_iomap(np, 0);
441 WARN_ON(!base); 444 WARN_ON(!base);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 7191ab4434e5..fa36fb84ab19 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -142,6 +142,7 @@ extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
142extern void imx6q_clock_map_io(void); 142extern void imx6q_clock_map_io(void);
143 143
144extern void imx_cpu_die(unsigned int cpu); 144extern void imx_cpu_die(unsigned int cpu);
145extern int imx_cpu_kill(unsigned int cpu);
145 146
146#ifdef CONFIG_PM 147#ifdef CONFIG_PM
147extern void imx6q_pm_init(void); 148extern void imx6q_pm_init(void);
diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h
index 6277baf1b7be..9bd5777ff0e7 100644
--- a/arch/arm/mach-imx/devices/devices-common.h
+++ b/arch/arm/mach-imx/devices/devices-common.h
@@ -63,6 +63,7 @@ struct platform_device *__init imx_add_flexcan(
63 63
64#include <linux/fsl_devices.h> 64#include <linux/fsl_devices.h>
65struct imx_fsl_usb2_udc_data { 65struct imx_fsl_usb2_udc_data {
66 const char *devid;
66 resource_size_t iobase; 67 resource_size_t iobase;
67 resource_size_t irq; 68 resource_size_t irq;
68}; 69};
diff --git a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
index 37e44398197b..3c06bd96e9cc 100644
--- a/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
+++ b/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
@@ -11,35 +11,36 @@
11#include "../hardware.h" 11#include "../hardware.h"
12#include "devices-common.h" 12#include "devices-common.h"
13 13
14#define imx_fsl_usb2_udc_data_entry_single(soc) \ 14#define imx_fsl_usb2_udc_data_entry_single(soc, _devid) \
15 { \ 15 { \
16 .devid = _devid, \
16 .iobase = soc ## _USB_OTG_BASE_ADDR, \ 17 .iobase = soc ## _USB_OTG_BASE_ADDR, \
17 .irq = soc ## _INT_USB_OTG, \ 18 .irq = soc ## _INT_USB_OTG, \
18 } 19 }
19 20
20#ifdef CONFIG_SOC_IMX25 21#ifdef CONFIG_SOC_IMX25
21const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst = 22const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst =
22 imx_fsl_usb2_udc_data_entry_single(MX25); 23 imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27");
23#endif /* ifdef CONFIG_SOC_IMX25 */ 24#endif /* ifdef CONFIG_SOC_IMX25 */
24 25
25#ifdef CONFIG_SOC_IMX27 26#ifdef CONFIG_SOC_IMX27
26const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst = 27const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst =
27 imx_fsl_usb2_udc_data_entry_single(MX27); 28 imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27");
28#endif /* ifdef CONFIG_SOC_IMX27 */ 29#endif /* ifdef CONFIG_SOC_IMX27 */
29 30
30#ifdef CONFIG_SOC_IMX31 31#ifdef CONFIG_SOC_IMX31
31const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst = 32const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst =
32 imx_fsl_usb2_udc_data_entry_single(MX31); 33 imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27");
33#endif /* ifdef CONFIG_SOC_IMX31 */ 34#endif /* ifdef CONFIG_SOC_IMX31 */
34 35
35#ifdef CONFIG_SOC_IMX35 36#ifdef CONFIG_SOC_IMX35
36const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst = 37const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst =
37 imx_fsl_usb2_udc_data_entry_single(MX35); 38 imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27");
38#endif /* ifdef CONFIG_SOC_IMX35 */ 39#endif /* ifdef CONFIG_SOC_IMX35 */
39 40
40#ifdef CONFIG_SOC_IMX51 41#ifdef CONFIG_SOC_IMX51
41const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst = 42const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst =
42 imx_fsl_usb2_udc_data_entry_single(MX51); 43 imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51");
43#endif 44#endif
44 45
45struct platform_device *__init imx_add_fsl_usb2_udc( 46struct platform_device *__init imx_add_fsl_usb2_udc(
@@ -57,7 +58,7 @@ struct platform_device *__init imx_add_fsl_usb2_udc(
57 .flags = IORESOURCE_IRQ, 58 .flags = IORESOURCE_IRQ,
58 }, 59 },
59 }; 60 };
60 return imx_add_platform_device_dmamask("fsl-usb2-udc", -1, 61 return imx_add_platform_device_dmamask(data->devid, -1,
61 res, ARRAY_SIZE(res), 62 res, ARRAY_SIZE(res),
62 pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 63 pdata, sizeof(*pdata), DMA_BIT_MASK(32));
63} 64}
diff --git a/arch/arm/mach-imx/devices/platform-imx-fb.c b/arch/arm/mach-imx/devices/platform-imx-fb.c
index 10b0ed39f07f..25a47c616b2d 100644
--- a/arch/arm/mach-imx/devices/platform-imx-fb.c
+++ b/arch/arm/mach-imx/devices/platform-imx-fb.c
@@ -54,7 +54,7 @@ struct platform_device *__init imx_add_imx_fb(
54 .flags = IORESOURCE_IRQ, 54 .flags = IORESOURCE_IRQ,
55 }, 55 },
56 }; 56 };
57 return imx_add_platform_device_dmamask("imx-fb", 0, 57 return imx_add_platform_device_dmamask(data->devid, 0,
58 res, ARRAY_SIZE(res), 58 res, ARRAY_SIZE(res),
59 pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 59 pdata, sizeof(*pdata), DMA_BIT_MASK(32));
60} 60}
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 3dec962b0770..7bc5fe15dda2 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -46,9 +46,11 @@ static inline void cpu_enter_lowpower(void)
46void imx_cpu_die(unsigned int cpu) 46void imx_cpu_die(unsigned int cpu)
47{ 47{
48 cpu_enter_lowpower(); 48 cpu_enter_lowpower();
49 imx_enable_cpu(cpu, false); 49 cpu_do_idle();
50}
50 51
51 /* spin here until hardware takes it down */ 52int imx_cpu_kill(unsigned int cpu)
52 while (1) 53{
53 ; 54 imx_enable_cpu(cpu, false);
55 return 1;
54} 56}
diff --git a/arch/arm/mach-imx/iram_alloc.c b/arch/arm/mach-imx/iram_alloc.c
index 6c80424f678e..e05cf407db65 100644
--- a/arch/arm/mach-imx/iram_alloc.c
+++ b/arch/arm/mach-imx/iram_alloc.c
@@ -22,8 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/genalloc.h> 24#include <linux/genalloc.h>
25 25#include "linux/platform_data/imx-iram.h"
26#include "iram.h"
27 26
28static unsigned long iram_phys_base; 27static unsigned long iram_phys_base;
29static void __iomem *iram_virt_base; 28static void __iomem *iram_virt_base;
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index 3777b805b76b..66fae885c842 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -92,5 +92,6 @@ struct smp_operations imx_smp_ops __initdata = {
92 .smp_boot_secondary = imx_boot_secondary, 92 .smp_boot_secondary = imx_boot_secondary,
93#ifdef CONFIG_HOTPLUG_CPU 93#ifdef CONFIG_HOTPLUG_CPU
94 .cpu_die = imx_cpu_die, 94 .cpu_die = imx_cpu_die,
95 .cpu_kill = imx_cpu_kill,
95#endif 96#endif
96}; 97};
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index a17543da602d..ee42d20cba19 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -41,6 +41,7 @@ static int imx6q_pm_enter(suspend_state_t state)
41 cpu_suspend(0, imx6q_suspend_finish); 41 cpu_suspend(0, imx6q_suspend_finish);
42 imx_smp_prepare(); 42 imx_smp_prepare();
43 imx_gpc_post_resume(); 43 imx_gpc_post_resume();
44 imx6q_set_lpm(WAIT_CLOCKED);
44 break; 45 break;
45 default: 46 default:
46 return -EINVAL; 47 return -EINVAL;
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index be50e795536d..e7fcea7f3300 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -475,13 +475,12 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
475{ 475{
476 int ret = 0; 476 int ret = 0;
477 477
478 if (!ap_syscon_base)
479 return -EINVAL;
480
478 if (nr == 0) { 481 if (nr == 0) {
479 sys->mem_offset = PHYS_PCI_MEM_BASE; 482 sys->mem_offset = PHYS_PCI_MEM_BASE;
480 ret = pci_v3_setup_resources(sys); 483 ret = pci_v3_setup_resources(sys);
481 /* Remap the Integrator system controller */
482 ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
483 if (!ap_syscon_base)
484 return -EINVAL;
485 } 484 }
486 485
487 return ret; 486 return ret;
@@ -497,6 +496,13 @@ void __init pci_v3_preinit(void)
497 unsigned int temp; 496 unsigned int temp;
498 int ret; 497 int ret;
499 498
499 /* Remap the Integrator system controller */
500 ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
501 if (!ap_syscon_base) {
502 pr_err("unable to remap the AP syscon for PCIv3\n");
503 return;
504 }
505
500 pcibios_min_mem = 0x00100000; 506 pcibios_min_mem = 0x00100000;
501 507
502 /* 508 /*
diff --git a/arch/arm/mach-kirkwood/board-ns2.c b/arch/arm/mach-kirkwood/board-ns2.c
index 8821720ab5a4..f4632a809f68 100644
--- a/arch/arm/mach-kirkwood/board-ns2.c
+++ b/arch/arm/mach-kirkwood/board-ns2.c
@@ -18,47 +18,11 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include "common.h" 20#include "common.h"
21#include "mpp.h"
22 21
23static struct mv643xx_eth_platform_data ns2_ge00_data = { 22static struct mv643xx_eth_platform_data ns2_ge00_data = {
24 .phy_addr = MV643XX_ETH_PHY_ADDR(8), 23 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
25}; 24};
26 25
27static unsigned int ns2_mpp_config[] __initdata = {
28 MPP0_SPI_SCn,
29 MPP1_SPI_MOSI,
30 MPP2_SPI_SCK,
31 MPP3_SPI_MISO,
32 MPP4_NF_IO6,
33 MPP5_NF_IO7,
34 MPP6_SYSRST_OUTn,
35 MPP7_GPO, /* Fan speed (bit 1) */
36 MPP8_TW0_SDA,
37 MPP9_TW0_SCK,
38 MPP10_UART0_TXD,
39 MPP11_UART0_RXD,
40 MPP12_GPO, /* Red led */
41 MPP14_GPIO, /* USB fuse */
42 MPP16_GPIO, /* SATA 0 power */
43 MPP17_GPIO, /* SATA 1 power */
44 MPP18_NF_IO0,
45 MPP19_NF_IO1,
46 MPP20_SATA1_ACTn,
47 MPP21_SATA0_ACTn,
48 MPP22_GPIO, /* Fan speed (bit 0) */
49 MPP23_GPIO, /* Fan power */
50 MPP24_GPIO, /* USB mode select */
51 MPP25_GPIO, /* Fan rotation fail */
52 MPP26_GPIO, /* USB device vbus */
53 MPP28_GPIO, /* USB enable host vbus */
54 MPP29_GPIO, /* Blue led (slow register) */
55 MPP30_GPIO, /* Blue led (command register) */
56 MPP31_GPIO, /* Board power off */
57 MPP32_GPIO, /* Power button (0 = Released, 1 = Pushed) */
58 MPP33_GPO, /* Fan speed (bit 2) */
59 0
60};
61
62#define NS2_GPIO_POWER_OFF 31 26#define NS2_GPIO_POWER_OFF 31
63 27
64static void ns2_power_off(void) 28static void ns2_power_off(void)
@@ -71,8 +35,6 @@ void __init ns2_init(void)
71 /* 35 /*
72 * Basic setup. Needs to be called early. 36 * Basic setup. Needs to be called early.
73 */ 37 */
74 kirkwood_mpp_conf(ns2_mpp_config);
75
76 if (of_machine_is_compatible("lacie,netspace_lite_v2") || 38 if (of_machine_is_compatible("lacie,netspace_lite_v2") ||
77 of_machine_is_compatible("lacie,netspace_mini_v2")) 39 of_machine_is_compatible("lacie,netspace_mini_v2"))
78 ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); 40 ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 5dcb369b58aa..99df4df680fd 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -1,6 +1,8 @@
1ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 1ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
2 -I$(srctree)/arch/arm/plat-orion/include 2 -I$(srctree)/arch/arm/plat-orion/include
3 3
4AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
5
4obj-y += system-controller.o 6obj-y += system-controller.o
5obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o 7obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o
6obj-$(CONFIG_SMP) += platsmp.o headsmp.o 8obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 5c8e9cee2c2e..769c1feee1c4 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -397,6 +397,12 @@ static struct omap_board_mux board_mux[] __initdata = {
397 OMAP_PULL_ENA), 397 OMAP_PULL_ENA),
398 OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), 398 OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
399 399
400 /* UART2 - BT/FM/GPS shared transport */
401 OMAP4_MUX(UART2_CTS, OMAP_PIN_INPUT | OMAP_MUX_MODE0),
402 OMAP4_MUX(UART2_RTS, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
403 OMAP4_MUX(UART2_RX, OMAP_PIN_INPUT | OMAP_MUX_MODE0),
404 OMAP4_MUX(UART2_TX, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
405
400 { .reg_offset = OMAP_MUX_TERMINATOR }, 406 { .reg_offset = OMAP_MUX_TERMINATOR },
401}; 407};
402 408
diff --git a/arch/arm/mach-omap2/cclock2420_data.c b/arch/arm/mach-omap2/cclock2420_data.c
index 7e5febe456d9..ab7e952d2070 100644
--- a/arch/arm/mach-omap2/cclock2420_data.c
+++ b/arch/arm/mach-omap2/cclock2420_data.c
@@ -1935,6 +1935,8 @@ int __init omap2420_clk_init(void)
1935 omap2_init_clk_hw_omap_clocks(c->lk.clk); 1935 omap2_init_clk_hw_omap_clocks(c->lk.clk);
1936 } 1936 }
1937 1937
1938 omap2xxx_clkt_vps_late_init();
1939
1938 omap2_clk_disable_autoidle_all(); 1940 omap2_clk_disable_autoidle_all();
1939 1941
1940 omap2_clk_enable_init_clocks(enable_init_clks, 1942 omap2_clk_enable_init_clocks(enable_init_clks,
diff --git a/arch/arm/mach-omap2/cclock2430_data.c b/arch/arm/mach-omap2/cclock2430_data.c
index eda079b96c6a..eb3dab68d536 100644
--- a/arch/arm/mach-omap2/cclock2430_data.c
+++ b/arch/arm/mach-omap2/cclock2430_data.c
@@ -2050,6 +2050,8 @@ int __init omap2430_clk_init(void)
2050 omap2_init_clk_hw_omap_clocks(c->lk.clk); 2050 omap2_init_clk_hw_omap_clocks(c->lk.clk);
2051 } 2051 }
2052 2052
2053 omap2xxx_clkt_vps_late_init();
2054
2053 omap2_clk_disable_autoidle_all(); 2055 omap2_clk_disable_autoidle_all();
2054 2056
2055 omap2_clk_enable_init_clocks(enable_init_clks, 2057 omap2_clk_enable_init_clocks(enable_init_clks,
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 5789a5e25563..a2cc046b47f4 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -2026,14 +2026,13 @@ int __init omap4xxx_clk_init(void)
2026 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power 2026 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
2027 * state when turning the ABE clock domain. Workaround this by 2027 * state when turning the ABE clock domain. Workaround this by
2028 * locking the ABE DPLL on boot. 2028 * locking the ABE DPLL on boot.
2029 * Lock the ABE DPLL in any case to avoid issues with audio.
2029 */ 2030 */
2030 if (cpu_is_omap446x()) { 2031 rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
2031 rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); 2032 if (!rc)
2032 if (!rc) 2033 rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
2033 rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 2034 if (rc)
2034 if (rc) 2035 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
2035 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
2036 }
2037 2036
2038 return 0; 2037 return 0;
2039} 2038}
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 5e304d0719a2..626f3ea3142f 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -639,7 +639,7 @@ static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
639 return cnt; 639 return cnt;
640} 640}
641 641
642static void omap_init_ocp2scp(void) 642static void __init omap_init_ocp2scp(void)
643{ 643{
644 struct omap_hwmod *oh; 644 struct omap_hwmod *oh;
645 struct platform_device *pdev; 645 struct platform_device *pdev;
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
index 4c7566c7e24a..2a2cfa88ddbf 100644
--- a/arch/arm/mach-omap2/drm.c
+++ b/arch/arm/mach-omap2/drm.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/platform_data/omap_drm.h> 26#include <linux/platform_data/omap_drm.h>
27 27
28#include "soc.h"
28#include "omap_device.h" 29#include "omap_device.h"
29#include "omap_hwmod.h" 30#include "omap_hwmod.h"
30 31
@@ -56,7 +57,7 @@ static int __init omap_init_drm(void)
56 oh->name); 57 oh->name);
57 } 58 }
58 59
59 platform_data.omaprev = GET_OMAP_REVISION(); 60 platform_data.omaprev = GET_OMAP_TYPE;
60 61
61 return platform_device_register(&omap_drm_device); 62 return platform_device_register(&omap_drm_device);
62 63
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 129d5081ed15..793f54ac7d14 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2132,8 +2132,12 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
2132 * currently reset very early during boot, before I2C is 2132 * currently reset very early during boot, before I2C is
2133 * available, so it doesn't seem that we have any choice in 2133 * available, so it doesn't seem that we have any choice in
2134 * the kernel other than to avoid resetting it. 2134 * the kernel other than to avoid resetting it.
2135 *
2136 * Also, McPDM needs to be configured to NO_IDLE mode when it
2137 * is in used otherwise vital clocks will be gated which
2138 * results 'slow motion' audio playback.
2135 */ 2139 */
2136 .flags = HWMOD_EXT_OPT_MAIN_CLK, 2140 .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
2137 .mpu_irqs = omap44xx_mcpdm_irqs, 2141 .mpu_irqs = omap44xx_mcpdm_irqs,
2138 .sdma_reqs = omap44xx_mcpdm_sdma_reqs, 2142 .sdma_reqs = omap44xx_mcpdm_sdma_reqs,
2139 .main_clk = "mcpdm_fck", 2143 .main_clk = "mcpdm_fck",
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc85..2d93d8b23835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
351 if (omap_irq_pending()) 351 if (omap_irq_pending())
352 goto out; 352 goto out;
353 353
354 trace_power_start(POWER_CSTATE, 1, smp_processor_id());
355 trace_cpu_idle(1, smp_processor_id()); 354 trace_cpu_idle(1, smp_processor_id());
356 355
357 omap_sram_idle(); 356 omap_sram_idle();
358 357
359 trace_power_end(smp_processor_id());
360 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 358 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
361 359
362out: 360out:
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 691aa674665a..b8ad6e632bb8 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -165,15 +165,11 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
165 struct device_node *np; 165 struct device_node *np;
166 166
167 for_each_matching_node(np, match) { 167 for_each_matching_node(np, match) {
168 if (!of_device_is_available(np)) { 168 if (!of_device_is_available(np))
169 of_node_put(np);
170 continue; 169 continue;
171 }
172 170
173 if (property && !of_get_property(np, property, NULL)) { 171 if (property && !of_get_property(np, property, NULL))
174 of_node_put(np);
175 continue; 172 continue;
176 }
177 173
178 of_add_property(np, &device_disabled); 174 of_add_property(np, &device_disabled);
179 return np; 175 return np;
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h
index d6b5073692d2..44754230fdcc 100644
--- a/arch/arm/mach-realview/include/mach/irqs-eb.h
+++ b/arch/arm/mach-realview/include/mach/irqs-eb.h
@@ -115,7 +115,7 @@
115/* 115/*
116 * Only define NR_IRQS if less than NR_IRQS_EB 116 * Only define NR_IRQS if less than NR_IRQS_EB
117 */ 117 */
118#define NR_IRQS_EB (IRQ_EB_GIC_START + 96) 118#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
119 119
120#if defined(CONFIG_MACH_REALVIEW_EB) \ 120#if defined(CONFIG_MACH_REALVIEW_EB) \
121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) 121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 553059f51841..755c0bb119f4 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -47,7 +47,7 @@ static struct spi_board_info wm1253_devs[] = {
47 .bus_num = 0, 47 .bus_num = 0,
48 .chip_select = 0, 48 .chip_select = 0,
49 .mode = SPI_MODE_0, 49 .mode = SPI_MODE_0,
50 .irq = S3C_EINT(5), 50 .irq = S3C_EINT(4),
51 .controller_data = &wm0010_spi_csinfo, 51 .controller_data = &wm0010_spi_csinfo,
52 .platform_data = &wm0010_pdata, 52 .platform_data = &wm0010_pdata,
53 }, 53 },
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 7feb426fc202..d2e1a16690bd 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -338,8 +338,10 @@ int __init s3c64xx_pm_init(void)
338 for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) 338 for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
339 pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); 339 pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
340 340
341#ifdef CONFIG_S3C_DEV_FB
341 if (dev_get_platdata(&s3c_device_fb.dev)) 342 if (dev_get_platdata(&s3c_device_fb.dev))
342 pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); 343 pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
344#endif
343 345
344 return 0; 346 return 0;
345} 347}
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 3fdd0085e306..8709a39bd34c 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -7,3 +7,4 @@ config ARCH_SUNXI
7 select PINCTRL 7 select PINCTRL
8 select SPARSE_IRQ 8 select SPARSE_IRQ
9 select SUNXI_TIMER 9 select SUNXI_TIMER
10 select PINCTRL_SUNXI \ No newline at end of file
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index a74d3c7d2e26..a36a03d3c9a0 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
243 /* FIXME: what's the actual transition time? */ 243 /* FIXME: what's the actual transition time? */
244 policy->cpuinfo.transition_latency = 300 * 1000; 244 policy->cpuinfo.transition_latency = 300 * 1000;
245 245
246 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; 246 cpumask_copy(policy->cpus, cpu_possible_mask);
247 cpumask_copy(policy->related_cpus, cpu_possible_mask);
248 247
249 if (policy->cpu == 0) 248 if (policy->cpu == 0)
250 register_pm_notifier(&tegra_cpu_pm_notifier); 249 register_pm_notifier(&tegra_cpu_pm_notifier);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 5dea90636d94..3e5bbd0e5b23 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -11,6 +11,7 @@ config UX500_SOC_COMMON
11 select COMMON_CLK 11 select COMMON_CLK
12 select PINCTRL 12 select PINCTRL
13 select PINCTRL_NOMADIK 13 select PINCTRL_NOMADIK
14 select PINCTRL_ABX500
14 select PL310_ERRATA_753970 if CACHE_PL310 15 select PL310_ERRATA_753970 if CACHE_PL310
15 16
16config UX500_SOC_DB8500 17config UX500_SOC_DB8500
@@ -18,6 +19,11 @@ config UX500_SOC_DB8500
18 select CPU_FREQ_TABLE if CPU_FREQ 19 select CPU_FREQ_TABLE if CPU_FREQ
19 select MFD_DB8500_PRCMU 20 select MFD_DB8500_PRCMU
20 select PINCTRL_DB8500 21 select PINCTRL_DB8500
22 select PINCTRL_DB8540
23 select PINCTRL_AB8500
24 select PINCTRL_AB8505
25 select PINCTRL_AB9540
26 select PINCTRL_AB8540
21 select REGULATOR 27 select REGULATOR
22 select REGULATOR_DB8500_PRCMU 28 select REGULATOR_DB8500_PRCMU
23 29
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index d453522edb0d..b8781caa54b8 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = {
90 }, 90 },
91}; 91};
92 92
93static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { 93static struct abx500_gpio_platform_data ab8500_gpio_pdata = {
94 .gpio_base = MOP500_AB8500_PIN_GPIO(1), 94 .gpio_base = MOP500_AB8500_PIN_GPIO(1),
95 .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
96 /* config_reg is the initial configuration of ab8500 pins.
97 * The pins can be configured as GPIO or alt functions based
98 * on value present in GpioSel1 to GpioSel6 and AlternatFunction
99 * register. This is the array of 7 configuration settings.
100 * One has to compile time decide these settings. Below is the
101 * explanation of these setting
102 * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
103 * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
104 * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
105 * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
106 * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
107 * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
108 * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
109 * as GPIO then this register selectes the alternate fucntions
110 */
111 .config_reg = {0x00, 0x1E, 0x80, 0x01,
112 0x7A, 0x00, 0x00},
113}; 95};
114 96
115/* ab8500-codec */ 97/* ab8500-codec */
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 5b286e06474c..b80ad9610e97 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
285 OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL), 285 OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
286 OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), 286 OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
287 /* Requires device name bindings. */ 287 /* Requires device name bindings. */
288 OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE, 288 OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
289 "pinctrl-db8500", NULL), 289 "pinctrl-db8500", NULL),
290 /* Requires clock name and DMA bindings. */ 290 /* Requires clock name and DMA bindings. */
291 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, 291 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 7d34c52798b5..d526dd8e87d3 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -38,15 +38,7 @@
38#define MOP500_STMPE1601_IRQ_END \ 38#define MOP500_STMPE1601_IRQ_END \
39 MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) 39 MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
40 40
41/* AB8500 virtual gpio IRQ */ 41#define MOP500_NR_IRQS MOP500_STMPE1601_IRQ_END
42#define AB8500_VIR_GPIO_NR_IRQS 16
43
44#define MOP500_AB8500_VIR_GPIO_IRQ_BASE \
45 MOP500_STMPE1601_IRQ_END
46#define MOP500_AB8500_VIR_GPIO_IRQ_END \
47 (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
48
49#define MOP500_NR_IRQS MOP500_AB8500_VIR_GPIO_IRQ_END
50 42
51#define MOP500_IRQ_END MOP500_NR_IRQS 43#define MOP500_IRQ_END MOP500_NR_IRQS
52 44
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 5d5929450366..a78827b70270 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -36,6 +36,7 @@
36#include <linux/gfp.h> 36#include <linux/gfp.h>
37#include <linux/clkdev.h> 37#include <linux/clkdev.h>
38#include <linux/mtd/physmap.h> 38#include <linux/mtd/physmap.h>
39#include <linux/bitops.h>
39 40
40#include <asm/irq.h> 41#include <asm/irq.h>
41#include <asm/hardware/arm_timer.h> 42#include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
65#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) 66#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
66#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) 67#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
67 68
69/* These PIC IRQs are valid in each configuration */
70#define PIC_VALID_ALL BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
71 BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
72 BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
73 BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
74 BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
75 BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
76 BIT(SIC_INT_PCI3)
68#if 1 77#if 1
69#define IRQ_MMCI0A IRQ_VICSOURCE22 78#define IRQ_MMCI0A IRQ_VICSOURCE22
70#define IRQ_AACI IRQ_VICSOURCE24 79#define IRQ_AACI IRQ_VICSOURCE24
71#define IRQ_ETH IRQ_VICSOURCE25 80#define IRQ_ETH IRQ_VICSOURCE25
72#define PIC_MASK 0xFFD00000 81#define PIC_MASK 0xFFD00000
82#define PIC_VALID PIC_VALID_ALL
73#else 83#else
74#define IRQ_MMCI0A IRQ_SIC_MMCI0A 84#define IRQ_MMCI0A IRQ_SIC_MMCI0A
75#define IRQ_AACI IRQ_SIC_AACI 85#define IRQ_AACI IRQ_SIC_AACI
76#define IRQ_ETH IRQ_SIC_ETH 86#define IRQ_ETH IRQ_SIC_ETH
77#define PIC_MASK 0 87#define PIC_MASK 0
88#define PIC_VALID PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
89 BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
90 BIT(SIC_INT_ETH)
78#endif 91#endif
79 92
80/* Lookup table for finding a DT node that represents the vic instance */ 93/* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
102 VERSATILE_SIC_BASE); 115 VERSATILE_SIC_BASE);
103 116
104 fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, 117 fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
105 IRQ_VICSOURCE31, ~PIC_MASK, np); 118 IRQ_VICSOURCE31, PIC_VALID, np);
106 119
107 /* 120 /*
108 * Interrupts on secondary controller from 0 to 8 are routed to 121 * Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 2f84f4094f13..e92e5e0705bc 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <mach/irqs.h>
26#include <asm/irq.h> 27#include <asm/irq.h>
27#include <asm/mach/pci.h> 28#include <asm/mach/pci.h>
28 29
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
327 int irq; 328 int irq;
328 329
329 /* slot, pin, irq 330 /* slot, pin, irq
330 * 24 1 27 331 * 24 1 IRQ_SIC_PCI0
331 * 25 1 28 332 * 25 1 IRQ_SIC_PCI1
332 * 26 1 29 333 * 26 1 IRQ_SIC_PCI2
333 * 27 1 30 334 * 27 1 IRQ_SIC_PCI3
334 */ 335 */
335 irq = 27 + ((slot - 24 + pin - 1) & 3); 336 irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
336 337
337 return irq; 338 return irq;
338} 339}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a513..025d17328730 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,8 +629,9 @@ config ARM_THUMBEE
629 make use of it. Say N for code that can run on CPUs without ThumbEE. 629 make use of it. Say N for code that can run on CPUs without ThumbEE.
630 630
631config ARM_VIRT_EXT 631config ARM_VIRT_EXT
632 bool "Native support for the ARM Virtualization Extensions" 632 bool
633 depends on MMU && CPU_V7 633 depends on MMU
634 default y if CPU_V7
634 help 635 help
635 Enable the kernel to make use of the ARM Virtualization 636 Enable the kernel to make use of the ARM Virtualization
636 Extensions to install hypervisors without run-time firmware 637 Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
640 use of this feature. Refer to Documentation/arm/Booting for 641 use of this feature. Refer to Documentation/arm/Booting for
641 details. 642 details.
642 643
643 It is safe to enable this option even if the kernel may not be
644 booted in HYP mode, may not have support for the
645 virtualization extensions, or may be booted with a
646 non-compliant bootloader.
647
648config SWP_EMULATE 644config SWP_EMULATE
649 bool "Emulate SWP/SWPB instructions" 645 bool "Emulate SWP/SWPB instructions"
650 depends on !CPU_USE_DOMAINS && CPU_V7 646 depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..4e333fa2756f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
6 iomap.o 6 iomap.o
7 7
8obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ 8obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
9 mmap.o pgd.o mmu.o vmregion.o 9 mmap.o pgd.o mmu.o
10 10
11ifneq ($(CONFIG_MMU),y) 11ifneq ($(CONFIG_MMU),y)
12obj-y += nommu.o 12obj-y += nommu.o
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..7a0511191f6b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
34 * The ASID is used to tag entries in the CPU caches and TLBs. 34 * The ASID is used to tag entries in the CPU caches and TLBs.
35 * The context ID is used by debuggers and trace logic, and 35 * The context ID is used by debuggers and trace logic, and
36 * should be unique within all running processes. 36 * should be unique within all running processes.
37 *
38 * In big endian operation, the two 32 bit words are swapped if accesed by
39 * non 64-bit operations.
37 */ 40 */
38#define ASID_FIRST_VERSION (1ULL << ASID_BITS) 41#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
39#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) 42#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c8698..dda3904dc64c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
640 640
641 if (is_coherent || nommu()) 641 if (is_coherent || nommu())
642 addr = __alloc_simple_buffer(dev, size, gfp, &page); 642 addr = __alloc_simple_buffer(dev, size, gfp, &page);
643 else if (gfp & GFP_ATOMIC) 643 else if (!(gfp & __GFP_WAIT))
644 addr = __alloc_from_pool(size, &page); 644 addr = __alloc_from_pool(size, &page);
645 else if (!IS_ENABLED(CONFIG_CMA)) 645 else if (!IS_ENABLED(CONFIG_CMA))
646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
774 size_t size, enum dma_data_direction dir, 774 size_t size, enum dma_data_direction dir,
775 void (*op)(const void *, size_t, int)) 775 void (*op)(const void *, size_t, int))
776{ 776{
777 unsigned long pfn;
778 size_t left = size;
779
780 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
781 offset %= PAGE_SIZE;
782
777 /* 783 /*
778 * A single sg entry may refer to multiple physically contiguous 784 * A single sg entry may refer to multiple physically contiguous
779 * pages. But we still need to process highmem pages individually. 785 * pages. But we still need to process highmem pages individually.
780 * If highmem is not configured then the bulk of this loop gets 786 * If highmem is not configured then the bulk of this loop gets
781 * optimized out. 787 * optimized out.
782 */ 788 */
783 size_t left = size;
784 do { 789 do {
785 size_t len = left; 790 size_t len = left;
786 void *vaddr; 791 void *vaddr;
787 792
793 page = pfn_to_page(pfn);
794
788 if (PageHighMem(page)) { 795 if (PageHighMem(page)) {
789 if (len + offset > PAGE_SIZE) { 796 if (len + offset > PAGE_SIZE)
790 if (offset >= PAGE_SIZE) {
791 page += offset / PAGE_SIZE;
792 offset %= PAGE_SIZE;
793 }
794 len = PAGE_SIZE - offset; 797 len = PAGE_SIZE - offset;
795 }
796 vaddr = kmap_high_get(page); 798 vaddr = kmap_high_get(page);
797 if (vaddr) { 799 if (vaddr) {
798 vaddr += offset; 800 vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
809 op(vaddr, len, dir); 811 op(vaddr, len, dir);
810 } 812 }
811 offset = 0; 813 offset = 0;
812 page++; 814 pfn++;
813 left -= len; 815 left -= len;
814 } while (left); 816 } while (left);
815} 817}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 99db769307ec..2dffc010cc41 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,4 +1,6 @@
1#include <linux/module.h>
1#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/slab.h>
2 4
3#include <asm/cputype.h> 5#include <asm/cputype.h>
4#include <asm/idmap.h> 6#include <asm/idmap.h>
@@ -6,6 +8,7 @@
6#include <asm/pgtable.h> 8#include <asm/pgtable.h>
7#include <asm/sections.h> 9#include <asm/sections.h>
8#include <asm/system_info.h> 10#include <asm/system_info.h>
11#include <asm/virt.h>
9 12
10pgd_t *idmap_pgd; 13pgd_t *idmap_pgd;
11 14
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
59 } while (pud++, addr = next, addr != end); 62 } while (pud++, addr = next, addr != end);
60} 63}
61 64
62static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) 65static void identity_mapping_add(pgd_t *pgd, const char *text_start,
66 const char *text_end, unsigned long prot)
63{ 67{
64 unsigned long prot, next; 68 unsigned long addr, end;
69 unsigned long next;
70
71 addr = virt_to_phys(text_start);
72 end = virt_to_phys(text_end);
73
74 prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
65 75
66 prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
67 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 76 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
68 prot |= PMD_BIT4; 77 prot |= PMD_BIT4;
69 78
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
74 } while (pgd++, addr = next, addr != end); 83 } while (pgd++, addr = next, addr != end);
75} 84}
76 85
86#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
87pgd_t *hyp_pgd;
88
89extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
90
91static int __init init_static_idmap_hyp(void)
92{
93 hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
94 if (!hyp_pgd)
95 return -ENOMEM;
96
97 pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
98 __hyp_idmap_text_start, __hyp_idmap_text_end);
99 identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
100 __hyp_idmap_text_end, PMD_SECT_AP1);
101
102 return 0;
103}
104#else
105static int __init init_static_idmap_hyp(void)
106{
107 return 0;
108}
109#endif
110
77extern char __idmap_text_start[], __idmap_text_end[]; 111extern char __idmap_text_start[], __idmap_text_end[];
78 112
79static int __init init_static_idmap(void) 113static int __init init_static_idmap(void)
80{ 114{
81 phys_addr_t idmap_start, idmap_end; 115 int ret;
82 116
83 idmap_pgd = pgd_alloc(&init_mm); 117 idmap_pgd = pgd_alloc(&init_mm);
84 if (!idmap_pgd) 118 if (!idmap_pgd)
85 return -ENOMEM; 119 return -ENOMEM;
86 120
87 /* Add an identity mapping for the physical address of the section. */ 121 pr_info("Setting up static identity map for 0x%p - 0x%p\n",
88 idmap_start = virt_to_phys((void *)__idmap_text_start); 122 __idmap_text_start, __idmap_text_end);
89 idmap_end = virt_to_phys((void *)__idmap_text_end); 123 identity_mapping_add(idmap_pgd, __idmap_text_start,
124 __idmap_text_end, 0);
90 125
91 pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", 126 ret = init_static_idmap_hyp();
92 (long long)idmap_start, (long long)idmap_end);
93 identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
94 127
95 /* Flush L1 for the hardware to see this page table content */ 128 /* Flush L1 for the hardware to see this page table content */
96 flush_cache_louis(); 129 flush_cache_louis();
97 130
98 return 0; 131 return ret;
99} 132}
100early_initcall(init_static_idmap); 133early_initcall(init_static_idmap);
101 134
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
39#include <asm/mach/pci.h> 39#include <asm/mach/pci.h>
40#include "mm.h" 40#include "mm.h"
41 41
42
43LIST_HEAD(static_vmlist);
44
45static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
46 size_t size, unsigned int mtype)
47{
48 struct static_vm *svm;
49 struct vm_struct *vm;
50
51 list_for_each_entry(svm, &static_vmlist, list) {
52 vm = &svm->vm;
53 if (!(vm->flags & VM_ARM_STATIC_MAPPING))
54 continue;
55 if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
56 continue;
57
58 if (vm->phys_addr > paddr ||
59 paddr + size - 1 > vm->phys_addr + vm->size - 1)
60 continue;
61
62 return svm;
63 }
64
65 return NULL;
66}
67
68struct static_vm *find_static_vm_vaddr(void *vaddr)
69{
70 struct static_vm *svm;
71 struct vm_struct *vm;
72
73 list_for_each_entry(svm, &static_vmlist, list) {
74 vm = &svm->vm;
75
76 /* static_vmlist is ascending order */
77 if (vm->addr > vaddr)
78 break;
79
80 if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
81 return svm;
82 }
83
84 return NULL;
85}
86
87void __init add_static_vm_early(struct static_vm *svm)
88{
89 struct static_vm *curr_svm;
90 struct vm_struct *vm;
91 void *vaddr;
92
93 vm = &svm->vm;
94 vm_area_add_early(vm);
95 vaddr = vm->addr;
96
97 list_for_each_entry(curr_svm, &static_vmlist, list) {
98 vm = &curr_svm->vm;
99
100 if (vm->addr > vaddr)
101 break;
102 }
103 list_add_tail(&svm->list, &curr_svm->list);
104}
105
42int ioremap_page(unsigned long virt, unsigned long phys, 106int ioremap_page(unsigned long virt, unsigned long phys,
43 const struct mem_type *mtype) 107 const struct mem_type *mtype)
44{ 108{
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
197 const struct mem_type *type; 261 const struct mem_type *type;
198 int err; 262 int err;
199 unsigned long addr; 263 unsigned long addr;
200 struct vm_struct * area; 264 struct vm_struct *area;
265 phys_addr_t paddr = __pfn_to_phys(pfn);
201 266
202#ifndef CONFIG_ARM_LPAE 267#ifndef CONFIG_ARM_LPAE
203 /* 268 /*
204 * High mappings must be supersection aligned 269 * High mappings must be supersection aligned
205 */ 270 */
206 if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) 271 if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
207 return NULL; 272 return NULL;
208#endif 273#endif
209 274
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
219 /* 284 /*
220 * Try to reuse one of the static mapping whenever possible. 285 * Try to reuse one of the static mapping whenever possible.
221 */ 286 */
222 read_lock(&vmlist_lock); 287 if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
223 for (area = vmlist; area; area = area->next) { 288 struct static_vm *svm;
224 if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) 289
225 break; 290 svm = find_static_vm_paddr(paddr, size, mtype);
226 if (!(area->flags & VM_ARM_STATIC_MAPPING)) 291 if (svm) {
227 continue; 292 addr = (unsigned long)svm->vm.addr;
228 if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) 293 addr += paddr - svm->vm.phys_addr;
229 continue; 294 return (void __iomem *) (offset + addr);
230 if (__phys_to_pfn(area->phys_addr) > pfn || 295 }
231 __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
232 continue;
233 /* we can drop the lock here as we know *area is static */
234 read_unlock(&vmlist_lock);
235 addr = (unsigned long)area->addr;
236 addr += __pfn_to_phys(pfn) - area->phys_addr;
237 return (void __iomem *) (offset + addr);
238 } 296 }
239 read_unlock(&vmlist_lock);
240 297
241 /* 298 /*
242 * Don't allow RAM to be mapped - this causes problems with ARMv6+ 299 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
248 if (!area) 305 if (!area)
249 return NULL; 306 return NULL;
250 addr = (unsigned long)area->addr; 307 addr = (unsigned long)area->addr;
251 area->phys_addr = __pfn_to_phys(pfn); 308 area->phys_addr = paddr;
252 309
253#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) 310#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
254 if (DOMAIN_IO == 0 && 311 if (DOMAIN_IO == 0 &&
255 (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || 312 (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
256 cpu_is_xsc3()) && pfn >= 0x100000 && 313 cpu_is_xsc3()) && pfn >= 0x100000 &&
257 !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { 314 !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
258 area->flags |= VM_ARM_SECTION_MAPPING; 315 area->flags |= VM_ARM_SECTION_MAPPING;
259 err = remap_area_supersections(addr, pfn, size, type); 316 err = remap_area_supersections(addr, pfn, size, type);
260 } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { 317 } else if (!((paddr | size | addr) & ~PMD_MASK)) {
261 area->flags |= VM_ARM_SECTION_MAPPING; 318 area->flags |= VM_ARM_SECTION_MAPPING;
262 err = remap_area_sections(addr, pfn, size, type); 319 err = remap_area_sections(addr, pfn, size, type);
263 } else 320 } else
264#endif 321#endif
265 err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), 322 err = ioremap_page_range(addr, addr + size, paddr,
266 __pgprot(type->prot_pte)); 323 __pgprot(type->prot_pte));
267 324
268 if (err) { 325 if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
346void __iounmap(volatile void __iomem *io_addr) 403void __iounmap(volatile void __iomem *io_addr)
347{ 404{
348 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); 405 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
349 struct vm_struct *vm; 406 struct static_vm *svm;
407
408 /* If this is a static mapping, we must leave it alone */
409 svm = find_static_vm_vaddr(addr);
410 if (svm)
411 return;
350 412
351 read_lock(&vmlist_lock);
352 for (vm = vmlist; vm; vm = vm->next) {
353 if (vm->addr > addr)
354 break;
355 if (!(vm->flags & VM_IOREMAP))
356 continue;
357 /* If this is a static mapping we must leave it alone */
358 if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
359 (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
360 read_unlock(&vmlist_lock);
361 return;
362 }
363#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) 413#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
414 {
415 struct vm_struct *vm;
416
417 vm = find_vm_area(addr);
418
364 /* 419 /*
365 * If this is a section based mapping we need to handle it 420 * If this is a section based mapping we need to handle it
366 * specially as the VM subsystem does not know how to handle 421 * specially as the VM subsystem does not know how to handle
367 * such a beast. 422 * such a beast.
368 */ 423 */
369 if ((vm->addr == addr) && 424 if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
370 (vm->flags & VM_ARM_SECTION_MAPPING)) {
371 unmap_area_sections((unsigned long)vm->addr, vm->size); 425 unmap_area_sections((unsigned long)vm->addr, vm->size);
372 break;
373 }
374#endif
375 } 426 }
376 read_unlock(&vmlist_lock); 427#endif
377 428
378 vunmap(addr); 429 vunmap(addr);
379} 430}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
1#ifdef CONFIG_MMU 1#ifdef CONFIG_MMU
2#include <linux/list.h>
3#include <linux/vmalloc.h>
2 4
3/* the upper-most page table pointer */ 5/* the upper-most page table pointer */
4extern pmd_t *top_pmd; 6extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
65/* consistent regions used by dma_alloc_attrs() */ 67/* consistent regions used by dma_alloc_attrs() */
66#define VM_ARM_DMA_CONSISTENT 0x20000000 68#define VM_ARM_DMA_CONSISTENT 0x20000000
67 69
70
71struct static_vm {
72 struct vm_struct vm;
73 struct list_head list;
74};
75
76extern struct list_head static_vmlist;
77extern struct static_vm *find_static_vm_vaddr(void *vaddr);
78extern __init void add_static_vm_early(struct static_vm *svm);
79
68#endif 80#endif
69 81
70#ifdef CONFIG_ZONE_DMA 82#ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..e95a996ab78f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
57static unsigned int ecc_mask __initdata = 0; 57static unsigned int ecc_mask __initdata = 0;
58pgprot_t pgprot_user; 58pgprot_t pgprot_user;
59pgprot_t pgprot_kernel; 59pgprot_t pgprot_kernel;
60pgprot_t pgprot_hyp_device;
61pgprot_t pgprot_s2;
62pgprot_t pgprot_s2_device;
60 63
61EXPORT_SYMBOL(pgprot_user); 64EXPORT_SYMBOL(pgprot_user);
62EXPORT_SYMBOL(pgprot_kernel); 65EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
66 unsigned int cr_mask; 69 unsigned int cr_mask;
67 pmdval_t pmd; 70 pmdval_t pmd;
68 pteval_t pte; 71 pteval_t pte;
72 pteval_t pte_s2;
69}; 73};
70 74
75#ifdef CONFIG_ARM_LPAE
76#define s2_policy(policy) policy
77#else
78#define s2_policy(policy) 0
79#endif
80
71static struct cachepolicy cache_policies[] __initdata = { 81static struct cachepolicy cache_policies[] __initdata = {
72 { 82 {
73 .policy = "uncached", 83 .policy = "uncached",
74 .cr_mask = CR_W|CR_C, 84 .cr_mask = CR_W|CR_C,
75 .pmd = PMD_SECT_UNCACHED, 85 .pmd = PMD_SECT_UNCACHED,
76 .pte = L_PTE_MT_UNCACHED, 86 .pte = L_PTE_MT_UNCACHED,
87 .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
77 }, { 88 }, {
78 .policy = "buffered", 89 .policy = "buffered",
79 .cr_mask = CR_C, 90 .cr_mask = CR_C,
80 .pmd = PMD_SECT_BUFFERED, 91 .pmd = PMD_SECT_BUFFERED,
81 .pte = L_PTE_MT_BUFFERABLE, 92 .pte = L_PTE_MT_BUFFERABLE,
93 .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
82 }, { 94 }, {
83 .policy = "writethrough", 95 .policy = "writethrough",
84 .cr_mask = 0, 96 .cr_mask = 0,
85 .pmd = PMD_SECT_WT, 97 .pmd = PMD_SECT_WT,
86 .pte = L_PTE_MT_WRITETHROUGH, 98 .pte = L_PTE_MT_WRITETHROUGH,
99 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
87 }, { 100 }, {
88 .policy = "writeback", 101 .policy = "writeback",
89 .cr_mask = 0, 102 .cr_mask = 0,
90 .pmd = PMD_SECT_WB, 103 .pmd = PMD_SECT_WB,
91 .pte = L_PTE_MT_WRITEBACK, 104 .pte = L_PTE_MT_WRITEBACK,
105 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
92 }, { 106 }, {
93 .policy = "writealloc", 107 .policy = "writealloc",
94 .cr_mask = 0, 108 .cr_mask = 0,
95 .pmd = PMD_SECT_WBWA, 109 .pmd = PMD_SECT_WBWA,
96 .pte = L_PTE_MT_WRITEALLOC, 110 .pte = L_PTE_MT_WRITEALLOC,
111 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
97 } 112 }
98}; 113};
99 114
@@ -283,7 +298,7 @@ static struct mem_type mem_types[] = {
283 }, 298 },
284 [MT_MEMORY_SO] = { 299 [MT_MEMORY_SO] = {
285 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 300 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
286 L_PTE_MT_UNCACHED, 301 L_PTE_MT_UNCACHED | L_PTE_XN,
287 .prot_l1 = PMD_TYPE_TABLE, 302 .prot_l1 = PMD_TYPE_TABLE,
288 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | 303 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
289 PMD_SECT_UNCACHED | PMD_SECT_XN, 304 PMD_SECT_UNCACHED | PMD_SECT_XN,
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
310 struct cachepolicy *cp; 325 struct cachepolicy *cp;
311 unsigned int cr = get_cr(); 326 unsigned int cr = get_cr();
312 pteval_t user_pgprot, kern_pgprot, vecs_pgprot; 327 pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
328 pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
313 int cpu_arch = cpu_architecture(); 329 int cpu_arch = cpu_architecture();
314 int i; 330 int i;
315 331
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
421 */ 437 */
422 cp = &cache_policies[cachepolicy]; 438 cp = &cache_policies[cachepolicy];
423 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 439 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
440 s2_pgprot = cp->pte_s2;
441 hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
424 442
425 /* 443 /*
426 * ARMv6 and above have extended page tables. 444 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
444 user_pgprot |= L_PTE_SHARED; 462 user_pgprot |= L_PTE_SHARED;
445 kern_pgprot |= L_PTE_SHARED; 463 kern_pgprot |= L_PTE_SHARED;
446 vecs_pgprot |= L_PTE_SHARED; 464 vecs_pgprot |= L_PTE_SHARED;
465 s2_pgprot |= L_PTE_SHARED;
447 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; 466 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
448 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; 467 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
449 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 468 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
498 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); 517 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
499 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | 518 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
500 L_PTE_DIRTY | kern_pgprot); 519 L_PTE_DIRTY | kern_pgprot);
520 pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
521 pgprot_s2_device = __pgprot(s2_device_pgprot);
522 pgprot_hyp_device = __pgprot(hyp_device_pgprot);
501 523
502 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 524 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
503 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 525 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
757{ 779{
758 struct map_desc *md; 780 struct map_desc *md;
759 struct vm_struct *vm; 781 struct vm_struct *vm;
782 struct static_vm *svm;
760 783
761 if (!nr) 784 if (!nr)
762 return; 785 return;
763 786
764 vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); 787 svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
765 788
766 for (md = io_desc; nr; md++, nr--) { 789 for (md = io_desc; nr; md++, nr--) {
767 create_mapping(md); 790 create_mapping(md);
791
792 vm = &svm->vm;
768 vm->addr = (void *)(md->virtual & PAGE_MASK); 793 vm->addr = (void *)(md->virtual & PAGE_MASK);
769 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); 794 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
770 vm->phys_addr = __pfn_to_phys(md->pfn); 795 vm->phys_addr = __pfn_to_phys(md->pfn);
771 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 796 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
772 vm->flags |= VM_ARM_MTYPE(md->type); 797 vm->flags |= VM_ARM_MTYPE(md->type);
773 vm->caller = iotable_init; 798 vm->caller = iotable_init;
774 vm_area_add_early(vm++); 799 add_static_vm_early(svm++);
775 } 800 }
776} 801}
777 802
@@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
779 void *caller) 804 void *caller)
780{ 805{
781 struct vm_struct *vm; 806 struct vm_struct *vm;
807 struct static_vm *svm;
808
809 svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
782 810
783 vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); 811 vm = &svm->vm;
784 vm->addr = (void *)addr; 812 vm->addr = (void *)addr;
785 vm->size = size; 813 vm->size = size;
786 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; 814 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
787 vm->caller = caller; 815 vm->caller = caller;
788 vm_area_add_early(vm); 816 add_static_vm_early(svm);
789} 817}
790 818
791#ifndef CONFIG_ARM_LPAE 819#ifndef CONFIG_ARM_LPAE
@@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
810 838
811static void __init fill_pmd_gaps(void) 839static void __init fill_pmd_gaps(void)
812{ 840{
841 struct static_vm *svm;
813 struct vm_struct *vm; 842 struct vm_struct *vm;
814 unsigned long addr, next = 0; 843 unsigned long addr, next = 0;
815 pmd_t *pmd; 844 pmd_t *pmd;
816 845
817 /* we're still single threaded hence no lock needed here */ 846 list_for_each_entry(svm, &static_vmlist, list) {
818 for (vm = vmlist; vm; vm = vm->next) { 847 vm = &svm->vm;
819 if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
820 continue;
821 addr = (unsigned long)vm->addr; 848 addr = (unsigned long)vm->addr;
822 if (addr < next) 849 if (addr < next)
823 continue; 850 continue;
@@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void)
857#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H) 884#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
858static void __init pci_reserve_io(void) 885static void __init pci_reserve_io(void)
859{ 886{
860 struct vm_struct *vm; 887 struct static_vm *svm;
861 unsigned long addr;
862 888
863 /* we're still single threaded hence no lock needed here */ 889 svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
864 for (vm = vmlist; vm; vm = vm->next) { 890 if (svm)
865 if (!(vm->flags & VM_ARM_STATIC_MAPPING)) 891 return;
866 continue;
867 addr = (unsigned long)vm->addr;
868 addr &= ~(SZ_2M - 1);
869 if (addr == PCI_IO_VIRT_BASE)
870 return;
871 892
872 }
873 vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); 893 vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
874} 894}
875#else 895#else
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
38 38
39/* 39/*
40 * mmid - get context id from mm pointer (mm->context.id) 40 * mmid - get context id from mm pointer (mm->context.id)
41 * note, this field is 64bit, so in big-endian the two words are swapped too.
41 */ 42 */
42 .macro mmid, rd, rn 43 .macro mmid, rd, rn
44#ifdef __ARMEB__
45 ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ]
46#else
43 ldr \rd, [\rn, #MM_CONTEXT_ID] 47 ldr \rd, [\rn, #MM_CONTEXT_ID]
48#endif
44 .endm 49 .endm
45 50
46/* 51/*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..bcaaa8de9325 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
101ENTRY(cpu_v6_switch_mm) 101ENTRY(cpu_v6_switch_mm)
102#ifdef CONFIG_MMU 102#ifdef CONFIG_MMU
103 mov r2, #0 103 mov r2, #0
104 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 104 mmid r1, r1 @ get mm->context.id
105 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) 105 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
106 ALT_UP(orr r0, r0, #TTB_FLAGS_UP) 106 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
107 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 107 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..78f520bc0e99 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
40ENTRY(cpu_v7_switch_mm) 40ENTRY(cpu_v7_switch_mm)
41#ifdef CONFIG_MMU 41#ifdef CONFIG_MMU
42 mov r2, #0 42 mov r2, #0
43 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 43 mmid r1, r1 @ get mm->context.id
44 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) 44 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
45 ALT_UP(orr r0, r0, #TTB_FLAGS_UP) 45 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
46#ifdef CONFIG_ARM_ERRATA_430973 46#ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..50bf1dafc9ea 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
47 */ 47 */
48ENTRY(cpu_v7_switch_mm) 48ENTRY(cpu_v7_switch_mm)
49#ifdef CONFIG_MMU 49#ifdef CONFIG_MMU
50 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 50 mmid r1, r1 @ get mm->context.id
51 and r3, r1, #0xff 51 and r3, r1, #0xff
52 mov r3, r3, lsl #(48 - 32) @ ASID 52 mov r3, r3, lsl #(48 - 32) @ ASID
53 mcrr p15, 0, r0, r3, c2 @ set TTB 0 53 mcrr p15, 0, r0, r3, c2 @ set TTB 0
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
1#include <linux/fs.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <linux/proc_fs.h>
5#include <linux/seq_file.h>
6#include <linux/slab.h>
7
8#include "vmregion.h"
9
10/*
11 * VM region handling support.
12 *
13 * This should become something generic, handling VM region allocations for
14 * vmalloc and similar (ioremap, module space, etc).
15 *
16 * I envisage vmalloc()'s supporting vm_struct becoming:
17 *
18 * struct vm_struct {
19 * struct vmregion region;
20 * unsigned long flags;
21 * struct page **pages;
22 * unsigned int nr_pages;
23 * unsigned long phys_addr;
24 * };
25 *
26 * get_vm_area() would then call vmregion_alloc with an appropriate
27 * struct vmregion head (eg):
28 *
29 * struct vmregion vmalloc_head = {
30 * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
31 * .vm_start = VMALLOC_START,
32 * .vm_end = VMALLOC_END,
33 * };
34 *
35 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
36 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
37 * would have to initialise this each time prior to calling vmregion_alloc().
38 */
39
40struct arm_vmregion *
41arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
42 size_t size, gfp_t gfp, const void *caller)
43{
44 unsigned long start = head->vm_start, addr = head->vm_end;
45 unsigned long flags;
46 struct arm_vmregion *c, *new;
47
48 if (head->vm_end - head->vm_start < size) {
49 printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
50 __func__, size);
51 goto out;
52 }
53
54 new = kmalloc(sizeof(struct arm_vmregion), gfp);
55 if (!new)
56 goto out;
57
58 new->caller = caller;
59
60 spin_lock_irqsave(&head->vm_lock, flags);
61
62 addr = rounddown(addr - size, align);
63 list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
64 if (addr >= c->vm_end)
65 goto found;
66 addr = rounddown(c->vm_start - size, align);
67 if (addr < start)
68 goto nospc;
69 }
70
71 found:
72 /*
73 * Insert this entry after the one we found.
74 */
75 list_add(&new->vm_list, &c->vm_list);
76 new->vm_start = addr;
77 new->vm_end = addr + size;
78 new->vm_active = 1;
79
80 spin_unlock_irqrestore(&head->vm_lock, flags);
81 return new;
82
83 nospc:
84 spin_unlock_irqrestore(&head->vm_lock, flags);
85 kfree(new);
86 out:
87 return NULL;
88}
89
90static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
91{
92 struct arm_vmregion *c;
93
94 list_for_each_entry(c, &head->vm_list, vm_list) {
95 if (c->vm_active && c->vm_start == addr)
96 goto out;
97 }
98 c = NULL;
99 out:
100 return c;
101}
102
103struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
104{
105 struct arm_vmregion *c;
106 unsigned long flags;
107
108 spin_lock_irqsave(&head->vm_lock, flags);
109 c = __arm_vmregion_find(head, addr);
110 spin_unlock_irqrestore(&head->vm_lock, flags);
111 return c;
112}
113
114struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
115{
116 struct arm_vmregion *c;
117 unsigned long flags;
118
119 spin_lock_irqsave(&head->vm_lock, flags);
120 c = __arm_vmregion_find(head, addr);
121 if (c)
122 c->vm_active = 0;
123 spin_unlock_irqrestore(&head->vm_lock, flags);
124 return c;
125}
126
127void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
128{
129 unsigned long flags;
130
131 spin_lock_irqsave(&head->vm_lock, flags);
132 list_del(&c->vm_list);
133 spin_unlock_irqrestore(&head->vm_lock, flags);
134
135 kfree(c);
136}
137
138#ifdef CONFIG_PROC_FS
139static int arm_vmregion_show(struct seq_file *m, void *p)
140{
141 struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
142
143 seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
144 c->vm_end - c->vm_start);
145 if (c->caller)
146 seq_printf(m, " %pS", (void *)c->caller);
147 seq_putc(m, '\n');
148 return 0;
149}
150
151static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
152{
153 struct arm_vmregion_head *h = m->private;
154 spin_lock_irq(&h->vm_lock);
155 return seq_list_start(&h->vm_list, *pos);
156}
157
158static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
159{
160 struct arm_vmregion_head *h = m->private;
161 return seq_list_next(p, &h->vm_list, pos);
162}
163
164static void arm_vmregion_stop(struct seq_file *m, void *p)
165{
166 struct arm_vmregion_head *h = m->private;
167 spin_unlock_irq(&h->vm_lock);
168}
169
170static const struct seq_operations arm_vmregion_ops = {
171 .start = arm_vmregion_start,
172 .stop = arm_vmregion_stop,
173 .next = arm_vmregion_next,
174 .show = arm_vmregion_show,
175};
176
177static int arm_vmregion_open(struct inode *inode, struct file *file)
178{
179 struct arm_vmregion_head *h = PDE(inode)->data;
180 int ret = seq_open(file, &arm_vmregion_ops);
181 if (!ret) {
182 struct seq_file *m = file->private_data;
183 m->private = h;
184 }
185 return ret;
186}
187
188static const struct file_operations arm_vmregion_fops = {
189 .open = arm_vmregion_open,
190 .read = seq_read,
191 .llseek = seq_lseek,
192 .release = seq_release,
193};
194
195int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
196{
197 proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
198 return 0;
199}
200#else
201int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
202{
203 return 0;
204}
205#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef VMREGION_H
2#define VMREGION_H
3
4#include <linux/spinlock.h>
5#include <linux/list.h>
6
7struct page;
8
9struct arm_vmregion_head {
10 spinlock_t vm_lock;
11 struct list_head vm_list;
12 unsigned long vm_start;
13 unsigned long vm_end;
14};
15
16struct arm_vmregion {
17 struct list_head vm_list;
18 unsigned long vm_start;
19 unsigned long vm_end;
20 int vm_active;
21 const void *caller;
22};
23
24struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
25struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
26struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
27void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
28
29int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
30
31#endif
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index dd703ef09b8d..b178d44e9eaa 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -20,7 +20,7 @@
20 */ 20 */
21ENTRY(versatile_secondary_startup) 21ENTRY(versatile_secondary_startup)
22 mrc p15, 0, r0, c0, c0, 5 22 mrc p15, 0, r0, c0, c0, 5
23 and r0, r0, #15 23 bic r0, #0xff000000
24 adr r4, 1f 24 adr r4, 1f
25 ldmia r4, {r5, r6} 25 ldmia r4, {r5, r6}
26 sub r4, r4, r5 26 sub r4, r4, r5
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c985981..323ce1a62bbf 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
22@ IRQs disabled. 22@ IRQs disabled.
23@ 23@
24ENTRY(do_vfp) 24ENTRY(do_vfp)
25#ifdef CONFIG_PREEMPT 25#ifdef CONFIG_PREEMPT_COUNT
26 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 26 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
27 add r11, r4, #1 @ increment it 27 add r11, r4, #1 @ increment it
28 str r11, [r10, #TI_PREEMPT] 28 str r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
35ENDPROC(do_vfp) 35ENDPROC(do_vfp)
36 36
37ENTRY(vfp_null_entry) 37ENTRY(vfp_null_entry)
38#ifdef CONFIG_PREEMPT 38#ifdef CONFIG_PREEMPT_COUNT
39 get_thread_info r10 39 get_thread_info r10
40 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 40 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
41 sub r11, r4, #1 @ decrement it 41 sub r11, r4, #1 @ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
53 53
54 __INIT 54 __INIT
55ENTRY(vfp_testing_entry) 55ENTRY(vfp_testing_entry)
56#ifdef CONFIG_PREEMPT 56#ifdef CONFIG_PREEMPT_COUNT
57 get_thread_info r10 57 get_thread_info r10
58 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 58 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
59 sub r11, r4, #1 @ decrement it 59 sub r11, r4, #1 @ decrement it
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index ea0349f63586..dd5e56f95f3f 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
168 @ else it's one 32-bit instruction, so 168 @ else it's one 32-bit instruction, so
169 @ always subtract 4 from the following 169 @ always subtract 4 from the following
170 @ instruction address. 170 @ instruction address.
171#ifdef CONFIG_PREEMPT 171#ifdef CONFIG_PREEMPT_COUNT
172 get_thread_info r10 172 get_thread_info r10
173 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 173 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
174 sub r11, r4, #1 @ decrement it 174 sub r11, r4, #1 @ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
192 @ not recognised by VFP 192 @ not recognised by VFP
193 193
194 DBGSTR "not VFP" 194 DBGSTR "not VFP"
195#ifdef CONFIG_PREEMPT 195#ifdef CONFIG_PREEMPT_COUNT
196 get_thread_info r10 196 get_thread_info r10
197 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 197 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
198 sub r11, r4, #1 @ decrement it 198 sub r11, r4, #1 @ decrement it
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 594c5cf07c00..7c43569e3141 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,7 +22,6 @@ config ARM64
22 select HAVE_GENERIC_DMA_COHERENT 22 select HAVE_GENERIC_DMA_COHERENT
23 select HAVE_GENERIC_HARDIRQS 23 select HAVE_GENERIC_HARDIRQS
24 select HAVE_HW_BREAKPOINT if PERF_EVENTS 24 select HAVE_HW_BREAKPOINT if PERF_EVENTS
25 select HAVE_IRQ_WORK
26 select HAVE_MEMBLOCK 25 select HAVE_MEMBLOCK
27 select HAVE_PERF_EVENTS 26 select HAVE_PERF_EVENTS
28 select IRQ_DOMAIN 27 select IRQ_DOMAIN
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a8fbd7eaa2ed..0337cdb0667b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -98,14 +98,9 @@ static void default_idle(void)
98 local_irq_enable(); 98 local_irq_enable();
99} 99}
100 100
101void (*pm_idle)(void) = default_idle;
102EXPORT_SYMBOL_GPL(pm_idle);
103
104/* 101/*
105 * The idle thread, has rather strange semantics for calling pm_idle, 102 * The idle thread.
106 * but this is what x86 does and we need to do the same, so that 103 * We always respect 'hlt_counter' to prevent low power idle.
107 * things like cpuidle get called in the same way. The only difference
108 * is that we always respect 'hlt_counter' to prevent low power idle.
109 */ 104 */
110void cpu_idle(void) 105void cpu_idle(void)
111{ 106{
@@ -123,10 +118,10 @@ void cpu_idle(void)
123 local_irq_disable(); 118 local_irq_disable();
124 if (!need_resched()) { 119 if (!need_resched()) {
125 stop_critical_timings(); 120 stop_critical_timings();
126 pm_idle(); 121 default_idle();
127 start_critical_timings(); 122 start_critical_timings();
128 /* 123 /*
129 * pm_idle functions should always return 124 * default_idle functions should always return
130 * with IRQs enabled. 125 * with IRQs enabled.
131 */ 126 */
132 WARN_ON(irqs_disabled()); 127 WARN_ON(irqs_disabled());
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index aaf5199d8fcb..b3d18f9f3e8d 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
336#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 336#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
337#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 337#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
338 338
339/* drivers/base/dma-mapping.c */
340extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
341 void *cpu_addr, dma_addr_t dma_addr, size_t size);
342extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
343 void *cpu_addr, dma_addr_t dma_addr,
344 size_t size);
345
346#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
347#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
348
339#endif /* __ASM_AVR32_DMA_MAPPING_H */ 349#endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index b6f3ad5441c5..67e4aaad78f5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -24,7 +24,6 @@ config BLACKFIN
24 select HAVE_FUNCTION_TRACER 24 select HAVE_FUNCTION_TRACER
25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
26 select HAVE_IDE 26 select HAVE_IDE
27 select HAVE_IRQ_WORK
28 select HAVE_KERNEL_GZIP if RAMKERNEL 27 select HAVE_KERNEL_GZIP if RAMKERNEL
29 select HAVE_KERNEL_BZIP2 if RAMKERNEL 28 select HAVE_KERNEL_BZIP2 if RAMKERNEL
30 select HAVE_KERNEL_LZMA if RAMKERNEL 29 select HAVE_KERNEL_LZMA if RAMKERNEL
@@ -38,7 +37,6 @@ config BLACKFIN
38 select HAVE_GENERIC_HARDIRQS 37 select HAVE_GENERIC_HARDIRQS
39 select GENERIC_ATOMIC64 38 select GENERIC_ATOMIC64
40 select GENERIC_IRQ_PROBE 39 select GENERIC_IRQ_PROBE
41 select IRQ_PER_CPU if SMP
42 select USE_GENERIC_SMP_HELPERS if SMP 40 select USE_GENERIC_SMP_HELPERS if SMP
43 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG 41 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
44 select GENERIC_SMP_IDLE_THREAD 42 select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index bbf461076a0a..054d9ec57d9d 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
154 _dma_sync((dma_addr_t)vaddr, size, dir); 154 _dma_sync((dma_addr_t)vaddr, size, dir);
155} 155}
156 156
157/* drivers/base/dma-mapping.c */
158extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
159 void *cpu_addr, dma_addr_t dma_addr, size_t size);
160extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
161 void *cpu_addr, dma_addr_t dma_addr,
162 size_t size);
163
164#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
165#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
166
157#endif /* _BLACKFIN_DMA_MAPPING_H */ 167#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e16ad9b0a99..8061426b7df5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,12 +39,6 @@ int nr_l1stack_tasks;
39void *l1_stack_base; 39void *l1_stack_base;
40unsigned long l1_stack_len; 40unsigned long l1_stack_len;
41 41
42/*
43 * Powermanagement idle function, if any..
44 */
45void (*pm_idle)(void) = NULL;
46EXPORT_SYMBOL(pm_idle);
47
48void (*pm_power_off)(void) = NULL; 42void (*pm_power_off)(void) = NULL;
49EXPORT_SYMBOL(pm_power_off); 43EXPORT_SYMBOL(pm_power_off);
50 44
@@ -81,7 +75,6 @@ void cpu_idle(void)
81{ 75{
82 /* endless idle loop with no priority at all */ 76 /* endless idle loop with no priority at all */
83 while (1) { 77 while (1) {
84 void (*idle)(void) = pm_idle;
85 78
86#ifdef CONFIG_HOTPLUG_CPU 79#ifdef CONFIG_HOTPLUG_CPU
87 if (cpu_is_offline(smp_processor_id())) 80 if (cpu_is_offline(smp_processor_id()))
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 3c694065030f..88bd0d899bdb 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
89#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) 89#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
90#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) 90#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
91 91
92/* Not supported for now */
93static inline int dma_mmap_coherent(struct device *dev,
94 struct vm_area_struct *vma, void *cpu_addr,
95 dma_addr_t dma_addr, size_t size)
96{
97 return -EINVAL;
98}
99
100static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
101 void *cpu_addr, dma_addr_t dma_addr,
102 size_t size)
103{
104 return -EINVAL;
105}
106
92#endif /* _ASM_C6X_DMA_MAPPING_H */ 107#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 8588b2ccf854..2f0f654f1b44 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
158{ 158{
159} 159}
160 160
161/* drivers/base/dma-mapping.c */
162extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
163 void *cpu_addr, dma_addr_t dma_addr, size_t size);
164extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
165 void *cpu_addr, dma_addr_t dma_addr,
166 size_t size);
167
168#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
169#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
170
161 171
162#endif 172#endif
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7f65be6f7f17..104ff4dd9b98 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -54,11 +54,6 @@ void enable_hlt(void)
54 54
55EXPORT_SYMBOL(enable_hlt); 55EXPORT_SYMBOL(enable_hlt);
56 56
57/*
58 * The following aren't currently used.
59 */
60void (*pm_idle)(void);
61
62extern void default_idle(void); 57extern void default_idle(void);
63 58
64void (*pm_power_off)(void); 59void (*pm_power_off)(void);
@@ -77,16 +72,12 @@ void cpu_idle (void)
77 while (1) { 72 while (1) {
78 rcu_idle_enter(); 73 rcu_idle_enter();
79 while (!need_resched()) { 74 while (!need_resched()) {
80 void (*idle)(void);
81 /* 75 /*
82 * Mark this as an RCU critical section so that 76 * Mark this as an RCU critical section so that
83 * synchronize_kernel() in the unload path waits 77 * synchronize_kernel() in the unload path waits
84 * for our completion. 78 * for our completion.
85 */ 79 */
86 idle = pm_idle; 80 default_idle();
87 if (!idle)
88 idle = default_idle;
89 idle();
90 } 81 }
91 rcu_idle_exit(); 82 rcu_idle_exit();
92 schedule_preempt_disabled(); 83 schedule_preempt_disabled();
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 9d262645f667..17df48fc8f44 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -3,7 +3,6 @@ config FRV
3 default y 3 default y
4 select HAVE_IDE 4 select HAVE_IDE
5 select HAVE_ARCH_TRACEHOOK 5 select HAVE_ARCH_TRACEHOOK
6 select HAVE_IRQ_WORK
7 select HAVE_PERF_EVENTS 6 select HAVE_PERF_EVENTS
8 select HAVE_UID16 7 select HAVE_UID16
9 select HAVE_GENERIC_HARDIRQS 8 select HAVE_GENERIC_HARDIRQS
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index dfb811002c64..1746a2b8e6e7 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
132 flush_write_buffers(); 132 flush_write_buffers();
133} 133}
134 134
135/* Not supported for now */
136static inline int dma_mmap_coherent(struct device *dev,
137 struct vm_area_struct *vma, void *cpu_addr,
138 dma_addr_t dma_addr, size_t size)
139{
140 return -EINVAL;
141}
142
143static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
144 void *cpu_addr, dma_addr_t dma_addr,
145 size_t size)
146{
147 return -EINVAL;
148}
149
135#endif /* _ASM_DMA_MAPPING_H */ 150#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0744f7d7b1fd..e4decc6b8947 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -12,9 +12,7 @@ config HEXAGON
12 # select ARCH_WANT_OPTIONAL_GPIOLIB 12 # select ARCH_WANT_OPTIONAL_GPIOLIB
13 # select ARCH_REQUIRE_GPIOLIB 13 # select ARCH_REQUIRE_GPIOLIB
14 # select HAVE_CLK 14 # select HAVE_CLK
15 # select IRQ_PER_CPU
16 # select GENERIC_PENDING_IRQ if SMP 15 # select GENERIC_PENDING_IRQ if SMP
17 select HAVE_IRQ_WORK
18 select GENERIC_ATOMIC64 16 select GENERIC_ATOMIC64
19 select HAVE_PERF_EVENTS 17 select HAVE_PERF_EVENTS
20 select HAVE_GENERIC_HARDIRQS 18 select HAVE_GENERIC_HARDIRQS
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3279646120e3..00c2e88f7755 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -29,7 +29,6 @@ config IA64
29 select ARCH_DISCARD_MEMBLOCK 29 select ARCH_DISCARD_MEMBLOCK
30 select GENERIC_IRQ_PROBE 30 select GENERIC_IRQ_PROBE
31 select GENERIC_PENDING_IRQ if SMP 31 select GENERIC_PENDING_IRQ if SMP
32 select IRQ_PER_CPU
33 select GENERIC_IRQ_SHOW 32 select GENERIC_IRQ_SHOW
34 select ARCH_WANT_OPTIONAL_GPIOLIB 33 select ARCH_WANT_OPTIONAL_GPIOLIB
35 select ARCH_HAVE_NMI_SAFE_CMPXCHG 34 select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 6192f7188654..916ffe770bcf 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device)
191 return aml_nfw_add_global_handler(); 191 return aml_nfw_add_global_handler();
192} 192}
193 193
194static int aml_nfw_remove(struct acpi_device *device, int type) 194static int aml_nfw_remove(struct acpi_device *device)
195{ 195{
196 return aml_nfw_remove_global_handler(); 196 return aml_nfw_remove_global_handler();
197} 197}
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 359e68a03ca3..faa1bf0da815 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -52,10 +52,6 @@
52 52
53/* Asm macros */ 53/* Asm macros */
54 54
55#define ACPI_ASM_MACROS
56#define BREAKPOINT3
57#define ACPI_DISABLE_IRQS() local_irq_disable()
58#define ACPI_ENABLE_IRQS() local_irq_enable()
59#define ACPI_FLUSH_CPU_CACHE() 55#define ACPI_FLUSH_CPU_CACHE()
60 56
61static inline int 57static inline int
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fcf7f08ab06..e2d3f5baf265 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -11,99 +11,19 @@
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 * 13 *
14 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec. 14 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
15 * Otherwise we measure cpu time in jiffies using the generic definitions. 15 * Otherwise we measure cpu time in jiffies using the generic definitions.
16 */ 16 */
17 17
18#ifndef __IA64_CPUTIME_H 18#ifndef __IA64_CPUTIME_H
19#define __IA64_CPUTIME_H 19#define __IA64_CPUTIME_H
20 20
21#ifndef CONFIG_VIRT_CPU_ACCOUNTING 21#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
22#include <asm-generic/cputime.h> 22# include <asm-generic/cputime.h>
23#else 23#else
24 24# include <asm/processor.h>
25#include <linux/time.h> 25# include <asm-generic/cputime_nsecs.h>
26#include <linux/jiffies.h>
27#include <asm/processor.h>
28
29typedef u64 __nocast cputime_t;
30typedef u64 __nocast cputime64_t;
31
32#define cputime_one_jiffy jiffies_to_cputime(1)
33
34/*
35 * Convert cputime <-> jiffies (HZ)
36 */
37#define cputime_to_jiffies(__ct) \
38 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
39#define jiffies_to_cputime(__jif) \
40 (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
41#define cputime64_to_jiffies64(__ct) \
42 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
43#define jiffies64_to_cputime64(__jif) \
44 (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
45
46/*
47 * Convert cputime <-> microseconds
48 */
49#define cputime_to_usecs(__ct) \
50 ((__force u64)(__ct) / NSEC_PER_USEC)
51#define usecs_to_cputime(__usecs) \
52 (__force cputime_t)((__usecs) * NSEC_PER_USEC)
53#define usecs_to_cputime64(__usecs) \
54 (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
55
56/*
57 * Convert cputime <-> seconds
58 */
59#define cputime_to_secs(__ct) \
60 ((__force u64)(__ct) / NSEC_PER_SEC)
61#define secs_to_cputime(__secs) \
62 (__force cputime_t)((__secs) * NSEC_PER_SEC)
63
64/*
65 * Convert cputime <-> timespec (nsec)
66 */
67static inline cputime_t timespec_to_cputime(const struct timespec *val)
68{
69 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
70 return (__force cputime_t) ret;
71}
72static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
73{
74 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
75 val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
76}
77
78/*
79 * Convert cputime <-> timeval (msec)
80 */
81static inline cputime_t timeval_to_cputime(struct timeval *val)
82{
83 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
84 return (__force cputime_t) ret;
85}
86static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
87{
88 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
89 val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
90}
91
92/*
93 * Convert cputime <-> clock (USER_HZ)
94 */
95#define cputime_to_clock_t(__ct) \
96 ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
97#define clock_t_to_cputime(__x) \
98 (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
99
100/*
101 * Convert cputime64 to clock.
102 */
103#define cputime64_to_clock_t(__ct) \
104 cputime_to_clock_t((__force cputime_t)__ct)
105
106extern void arch_vtime_task_switch(struct task_struct *tsk); 26extern void arch_vtime_task_switch(struct task_struct *tsk);
27#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
107 28
108#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
109#endif /* __IA64_CPUTIME_H */ 29#endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff2ae4136584..020d655ed082 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
31 mm_segment_t addr_limit; /* user-level address space limit */ 31 mm_segment_t addr_limit; /* user-level address space limit */
32 int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ 32 int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
33 struct restart_block restart_block; 33 struct restart_block restart_block;
34#ifdef CONFIG_VIRT_CPU_ACCOUNTING 34#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
35 __u64 ac_stamp; 35 __u64 ac_stamp;
36 __u64 ac_leave; 36 __u64 ac_leave;
37 __u64 ac_stime; 37 __u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
69#define task_stack_page(tsk) ((void *)(tsk)) 69#define task_stack_page(tsk) ((void *)(tsk))
70 70
71#define __HAVE_THREAD_FUNCTIONS 71#define __HAVE_THREAD_FUNCTIONS
72#ifdef CONFIG_VIRT_CPU_ACCOUNTING 72#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
73#define setup_thread_stack(p, org) \ 73#define setup_thread_stack(p, org) \
74 *task_thread_info(p) = *task_thread_info(org); \ 74 *task_thread_info(p) = *task_thread_info(org); \
75 task_thread_info(p)->ac_stime = 0; \ 75 task_thread_info(p)->ac_stime = 0; \
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index c57fa910f2c9..00cf03e0cb82 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,5 +1,5 @@
1 1
2#ifdef CONFIG_VIRT_CPU_ACCOUNTING 2#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
3/* read ar.itc in advance, and use it before leaving bank 0 */ 3/* read ar.itc in advance, and use it before leaving bank 0 */
4#define XEN_ACCOUNT_GET_STAMP \ 4#define XEN_ACCOUNT_GET_STAMP \
5 MOV_FROM_ITC(pUStk, p6, r20, r2); 5 MOV_FROM_ITC(pUStk, p6, r20, r2);
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index a48bd9a9927b..46c9e3007315 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -41,7 +41,7 @@ void foo(void)
41 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 41 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
42 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 42 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
43 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); 43 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
44#ifdef CONFIG_VIRT_CPU_ACCOUNTING 44#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
45 DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp)); 45 DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
46 DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave)); 46 DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
47 DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime)); 47 DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6bfd8429ee0f..7a53530f22c2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
724#endif 724#endif
725.global __paravirt_work_processed_syscall; 725.global __paravirt_work_processed_syscall;
726__paravirt_work_processed_syscall: 726__paravirt_work_processed_syscall:
727#ifdef CONFIG_VIRT_CPU_ACCOUNTING 727#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
728 adds r2=PT(LOADRS)+16,r12 728 adds r2=PT(LOADRS)+16,r12
729 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave 729 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
730 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 730 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
762 762
763 ld8 r29=[r2],16 // M0|1 load cr.ipsr 763 ld8 r29=[r2],16 // M0|1 load cr.ipsr
764 ld8 r28=[r3],16 // M0|1 load cr.iip 764 ld8 r28=[r3],16 // M0|1 load cr.iip
765#ifdef CONFIG_VIRT_CPU_ACCOUNTING 765#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
766(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13 766(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
767 ;; 767 ;;
768 ld8 r30=[r2],16 // M0|1 load cr.ifs 768 ld8 r30=[r2],16 // M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
793 ld8.fill r1=[r3],16 // M0|1 load r1 793 ld8.fill r1=[r3],16 // M0|1 load r1
794(pUStk) mov r17=1 // A 794(pUStk) mov r17=1 // A
795 ;; 795 ;;
796#ifdef CONFIG_VIRT_CPU_ACCOUNTING 796#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
797(pUStk) st1 [r15]=r17 // M2|3 797(pUStk) st1 [r15]=r17 // M2|3
798#else 798#else
799(pUStk) st1 [r14]=r17 // M2|3 799(pUStk) st1 [r14]=r17 // M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
813 shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition 813 shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
814 COVER // B add current frame into dirty partition & set cr.ifs 814 COVER // B add current frame into dirty partition & set cr.ifs
815 ;; 815 ;;
816#ifdef CONFIG_VIRT_CPU_ACCOUNTING 816#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
817 mov r19=ar.bsp // M2 get new backing store pointer 817 mov r19=ar.bsp // M2 get new backing store pointer
818 st8 [r14]=r22 // M save time at leave 818 st8 [r14]=r22 // M save time at leave
819 mov f10=f0 // F clear f10 819 mov f10=f0 // F clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
948 adds r16=PT(CR_IPSR)+16,r12 948 adds r16=PT(CR_IPSR)+16,r12
949 adds r17=PT(CR_IIP)+16,r12 949 adds r17=PT(CR_IIP)+16,r12
950 950
951#ifdef CONFIG_VIRT_CPU_ACCOUNTING 951#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
952 .pred.rel.mutex pUStk,pKStk 952 .pred.rel.mutex pUStk,pKStk
953 MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled 953 MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
954 MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave 954 MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
981 ;; 981 ;;
982 ld8.fill r12=[r16],16 982 ld8.fill r12=[r16],16
983 ld8.fill r13=[r17],16 983 ld8.fill r13=[r17],16
984#ifdef CONFIG_VIRT_CPU_ACCOUNTING 984#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
985(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 985(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
986#else 986#else
987(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 987(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
989 ;; 989 ;;
990 ld8 r20=[r16],16 // ar.fpsr 990 ld8 r20=[r16],16 // ar.fpsr
991 ld8.fill r15=[r17],16 991 ld8.fill r15=[r17],16
992#ifdef CONFIG_VIRT_CPU_ACCOUNTING 992#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
993(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred 993(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
994#endif 994#endif
995 ;; 995 ;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
997 ld8.fill r2=[r17] 997 ld8.fill r2=[r17]
998(pUStk) mov r17=1 998(pUStk) mov r17=1
999 ;; 999 ;;
1000#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1000#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1001 // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; 1001 // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;;
1002 // mib : mov add br -> mib : ld8 add br 1002 // mib : mov add br -> mib : ld8 add br
1003 // bbb_ : br nop cover;; mbb_ : mov br cover;; 1003 // bbb_ : br nop cover;; mbb_ : mov br cover;;
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e662f178b990..c4cd45d97749 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
529 nop.i 0 529 nop.i 0
530 ;; 530 ;;
531 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 531 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
532#ifdef CONFIG_VIRT_CPU_ACCOUNTING 532#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
533 MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting 533 MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
534#else 534#else
535 nop.m 0 535 nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
555 cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 555 cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
556 br.call.sptk.many b7=ia64_syscall_setup // B 556 br.call.sptk.many b7=ia64_syscall_setup // B
557 ;; 557 ;;
558#ifdef CONFIG_VIRT_CPU_ACCOUNTING 558#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
559 // mov.m r30=ar.itc is called in advance 559 // mov.m r30=ar.itc is called in advance
560 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2 560 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
561 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 561 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 4738ff7bd66a..9be4e497f3d3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
1073sched_clock = ia64_native_sched_clock 1073sched_clock = ia64_native_sched_clock
1074#endif 1074#endif
1075 1075
1076#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1076#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1077GLOBAL_ENTRY(cycle_to_cputime) 1077GLOBAL_ENTRY(cycle_to_cputime)
1078 alloc r16=ar.pfs,1,0,0,0 1078 alloc r16=ar.pfs,1,0,0,0
1079 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 1079 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
1091 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT 1091 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
1092 br.ret.sptk.many rp 1092 br.ret.sptk.many rp
1093END(cycle_to_cputime) 1093END(cycle_to_cputime)
1094#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 1094#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
1095 1095
1096#ifdef CONFIG_IA64_BRL_EMU 1096#ifdef CONFIG_IA64_BRL_EMU
1097 1097
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index fa25689fc453..689ffcaa284e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -784,7 +784,7 @@ ENTRY(break_fault)
784 784
785(p8) adds r28=16,r28 // A switch cr.iip to next bundle 785(p8) adds r28=16,r28 // A switch cr.iip to next bundle
786(p9) adds r8=1,r8 // A increment ei to next slot 786(p9) adds r8=1,r8 // A increment ei to next slot
787#ifdef CONFIG_VIRT_CPU_ACCOUNTING 787#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
788 ;; 788 ;;
789 mov b6=r30 // I0 setup syscall handler branch reg early 789 mov b6=r30 // I0 setup syscall handler branch reg early
790#else 790#else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
801 // 801 //
802/////////////////////////////////////////////////////////////////////// 802///////////////////////////////////////////////////////////////////////
803 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag 803 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
804#ifdef CONFIG_VIRT_CPU_ACCOUNTING 804#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
805 MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting 805 MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
806#else 806#else
807 mov b6=r30 // I0 setup syscall handler branch reg early 807 mov b6=r30 // I0 setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
817 cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? 817 cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
818 br.call.sptk.many b7=ia64_syscall_setup // B 818 br.call.sptk.many b7=ia64_syscall_setup // B
8191: 8191:
820#ifdef CONFIG_VIRT_CPU_ACCOUNTING 820#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
821 // mov.m r30=ar.itc is called in advance, and r13 is current 821 // mov.m r30=ar.itc is called in advance, and r13 is current
822 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A 822 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
823 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A 823 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
1043 DBG_FAULT(16) 1043 DBG_FAULT(16)
1044 FAULT(16) 1044 FAULT(16)
1045 1045
1046#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) 1046#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
1047 /* 1047 /*
1048 * There is no particular reason for this code to be here, other than 1048 * There is no particular reason for this code to be here, other than
1049 * that there happens to be space here that would go unused otherwise. 1049 * that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index d56753a11636..cc82a7d744c9 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -4,7 +4,7 @@
4#include "entry.h" 4#include "entry.h"
5#include "paravirt_inst.h" 5#include "paravirt_inst.h"
6 6
7#ifdef CONFIG_VIRT_CPU_ACCOUNTING 7#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
8/* read ar.itc in advance, and use it before leaving bank 0 */ 8/* read ar.itc in advance, and use it before leaving bank 0 */
9#define ACCOUNT_GET_STAMP \ 9#define ACCOUNT_GET_STAMP \
10(pUStk) mov.m r20=ar.itc; 10(pUStk) mov.m r20=ar.itc;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f8..e34f565f595a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
57 57
58unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; 58unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
59EXPORT_SYMBOL(boot_option_idle_override); 59EXPORT_SYMBOL(boot_option_idle_override);
60void (*pm_idle) (void);
61EXPORT_SYMBOL(pm_idle);
62void (*pm_power_off) (void); 60void (*pm_power_off) (void);
63EXPORT_SYMBOL(pm_power_off); 61EXPORT_SYMBOL(pm_power_off);
64 62
@@ -301,7 +299,6 @@ cpu_idle (void)
301 if (mark_idle) 299 if (mark_idle)
302 (*mark_idle)(1); 300 (*mark_idle)(1);
303 301
304 idle = pm_idle;
305 if (!idle) 302 if (!idle)
306 idle = default_idle; 303 idle = default_idle;
307 (*idle)(); 304 (*idle)();
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4265ff64219b..b7a5fffe0924 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
672 read_unlock(&tasklist_lock); 672 read_unlock(&tasklist_lock);
673} 673}
674 674
675static inline int
676thread_matches (struct task_struct *thread, unsigned long addr)
677{
678 unsigned long thread_rbs_end;
679 struct pt_regs *thread_regs;
680
681 if (ptrace_check_attach(thread, 0) < 0)
682 /*
683 * If the thread is not in an attachable state, we'll
684 * ignore it. The net effect is that if ADDR happens
685 * to overlap with the portion of the thread's
686 * register backing store that is currently residing
687 * on the thread's kernel stack, then ptrace() may end
688 * up accessing a stale value. But if the thread
689 * isn't stopped, that's a problem anyhow, so we're
690 * doing as well as we can...
691 */
692 return 0;
693
694 thread_regs = task_pt_regs(thread);
695 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
696 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
697 return 0;
698
699 return 1; /* looks like we've got a winner */
700}
701
702/* 675/*
703 * Write f32-f127 back to task->thread.fph if it has been modified. 676 * Write f32-f127 back to task->thread.fph if it has been modified.
704 */ 677 */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2f..2029cc0d2fc6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
1051 max_num_phys_stacked = num_phys_stacked; 1051 max_num_phys_stacked = num_phys_stacked;
1052 } 1052 }
1053 platform_cpu_init(); 1053 platform_cpu_init();
1054 pm_idle = default_idle;
1055} 1054}
1056 1055
1057void __init 1056void __init
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 88a794536bc0..fbaac1afb844 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
77}; 77};
78static struct clocksource *itc_clocksource; 78static struct clocksource *itc_clocksource;
79 79
80#ifdef CONFIG_VIRT_CPU_ACCOUNTING 80#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
81 81
82#include <linux/kernel_stat.h> 82#include <linux/kernel_stat.h>
83 83
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
136 136
137 account_system_time(tsk, 0, delta, delta); 137 account_system_time(tsk, 0, delta, delta);
138} 138}
139EXPORT_SYMBOL_GPL(vtime_account_system);
139 140
140void vtime_account_idle(struct task_struct *tsk) 141void vtime_account_idle(struct task_struct *tsk)
141{ 142{
142 account_idle_time(vtime_delta(tsk)); 143 account_idle_time(vtime_delta(tsk));
143} 144}
144 145
145#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 146#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
146 147
147static irqreturn_t 148static irqreturn_t
148timer_interrupt (int irq, void *dev_id) 149timer_interrupt (int irq, void *dev_id)
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c787..bde899e155d3 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
44 return tsk->thread.lr; 44 return tsk->thread.lr;
45} 45}
46 46
47/*
48 * Powermanagement idle function, if any..
49 */
50static void (*pm_idle)(void) = NULL;
51
52void (*pm_power_off)(void) = NULL; 47void (*pm_power_off)(void) = NULL;
53EXPORT_SYMBOL(pm_power_off); 48EXPORT_SYMBOL(pm_power_off);
54 49
55/* 50/*
56 * We use this is we don't have any better
57 * idle routine..
58 */
59static void default_idle(void)
60{
61 /* M32R_FIXME: Please use "cpu_sleep" mode. */
62 cpu_relax();
63}
64
65/*
66 * On SMP it's slightly faster (but much more power-consuming!)
67 * to poll the ->work.need_resched flag instead of waiting for the
68 * cross-CPU IPI to arrive. Use this option with caution.
69 */
70static void poll_idle (void)
71{
72 /* M32R_FIXME */
73 cpu_relax();
74}
75
76/*
77 * The idle thread. There's no useful work to be 51 * The idle thread. There's no useful work to be
78 * done, so just try to conserve power and have a 52 * done, so just try to conserve power and have a
79 * low exit latency (ie sit in a loop waiting for 53 * low exit latency (ie sit in a loop waiting for
@@ -84,14 +58,8 @@ void cpu_idle (void)
84 /* endless idle loop with no priority at all */ 58 /* endless idle loop with no priority at all */
85 while (1) { 59 while (1) {
86 rcu_idle_enter(); 60 rcu_idle_enter();
87 while (!need_resched()) { 61 while (!need_resched())
88 void (*idle)(void) = pm_idle; 62 cpu_relax();
89
90 if (!idle)
91 idle = default_idle;
92
93 idle();
94 }
95 rcu_idle_exit(); 63 rcu_idle_exit();
96 schedule_preempt_disabled(); 64 schedule_preempt_disabled();
97 } 65 }
@@ -120,21 +88,6 @@ void machine_power_off(void)
120 /* M32R_FIXME */ 88 /* M32R_FIXME */
121} 89}
122 90
123static int __init idle_setup (char *str)
124{
125 if (!strncmp(str, "poll", 4)) {
126 printk("using poll in idle threads.\n");
127 pm_idle = poll_idle;
128 } else if (!strncmp(str, "sleep", 4)) {
129 printk("using sleep in idle threads.\n");
130 pm_idle = default_idle;
131 }
132
133 return 1;
134}
135
136__setup("idle=", idle_setup);
137
138void show_regs(struct pt_regs * regs) 91void show_regs(struct pt_regs * regs)
139{ 92{
140 printk("\n"); 93 printk("\n");
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 17f7a45948ea..05aa53594d49 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@
5 5
6struct scatterlist; 6struct scatterlist;
7 7
8#ifndef CONFIG_MMU_SUN3
9static inline int dma_supported(struct device *dev, u64 mask) 8static inline int dma_supported(struct device *dev, u64 mask)
10{ 9{
11 return 1; 10 return 1;
@@ -21,6 +20,22 @@ extern void *dma_alloc_coherent(struct device *, size_t,
21extern void dma_free_coherent(struct device *, size_t, 20extern void dma_free_coherent(struct device *, size_t,
22 void *, dma_addr_t); 21 void *, dma_addr_t);
23 22
23static inline void *dma_alloc_attrs(struct device *dev, size_t size,
24 dma_addr_t *dma_handle, gfp_t flag,
25 struct dma_attrs *attrs)
26{
27 /* attrs is not supported and ignored */
28 return dma_alloc_coherent(dev, size, dma_handle, flag);
29}
30
31static inline void dma_free_attrs(struct device *dev, size_t size,
32 void *cpu_addr, dma_addr_t dma_handle,
33 struct dma_attrs *attrs)
34{
35 /* attrs is not supported and ignored */
36 dma_free_coherent(dev, size, cpu_addr, dma_handle);
37}
38
24static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, 39static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
25 dma_addr_t *handle, gfp_t flag) 40 dma_addr_t *handle, gfp_t flag)
26{ 41{
@@ -95,8 +110,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
95 return 0; 110 return 0;
96} 111}
97 112
98#else 113/* drivers/base/dma-mapping.c */
99#include <asm-generic/dma-mapping-broken.h> 114extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
100#endif 115 void *cpu_addr, dma_addr_t dma_addr, size_t size);
116extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
117 void *cpu_addr, dma_addr_t dma_addr,
118 size_t size);
119
120#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
121#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
101 122
102#endif /* _M68K_DMA_MAPPING_H */ 123#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index bf86b29fe64a..037028f4ab70 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp);
64 */ 64 */
65#define VMALLOC_START 0 65#define VMALLOC_START 0
66#define VMALLOC_END 0xffffffff 66#define VMALLOC_END 0xffffffff
67#define KMAP_START 0
68#define KMAP_END 0xffffffff
67 69
68#include <asm-generic/pgtable.h> 70#include <asm-generic/pgtable.h>
69 71
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index ae700f49e51d..b0768a657920 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
130#define start_thread(_regs, _pc, _usp) \ 130#define start_thread(_regs, _pc, _usp) \
131do { \ 131do { \
132 (_regs)->pc = (_pc); \ 132 (_regs)->pc = (_pc); \
133 ((struct switch_stack *)(_regs))[-1].a6 = 0; \
134 setframeformat(_regs); \ 133 setframeformat(_regs); \
135 if (current->mm) \ 134 if (current->mm) \
136 (_regs)->d5 = current->mm->start_data; \ 135 (_regs)->d5 = current->mm->start_data; \
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 847994ce6804..f9337f614660 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 348 7#define NR_syscalls 349
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index b94bfbf90705..625f321001dc 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -353,5 +353,6 @@
353#define __NR_process_vm_readv 345 353#define __NR_process_vm_readv 345
354#define __NR_process_vm_writev 346 354#define __NR_process_vm_writev 346
355#define __NR_kcmp 347 355#define __NR_kcmp 347
356#define __NR_finit_module 348
356 357
357#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 358#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 068ad49210d6..655347d80780 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -20,7 +20,5 @@ obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
20obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o 20obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
21obj-$(CONFIG_PCI) += pcibios.o 21obj-$(CONFIG_PCI) += pcibios.o
22 22
23ifndef CONFIG_MMU_SUN3 23obj-$(CONFIG_HAS_DMA) += dma.o
24obj-y += dma.o
25endif
26 24
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c30da5b3f2db..3f04ea0ab802 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -368,4 +368,5 @@ ENTRY(sys_call_table)
368 .long sys_process_vm_readv /* 345 */ 368 .long sys_process_vm_readv /* 345 */
369 .long sys_process_vm_writev 369 .long sys_process_vm_writev
370 .long sys_kcmp 370 .long sys_kcmp
371 .long sys_finit_module
371 372
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index f0e05bce92f2..afd8106fd83b 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -39,6 +39,11 @@
39void *empty_zero_page; 39void *empty_zero_page;
40EXPORT_SYMBOL(empty_zero_page); 40EXPORT_SYMBOL(empty_zero_page);
41 41
42#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
43extern void init_pointer_table(unsigned long ptable);
44extern pmd_t *zero_pgtable;
45#endif
46
42#ifdef CONFIG_MMU 47#ifdef CONFIG_MMU
43 48
44pg_data_t pg_data_map[MAX_NUMNODES]; 49pg_data_t pg_data_map[MAX_NUMNODES];
@@ -69,9 +74,6 @@ void __init m68k_setup_node(int node)
69 node_set_online(node); 74 node_set_online(node);
70} 75}
71 76
72extern void init_pointer_table(unsigned long ptable);
73extern pmd_t *zero_pgtable;
74
75#else /* CONFIG_MMU */ 77#else /* CONFIG_MMU */
76 78
77/* 79/*
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a5b74f729e5b..6ff2dcff3410 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs)
41 regs->msr, regs->ear, regs->esr, regs->fsr); 41 regs->msr, regs->ear, regs->esr, regs->fsr);
42} 42}
43 43
44void (*pm_idle)(void);
45void (*pm_power_off)(void) = NULL; 44void (*pm_power_off)(void) = NULL;
46EXPORT_SYMBOL(pm_power_off); 45EXPORT_SYMBOL(pm_power_off);
47 46
@@ -98,8 +97,6 @@ void cpu_idle(void)
98 97
99 /* endless idle loop with no priority at all */ 98 /* endless idle loop with no priority at all */
100 while (1) { 99 while (1) {
101 void (*idle)(void) = pm_idle;
102
103 if (!idle) 100 if (!idle)
104 idle = default_idle; 101 idle = default_idle;
105 102
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2ac626ab9d43..9becc44d9d7a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,7 +4,6 @@ config MIPS
4 select HAVE_GENERIC_DMA_COHERENT 4 select HAVE_GENERIC_DMA_COHERENT
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_IRQ_WORK
8 select HAVE_PERF_EVENTS 7 select HAVE_PERF_EVENTS
9 select PERF_USE_VMALLOC 8 select PERF_USE_VMALLOC
10 select HAVE_ARCH_KGDB 9 select HAVE_ARCH_KGDB
@@ -2161,7 +2160,6 @@ source "mm/Kconfig"
2161config SMP 2160config SMP
2162 bool "Multi-Processing support" 2161 bool "Multi-Processing support"
2163 depends on SYS_SUPPORTS_SMP 2162 depends on SYS_SUPPORTS_SMP
2164 select IRQ_PER_CPU
2165 select USE_GENERIC_SMP_HELPERS 2163 select USE_GENERIC_SMP_HELPERS
2166 help 2164 help
2167 This enables support for systems with more than one CPU. If you have 2165 This enables support for systems with more than one CPU. If you have
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index d7af29f1fcf0..ba611927749b 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -8,8 +8,10 @@ config BCM47XX_SSB
8 select SSB_DRIVER_EXTIF 8 select SSB_DRIVER_EXTIF
9 select SSB_EMBEDDED 9 select SSB_EMBEDDED
10 select SSB_B43_PCI_BRIDGE if PCI 10 select SSB_B43_PCI_BRIDGE if PCI
11 select SSB_DRIVER_PCICORE if PCI
11 select SSB_PCICORE_HOSTMODE if PCI 12 select SSB_PCICORE_HOSTMODE if PCI
12 select SSB_DRIVER_GPIO 13 select SSB_DRIVER_GPIO
14 select GPIOLIB
13 default y 15 default y
14 help 16 help
15 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. 17 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -25,6 +27,7 @@ config BCM47XX_BCMA
25 select BCMA_HOST_PCI if PCI 27 select BCMA_HOST_PCI if PCI
26 select BCMA_DRIVER_PCI_HOSTMODE if PCI 28 select BCMA_DRIVER_PCI_HOSTMODE if PCI
27 select BCMA_DRIVER_GPIO 29 select BCMA_DRIVER_GPIO
30 select GPIOLIB
28 default y 31 default y
29 help 32 help
30 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. 33 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 9f883bf76953..33b72144db31 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
30 * measurement, and debugging facilities. 30 * measurement, and debugging facilities.
31 */ 31 */
32 32
33#include <linux/compiler.h>
33#include <linux/irqflags.h> 34#include <linux/irqflags.h>
34#include <asm/octeon/cvmx.h> 35#include <asm/octeon/cvmx.h>
35#include <asm/octeon/cvmx-l2c.h> 36#include <asm/octeon/cvmx-l2c.h>
@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
285 */ 286 */
286static void fault_in(uint64_t addr, int len) 287static void fault_in(uint64_t addr, int len)
287{ 288{
288 volatile char *ptr; 289 char *ptr;
289 volatile char dummy; 290
290 /* 291 /*
291 * Adjust addr and length so we get all cache lines even for 292 * Adjust addr and length so we get all cache lines even for
292 * small ranges spanning two cache lines. 293 * small ranges spanning two cache lines.
293 */ 294 */
294 len += addr & CVMX_CACHE_LINE_MASK; 295 len += addr & CVMX_CACHE_LINE_MASK;
295 addr &= ~CVMX_CACHE_LINE_MASK; 296 addr &= ~CVMX_CACHE_LINE_MASK;
296 ptr = (volatile char *)cvmx_phys_to_ptr(addr); 297 ptr = cvmx_phys_to_ptr(addr);
297 /* 298 /*
298 * Invalidate L1 cache to make sure all loads result in data 299 * Invalidate L1 cache to make sure all loads result in data
299 * being in L2. 300 * being in L2.
300 */ 301 */
301 CVMX_DCACHE_INVALIDATE; 302 CVMX_DCACHE_INVALIDATE;
302 while (len > 0) { 303 while (len > 0) {
303 dummy += *ptr; 304 ACCESS_ONCE(*ptr);
304 len -= CVMX_CACHE_LINE_SIZE; 305 len -= CVMX_CACHE_LINE_SIZE;
305 ptr += CVMX_CACHE_LINE_SIZE; 306 ptr += CVMX_CACHE_LINE_SIZE;
306 } 307 }
diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h
index e9bfc0813c72..7bfad0520e25 100644
--- a/arch/mips/include/asm/dsp.h
+++ b/arch/mips/include/asm/dsp.h
@@ -16,7 +16,7 @@
16#include <asm/mipsregs.h> 16#include <asm/mipsregs.h>
17 17
18#define DSP_DEFAULT 0x00000000 18#define DSP_DEFAULT 0x00000000
19#define DSP_MASK 0x3ff 19#define DSP_MASK 0x3f
20 20
21#define __enable_dsp_hazard() \ 21#define __enable_dsp_hazard() \
22do { \ 22do { \
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index ab84064283db..33c34adbecfa 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -353,6 +353,7 @@ union mips_instruction {
353 struct u_format u_format; 353 struct u_format u_format;
354 struct c_format c_format; 354 struct c_format c_format;
355 struct r_format r_format; 355 struct r_format r_format;
356 struct p_format p_format;
356 struct f_format f_format; 357 struct f_format f_format;
357 struct ma_format ma_format; 358 struct ma_format ma_format;
358 struct b_format b_format; 359 struct b_format b_format;
diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h
index edaa06d9d492..e410df4e1b3a 100644
--- a/arch/mips/include/asm/mach-pnx833x/war.h
+++ b/arch/mips/include/asm/mach-pnx833x/war.h
@@ -21,4 +21,4 @@
21#define R10000_LLSC_WAR 0 21#define R10000_LLSC_WAR 0
22#define MIPS34K_MISSED_ITLB_WAR 0 22#define MIPS34K_MISSED_ITLB_WAR 0
23 23
24#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */ 24#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c63191055e69..013d5f781263 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
230#else 230#else
231#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) 231#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
232#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) 232#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
233#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
233#endif 234#endif
234 235
235#define __pgd_offset(address) pgd_index(address) 236#define __pgd_offset(address) pgd_index(address)
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index a1a0452ac185..77d4fb33f75a 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += auxvec.h 4header-y += auxvec.h
5header-y += bitsperlong.h 5header-y += bitsperlong.h
6header-y += break.h
6header-y += byteorder.h 7header-y += byteorder.h
7header-y += cachectl.h 8header-y += cachectl.h
8header-y += errno.h 9header-y += errno.h
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 9161e684cb4c..9161e684cb4c 100644
--- a/arch/mips/include/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 6a2d758dd8e9..83fa1460e294 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,6 +25,12 @@
25#define MCOUNT_OFFSET_INSNS 4 25#define MCOUNT_OFFSET_INSNS 4
26#endif 26#endif
27 27
28/* Arch override because MIPS doesn't need to run this from stop_machine() */
29void arch_ftrace_update_code(int command)
30{
31 ftrace_modify_all_code(command);
32}
33
28/* 34/*
29 * Check if the address is in kernel space 35 * Check if the address is in kernel space
30 * 36 *
@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
89 return 0; 95 return 0;
90} 96}
91 97
98#ifndef CONFIG_64BIT
99static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
100 unsigned int new_code2)
101{
102 int faulted;
103
104 safe_store_code(new_code1, ip, faulted);
105 if (unlikely(faulted))
106 return -EFAULT;
107 ip += 4;
108 safe_store_code(new_code2, ip, faulted);
109 if (unlikely(faulted))
110 return -EFAULT;
111 flush_icache_range(ip, ip + 8); /* original ip + 12 */
112 return 0;
113}
114#endif
115
92/* 116/*
93 * The details about the calling site of mcount on MIPS 117 * The details about the calling site of mcount on MIPS
94 * 118 *
@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
131 * needed. 155 * needed.
132 */ 156 */
133 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; 157 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
134 158#ifdef CONFIG_64BIT
135 return ftrace_modify_code(ip, new); 159 return ftrace_modify_code(ip, new);
160#else
161 /*
162 * On 32 bit MIPS platforms, gcc adds a stack adjust
163 * instruction in the delay slot after the branch to
164 * mcount and expects mcount to restore the sp on return.
165 * This is based on a legacy API and does nothing but
166 * waste instructions so it's being removed at runtime.
167 */
168 return ftrace_modify_code_2(ip, new, INSN_NOP);
169#endif
136} 170}
137 171
138int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 172int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 4c968e7efb74..165867673357 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,9 +46,8 @@
46 PTR_L a5, PT_R9(sp) 46 PTR_L a5, PT_R9(sp)
47 PTR_L a6, PT_R10(sp) 47 PTR_L a6, PT_R10(sp)
48 PTR_L a7, PT_R11(sp) 48 PTR_L a7, PT_R11(sp)
49 PTR_ADDIU sp, PT_SIZE
50#else 49#else
51 PTR_ADDIU sp, (PT_SIZE + 8) 50 PTR_ADDIU sp, PT_SIZE
52#endif 51#endif
53.endm 52.endm
54 53
@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
69 .globl _mcount 68 .globl _mcount
70_mcount: 69_mcount:
71 b ftrace_stub 70 b ftrace_stub
72 nop 71 addiu sp,sp,8
72
73 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
73 lw t1, function_trace_stop 74 lw t1, function_trace_stop
74 bnez t1, ftrace_stub 75 bnez t1, ftrace_stub
75 nop 76 nop
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index eec690af6581..147cec19621d 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
705 705
706 printk(KERN_WARNING 706 printk(KERN_WARNING
707 "VPE loader: TC %d is already in use.\n", 707 "VPE loader: TC %d is already in use.\n",
708 t->index); 708 v->tc->index);
709 return -ENOEXEC; 709 return -ENOEXEC;
710 } 710 }
711 } else { 711 } else {
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f36acd1b3808..a7935bf0fecb 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
408#endif 408#endif
409 409
410 /* tell oprofile which irq to use */ 410 /* tell oprofile which irq to use */
411 cp0_perfcount_irq = LTQ_PERF_IRQ; 411 cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
412 412
413 /* 413 /*
414 * if the timer irq is not one of the mips irqs we need to 414 * if the timer irq is not one of the mips irqs we need to
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index dc81ca8dc0dd..288f7954988d 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
21 " .set noreorder \n" 21 " .set noreorder \n"
22 " .align 3 \n" 22 " .align 3 \n"
23 "1: bnez %0, 1b \n" 23 "1: bnez %0, 1b \n"
24#if __SIZEOF_LONG__ == 4 24#if BITS_PER_LONG == 32
25 " subu %0, 1 \n" 25 " subu %0, 1 \n"
26#else 26#else
27 " dsubu %0, 1 \n" 27 " dsubu %0, 1 \n"
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 7657fd21cd3f..cacfd31e8ec9 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
190 190
191EXPORT_SYMBOL(__ioremap); 191EXPORT_SYMBOL(__ioremap);
192EXPORT_SYMBOL(__iounmap); 192EXPORT_SYMBOL(__iounmap);
193
194int __virt_addr_valid(const volatile void *kaddr)
195{
196 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
197}
198EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d9be7540a6be..7e5fe2790d8a 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
192 192
193 return ret; 193 return ret;
194} 194}
195
196int __virt_addr_valid(const volatile void *kaddr)
197{
198 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
199}
200EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 4e7f49d3d5a8..c5ce6992ac4c 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -193,8 +193,11 @@ static void nlm_init_node(void)
193 193
194void __init prom_init(void) 194void __init prom_init(void)
195{ 195{
196 int i, *argv, *envp; /* passed as 32 bit ptrs */ 196 int *argv, *envp; /* passed as 32 bit ptrs */
197 struct psb_info *prom_infop; 197 struct psb_info *prom_infop;
198#ifdef CONFIG_SMP
199 int i;
200#endif
198 201
199 /* truncate to 32 bit and sign extend all args */ 202 /* truncate to 32 bit and sign extend all args */
200 argv = (int *)(long)(int)fw_arg1; 203 argv = (int *)(long)(int)fw_arg1;
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 1552522b8718..6eaa4f2d0e38 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -24,7 +24,7 @@
24#include <asm/mach-ath79/pci.h> 24#include <asm/mach-ath79/pci.h>
25 25
26#define AR71XX_PCI_MEM_BASE 0x10000000 26#define AR71XX_PCI_MEM_BASE 0x10000000
27#define AR71XX_PCI_MEM_SIZE 0x08000000 27#define AR71XX_PCI_MEM_SIZE 0x07000000
28 28
29#define AR71XX_PCI_WIN0_OFFS 0x10000000 29#define AR71XX_PCI_WIN0_OFFS 0x10000000
30#define AR71XX_PCI_WIN1_OFFS 0x11000000 30#define AR71XX_PCI_WIN1_OFFS 0x11000000
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 86d77a666458..c11c75be2d7e 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -21,7 +21,7 @@
21#define AR724X_PCI_CTRL_SIZE 0x100 21#define AR724X_PCI_CTRL_SIZE 0x100
22 22
23#define AR724X_PCI_MEM_BASE 0x10000000 23#define AR724X_PCI_MEM_BASE 0x10000000
24#define AR724X_PCI_MEM_SIZE 0x08000000 24#define AR724X_PCI_MEM_SIZE 0x04000000
25 25
26#define AR724X_PCI_REG_RESET 0x18 26#define AR724X_PCI_REG_RESET 0x18
27#define AR724X_PCI_REG_INT_STATUS 0x4c 27#define AR724X_PCI_REG_INT_STATUS 0x4c
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index c1be4397b1ed..a18abfc558eb 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
168 mn10300_dcache_flush_inv(); 168 mn10300_dcache_flush_inv();
169} 169}
170 170
171/* Not supported for now */
172static inline int dma_mmap_coherent(struct device *dev,
173 struct vm_area_struct *vma, void *cpu_addr,
174 dma_addr_t dma_addr, size_t size)
175{
176 return -EINVAL;
177}
178
179static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
180 void *cpu_addr, dma_addr_t dma_addr,
181 size_t size)
182{
183 return -EINVAL;
184}
185
171#endif 186#endif
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index eb09f5a552ff..84f4e97e3074 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -37,12 +37,6 @@
37#include "internal.h" 37#include "internal.h"
38 38
39/* 39/*
40 * power management idle function, if any..
41 */
42void (*pm_idle)(void);
43EXPORT_SYMBOL(pm_idle);
44
45/*
46 * return saved PC of a blocked thread. 40 * return saved PC of a blocked thread.
47 */ 41 */
48unsigned long thread_saved_pc(struct task_struct *tsk) 42unsigned long thread_saved_pc(struct task_struct *tsk)
@@ -113,7 +107,6 @@ void cpu_idle(void)
113 void (*idle)(void); 107 void (*idle)(void);
114 108
115 smp_rmb(); 109 smp_rmb();
116 idle = pm_idle;
117 if (!idle) { 110 if (!idle) {
118#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) 111#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
119 idle = poll_idle; 112 idle = poll_idle;
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 7d618feb1b72..5e8a3b6d6bc6 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -39,11 +39,6 @@
39 39
40void (*powersave) (void) = NULL; 40void (*powersave) (void) = NULL;
41 41
42static inline void pm_idle(void)
43{
44 barrier();
45}
46
47void cpu_idle(void) 42void cpu_idle(void)
48{ 43{
49 set_thread_flag(TIF_POLLING_NRFLAG); 44 set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b77feffbadea..a32e34ecda9e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -9,14 +9,12 @@ config PARISC
9 select RTC_DRV_GENERIC 9 select RTC_DRV_GENERIC
10 select INIT_ALL_POSSIBLE 10 select INIT_ALL_POSSIBLE
11 select BUG 11 select BUG
12 select HAVE_IRQ_WORK
13 select HAVE_PERF_EVENTS 12 select HAVE_PERF_EVENTS
14 select GENERIC_ATOMIC64 if !64BIT 13 select GENERIC_ATOMIC64 if !64BIT
15 select HAVE_GENERIC_HARDIRQS 14 select HAVE_GENERIC_HARDIRQS
16 select BROKEN_RODATA 15 select BROKEN_RODATA
17 select GENERIC_IRQ_PROBE 16 select GENERIC_IRQ_PROBE
18 select GENERIC_PCI_IOMAP 17 select GENERIC_PCI_IOMAP
19 select IRQ_PER_CPU
20 select ARCH_HAVE_NMI_SAFE_CMPXCHG 18 select ARCH_HAVE_NMI_SAFE_CMPXCHG
21 select GENERIC_SMP_IDLE_THREAD 19 select GENERIC_SMP_IDLE_THREAD
22 select GENERIC_STRNCPY_FROM_USER 20 select GENERIC_STRNCPY_FROM_USER
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 467bbd510eac..106b395688e1 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
238/* At the moment, we panic on error for IOMMU resource exaustion */ 238/* At the moment, we panic on error for IOMMU resource exaustion */
239#define dma_mapping_error(dev, x) 0 239#define dma_mapping_error(dev, x) 0
240 240
241/* This API cannot be supported on PA-RISC */
242static inline int dma_mmap_coherent(struct device *dev,
243 struct vm_area_struct *vma, void *cpu_addr,
244 dma_addr_t dma_addr, size_t size)
245{
246 return -EINVAL;
247}
248
249static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
250 void *cpu_addr, dma_addr_t dma_addr,
251 size_t size)
252{
253 return -EINVAL;
254}
255
241#endif 256#endif
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index bfb44247d7a7..eb7850b46c25 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1865,7 +1865,7 @@ syscall_restore:
1865 1865
1866 /* Are we being ptraced? */ 1866 /* Are we being ptraced? */
1867 ldw TASK_FLAGS(%r1),%r19 1867 ldw TASK_FLAGS(%r1),%r19
1868 ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 1868 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1869 and,COND(=) %r19,%r2,%r0 1869 and,COND(=) %r19,%r2,%r0
1870 b,n syscall_restore_rfi 1870 b,n syscall_restore_rfi
1871 1871
@@ -1978,15 +1978,23 @@ syscall_restore_rfi:
1978 /* sr2 should be set to zero for userspace syscalls */ 1978 /* sr2 should be set to zero for userspace syscalls */
1979 STREG %r0,TASK_PT_SR2(%r1) 1979 STREG %r0,TASK_PT_SR2(%r1)
1980 1980
1981pt_regs_ok:
1982 LDREG TASK_PT_GR31(%r1),%r2 1981 LDREG TASK_PT_GR31(%r1),%r2
1983 depi 3,31,2,%r2 /* ensure return to user mode. */ 1982 depi 3,31,2,%r2 /* ensure return to user mode. */
1984 STREG %r2,TASK_PT_IAOQ0(%r1) 1983 STREG %r2,TASK_PT_IAOQ0(%r1)
1985 ldo 4(%r2),%r2 1984 ldo 4(%r2),%r2
1986 STREG %r2,TASK_PT_IAOQ1(%r1) 1985 STREG %r2,TASK_PT_IAOQ1(%r1)
1986 b intr_restore
1987 copy %r25,%r16 1987 copy %r25,%r16
1988
1989pt_regs_ok:
1990 LDREG TASK_PT_IAOQ0(%r1),%r2
1991 depi 3,31,2,%r2 /* ensure return to user mode. */
1992 STREG %r2,TASK_PT_IAOQ0(%r1)
1993 LDREG TASK_PT_IAOQ1(%r1),%r2
1994 depi 3,31,2,%r2
1995 STREG %r2,TASK_PT_IAOQ1(%r1)
1988 b intr_restore 1996 b intr_restore
1989 nop 1997 copy %r25,%r16
1990 1998
1991 .import schedule,code 1999 .import schedule,code
1992syscall_do_resched: 2000syscall_do_resched:
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index c0b1affc06a8..0299d63cd112 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -410,11 +410,13 @@ void __init init_IRQ(void)
410{ 410{
411 local_irq_disable(); /* PARANOID - should already be disabled */ 411 local_irq_disable(); /* PARANOID - should already be disabled */
412 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ 412 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
413 claim_cpu_irqs();
414#ifdef CONFIG_SMP 413#ifdef CONFIG_SMP
415 if (!cpu_eiem) 414 if (!cpu_eiem) {
415 claim_cpu_irqs();
416 cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); 416 cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
417 }
417#else 418#else
419 claim_cpu_irqs();
418 cpu_eiem = EIEM_MASK(TIMER_IRQ); 420 cpu_eiem = EIEM_MASK(TIMER_IRQ);
419#endif 421#endif
420 set_eiem(cpu_eiem); /* EIEM : enable all external intr */ 422 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 857c2f545470..534abd4936e1 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -26,7 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27 27
28/* PSW bits we allow the debugger to modify */ 28/* PSW bits we allow the debugger to modify */
29#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) 29#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB)
30 30
31/* 31/*
32 * Called by kernel/ptrace.c when detaching.. 32 * Called by kernel/ptrace.c when detaching..
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 537996955998..fd051705a407 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
190 DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", 190 DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
191 (unsigned long)ka, sp, frame_size); 191 (unsigned long)ka, sp, frame_size);
192 192
193 /* Align alternate stack and reserve 64 bytes for the signal
194 handler's frame marker. */
193 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) 195 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
194 sp = current->sas_ss_sp; /* Stacks grow up! */ 196 sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
195 197
196 DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); 198 DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
197 return (void __user *) sp; /* Stacks grow up. Fun. */ 199 return (void __user *) sp; /* Stacks grow up. Fun. */
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
index 9071e093164a..933423fa5144 100644
--- a/arch/parisc/math-emu/cnv_float.h
+++ b/arch/parisc/math-emu/cnv_float.h
@@ -347,16 +347,15 @@
347 Sgl_isinexact_to_fix(sgl_value,exponent) 347 Sgl_isinexact_to_fix(sgl_value,exponent)
348 348
349#define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ 349#define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \
350 {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \ 350 {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH; \
351 if (exponent <= 31) { \ 351 if (exponent <= 31) { \
352 Dintp1(dresultA) = 0; \ 352 Dintp1(dresultA) = 0; \
353 Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ 353 Dintp2(dresultB) = val >> (31 - exponent); \
354 } \ 354 } \
355 else { \ 355 else { \
356 Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \ 356 Dintp1(dresultA) = val >> (63 - exponent); \
357 Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \ 357 Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \
358 } \ 358 } \
359 Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \
360 } 359 }
361 360
362#define Duint_setzero(dresultA,dresultB) \ 361#define Duint_setzero(dresultA,dresultB) \
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 17903f1f356b..561ccca7b1a7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -118,14 +118,12 @@ config PPC
118 select HAVE_SYSCALL_WRAPPERS if PPC64 118 select HAVE_SYSCALL_WRAPPERS if PPC64
119 select GENERIC_ATOMIC64 if PPC32 119 select GENERIC_ATOMIC64 if PPC32
120 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 120 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
121 select HAVE_IRQ_WORK
122 select HAVE_PERF_EVENTS 121 select HAVE_PERF_EVENTS
123 select HAVE_REGS_AND_STACK_ACCESS_API 122 select HAVE_REGS_AND_STACK_ACCESS_API
124 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 123 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
125 select HAVE_GENERIC_HARDIRQS 124 select HAVE_GENERIC_HARDIRQS
126 select ARCH_WANT_IPC_PARSE_VERSION 125 select ARCH_WANT_IPC_PARSE_VERSION
127 select SPARSE_IRQ 126 select SPARSE_IRQ
128 select IRQ_PER_CPU
129 select IRQ_DOMAIN 127 select IRQ_DOMAIN
130 select GENERIC_IRQ_SHOW 128 select GENERIC_IRQ_SHOW
131 select GENERIC_IRQ_SHOW_LEVEL 129 select GENERIC_IRQ_SHOW_LEVEL
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index 29bb11ec6c64..4f35fc462385 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y 2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=256 5CONFIG_NR_CPUS=256
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 88fa5c46f66f..f7df8362911f 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y 2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=2 5CONFIG_NR_CPUS=2
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 840a2c2d0430..bcedeea0df89 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_ALTIVEC=y 2CONFIG_ALTIVEC=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=2 5CONFIG_NR_CPUS=2
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 483733bd06d4..607559ab271f 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in 11 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
12 * the same units as the timebase. Otherwise we measure cpu time 12 * the same units as the timebase. Otherwise we measure cpu time
13 * in jiffies using the generic definitions. 13 * in jiffies using the generic definitions.
14 */ 14 */
@@ -16,7 +16,7 @@
16#ifndef __POWERPC_CPUTIME_H 16#ifndef __POWERPC_CPUTIME_H
17#define __POWERPC_CPUTIME_H 17#define __POWERPC_CPUTIME_H
18 18
19#ifndef CONFIG_VIRT_CPU_ACCOUNTING 19#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
20#include <asm-generic/cputime.h> 20#include <asm-generic/cputime.h>
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22static inline void setup_cputime_one_jiffy(void) { } 22static inline void setup_cputime_one_jiffy(void) { }
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
231static inline void arch_vtime_task_switch(struct task_struct *tsk) { } 231static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
232 232
233#endif /* __KERNEL__ */ 233#endif /* __KERNEL__ */
234#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 234#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
235#endif /* __POWERPC_CPUTIME_H */ 235#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 531fe0c3108f..b1e7f2af1016 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -145,7 +145,7 @@ struct dtl_entry {
145extern struct kmem_cache *dtl_cache; 145extern struct kmem_cache *dtl_cache;
146 146
147/* 147/*
148 * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls 148 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
149 * reading from the dispatch trace log. If other code wants to consume 149 * reading from the dispatch trace log. If other code wants to consume
150 * DTL entries, it can set this pointer to a function that will get 150 * DTL entries, it can set this pointer to a function that will get
151 * called once for each DTL entry that gets processed. 151 * called once for each DTL entry that gets processed.
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9710be3a2d17..136bba62efa4 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/hw_irq.h> 13#include <asm/hw_irq.h>
14#include <linux/device.h>
14 15
15#define MAX_HWEVENTS 8 16#define MAX_HWEVENTS 8
16#define MAX_EVENT_ALTERNATIVES 8 17#define MAX_EVENT_ALTERNATIVES 8
@@ -35,6 +36,7 @@ struct power_pmu {
35 void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); 36 void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
36 int (*limited_pmc_event)(u64 event_id); 37 int (*limited_pmc_event)(u64 event_id);
37 u32 flags; 38 u32 flags;
39 const struct attribute_group **attr_groups;
38 int n_generic; 40 int n_generic;
39 int *generic_events; 41 int *generic_events;
40 int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] 42 int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
109 * If an event_id is not subject to the constraint expressed by a particular 111 * If an event_id is not subject to the constraint expressed by a particular
110 * field, then it will have 0 in both the mask and value for that field. 112 * field, then it will have 0 in both the mask and value for that field.
111 */ 113 */
114
115extern ssize_t power_events_sysfs_show(struct device *dev,
116 struct device_attribute *attr, char *page);
117
118/*
119 * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix.
120 *
121 * Having a suffix allows us to have aliases in sysfs - eg: the generic
122 * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
123 * 'PM_CYC' where the latter is the name by which the event is known in
124 * POWER CPU specification.
125 */
126#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
127#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
128
129#define EVENT_ATTR(_name, _id, _suffix) \
130 PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
131 power_events_sysfs_show)
132
133#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
134#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
135
136#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
137#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index ea2a86e8ff95..2d0e1f5d8339 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,7 +24,7 @@
24 * user_time and system_time fields in the paca. 24 * user_time and system_time fields in the paca.
25 */ 25 */
26 26
27#ifndef CONFIG_VIRT_CPU_ACCOUNTING 27#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
28#define ACCOUNT_CPU_USER_ENTRY(ra, rb) 28#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
29#define ACCOUNT_CPU_USER_EXIT(ra, rb) 29#define ACCOUNT_CPU_USER_EXIT(ra, rb)
30#define ACCOUNT_STOLEN_TIME 30#define ACCOUNT_STOLEN_TIME
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
70 70
71#endif /* CONFIG_PPC_SPLPAR */ 71#endif /* CONFIG_PPC_SPLPAR */
72 72
73#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 73#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
74 74
75/* 75/*
76 * Macros for storing registers into and loading registers from 76 * Macros for storing registers into and loading registers from
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..e514de57a125 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,6 +439,8 @@ ret_from_fork:
439ret_from_kernel_thread: 439ret_from_kernel_thread:
440 REST_NVGPRS(r1) 440 REST_NVGPRS(r1)
441 bl schedule_tail 441 bl schedule_tail
442 li r3,0
443 stw r3,0(r1)
442 mtlr r14 444 mtlr r14
443 mr r3,r15 445 mr r3,r15
444 PPC440EP_ERR42 446 PPC440EP_ERR42
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b310a0573625..ac057013f9fd 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -94,7 +94,7 @@ system_call_common:
94 addi r9,r1,STACK_FRAME_OVERHEAD 94 addi r9,r1,STACK_FRAME_OVERHEAD
95 ld r11,exception_marker@toc(r2) 95 ld r11,exception_marker@toc(r2)
96 std r11,-16(r9) /* "regshere" marker */ 96 std r11,-16(r9) /* "regshere" marker */
97#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) 97#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
98BEGIN_FW_FTR_SECTION 98BEGIN_FW_FTR_SECTION
99 beq 33f 99 beq 33f
100 /* if from user, see if there are any DTL entries to process */ 100 /* if from user, see if there are any DTL entries to process */
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION
110 addi r9,r1,STACK_FRAME_OVERHEAD 110 addi r9,r1,STACK_FRAME_OVERHEAD
11133: 11133:
112END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) 112END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
113#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ 113#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
114 114
115 /* 115 /*
116 * A syscall should always be called with interrupts enabled 116 * A syscall should always be called with interrupts enabled
@@ -664,6 +664,19 @@ resume_kernel:
664 ld r4,TI_FLAGS(r9) 664 ld r4,TI_FLAGS(r9)
665 andi. r0,r4,_TIF_NEED_RESCHED 665 andi. r0,r4,_TIF_NEED_RESCHED
666 bne 1b 666 bne 1b
667
668 /*
669 * arch_local_irq_restore() from preempt_schedule_irq above may
670 * enable hard interrupt but we really should disable interrupts
671 * when we return from the interrupt, and so that we don't get
672 * interrupted after loading SRR0/1.
673 */
674#ifdef CONFIG_PPC_BOOK3E
675 wrteei 0
676#else
677 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
678 mtmsrd r10,1 /* Update machine state */
679#endif /* CONFIG_PPC_BOOK3E */
667#endif /* CONFIG_PREEMPT */ 680#endif /* CONFIG_PREEMPT */
668 681
669 .globl fast_exc_return_irq 682 .globl fast_exc_return_irq
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c470a40b29f5..a7bc7521c064 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
154static int kgdb_singlestep(struct pt_regs *regs) 154static int kgdb_singlestep(struct pt_regs *regs)
155{ 155{
156 struct thread_info *thread_info, *exception_thread_info; 156 struct thread_info *thread_info, *exception_thread_info;
157 struct thread_info *backup_current_thread_info = \ 157 struct thread_info *backup_current_thread_info;
158 (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
159 158
160 if (user_mode(regs)) 159 if (user_mode(regs))
161 return 0; 160 return 0;
162 161
162 backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
163 /* 163 /*
164 * On Book E and perhaps other processors, singlestep is handled on 164 * On Book E and perhaps other processors, singlestep is handled on
165 * the critical exception stack. This causes current_thread_info() 165 * the critical exception stack. This causes current_thread_info()
@@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
185 /* Restore current_thread_info lastly. */ 185 /* Restore current_thread_info lastly. */
186 memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); 186 memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
187 187
188 kfree(backup_current_thread_info);
188 return 1; 189 return 1;
189} 190}
190 191
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6f6b1cccc916..f77fa22754bc 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
143unsigned long ppc_tb_freq; 143unsigned long ppc_tb_freq;
144EXPORT_SYMBOL_GPL(ppc_tb_freq); 144EXPORT_SYMBOL_GPL(ppc_tb_freq);
145 145
146#ifdef CONFIG_VIRT_CPU_ACCOUNTING 146#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
147/* 147/*
148 * Factors for converting from cputime_t (timebase ticks) to 148 * Factors for converting from cputime_t (timebase ticks) to
149 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). 149 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
347 if (stolen) 347 if (stolen)
348 account_steal_time(stolen); 348 account_steal_time(stolen);
349} 349}
350EXPORT_SYMBOL_GPL(vtime_account_system);
350 351
351void vtime_account_idle(struct task_struct *tsk) 352void vtime_account_idle(struct task_struct *tsk)
352{ 353{
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk)
377 account_user_time(tsk, utime, utimescaled); 378 account_user_time(tsk, utime, utimescaled);
378} 379}
379 380
380#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ 381#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
381#define calc_cputime_factors() 382#define calc_cputime_factors()
382#endif 383#endif
383 384
@@ -494,10 +495,15 @@ void timer_interrupt(struct pt_regs * regs)
494 set_dec(DECREMENTER_MAX); 495 set_dec(DECREMENTER_MAX);
495 496
496 /* Some implementations of hotplug will get timer interrupts while 497 /* Some implementations of hotplug will get timer interrupts while
497 * offline, just ignore these 498 * offline, just ignore these and we also need to set
499 * decrementers_next_tb as MAX to make sure __check_irq_replay
500 * don't replay timer interrupt when return, otherwise we'll trap
501 * here infinitely :(
498 */ 502 */
499 if (!cpu_online(smp_processor_id())) 503 if (!cpu_online(smp_processor_id())) {
504 *next_tb = ~(u64)0;
500 return; 505 return;
506 }
501 507
502 /* Conditionally hard-enable interrupts now that the DEC has been 508 /* Conditionally hard-enable interrupts now that the DEC has been
503 * bumped to its maximum value 509 * bumped to its maximum value
@@ -663,7 +669,7 @@ int update_persistent_clock(struct timespec now)
663 struct rtc_time tm; 669 struct rtc_time tm;
664 670
665 if (!ppc_md.set_rtc_time) 671 if (!ppc_md.set_rtc_time)
666 return 0; 672 return -ENODEV;
667 673
668 to_tm(now.tv_sec + 1 + timezone_offset, &tm); 674 to_tm(now.tv_sec + 1 + timezone_offset, &tm);
669 tm.tm_year -= 1900; 675 tm.tm_year -= 1900;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index b0855e5d8905..9d9cddc5b346 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -39,6 +39,7 @@
39#define OP_31_XOP_TRAP 4 39#define OP_31_XOP_TRAP 4
40#define OP_31_XOP_LWZX 23 40#define OP_31_XOP_LWZX 23
41#define OP_31_XOP_TRAP_64 68 41#define OP_31_XOP_TRAP_64 68
42#define OP_31_XOP_DCBF 86
42#define OP_31_XOP_LBZX 87 43#define OP_31_XOP_LBZX 87
43#define OP_31_XOP_STWX 151 44#define OP_31_XOP_STWX 151
44#define OP_31_XOP_STBX 215 45#define OP_31_XOP_STBX 215
@@ -374,6 +375,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
374 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 375 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
375 break; 376 break;
376 377
378 case OP_31_XOP_DCBF:
377 case OP_31_XOP_DCBI: 379 case OP_31_XOP_DCBI:
378 /* Do nothing. The guest is performing dcbi because 380 /* Do nothing. The guest is performing dcbi because
379 * hardware DMA is not snooped by the dcache, but 381 * hardware DMA is not snooped by the dcache, but
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 56585086413a..7443481a315c 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
115 sldi r29,r5,SID_SHIFT - VPN_SHIFT 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT
116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
117 or r29,r28,r29 117 or r29,r28,r29
118 118 /*
119 /* Calculate hash value for primary slot and store it in r28 */ 119 * Calculate hash value for primary slot and store it in r28
120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 120 * r3 = va, r5 = vsid
121 rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 121 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
122 xor r28,r5,r0 122 */
123 rldicl r0,r3,64-12,48
124 xor r28,r5,r0 /* hash */
123 b 4f 125 b 4f
124 126
1253: /* Calc vpn and put it in r29 */ 1273: /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
130 /* 132 /*
131 * calculate hash value for primary slot and 133 * calculate hash value for primary slot and
132 * store it in r28 for 1T segment 134 * store it in r28 for 1T segment
135 * r3 = va, r5 = vsid
133 */ 136 */
134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 137 sldi r28,r5,25 /* vsid << 25 */
135 clrldi r5,r5,40 /* vsid & 0xffffff */ 138 /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 139 rldicl r0,r3,64-12,36
137 xor r28,r28,r5 140 xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
138 xor r28,r28,r0 /* hash */ 141 xor r28,r28,r0 /* hash */
139 142
140 /* Convert linux PTE bits into HW equivalents */ 143 /* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
407 */ 410 */
408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 411 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
409 or r29,r28,r29 412 or r29,r28,r29
410 413 /*
411 /* Calculate hash value for primary slot and store it in r28 */ 414 * Calculate hash value for primary slot and store it in r28
412 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 415 * r3 = va, r5 = vsid
413 rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 416 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
414 xor r28,r5,r0 417 */
418 rldicl r0,r3,64-12,48
419 xor r28,r5,r0 /* hash */
415 b 4f 420 b 4f
416 421
4173: /* Calc vpn and put it in r29 */ 4223: /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
426 /* 431 /*
427 * Calculate hash value for primary slot and 432 * Calculate hash value for primary slot and
428 * store it in r28 for 1T segment 433 * store it in r28 for 1T segment
434 * r3 = va, r5 = vsid
429 */ 435 */
430 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 436 sldi r28,r5,25 /* vsid << 25 */
431 clrldi r5,r5,40 /* vsid & 0xffffff */ 437 /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
432 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 438 rldicl r0,r3,64-12,36
433 xor r28,r28,r5 439 xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
434 xor r28,r28,r0 /* hash */ 440 xor r28,r28,r0 /* hash */
435 441
436 /* Convert linux PTE bits into HW equivalents */ 442 /* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 758 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
753 or r29,r28,r29 759 or r29,r28,r29
754 760
755 /* Calculate hash value for primary slot and store it in r28 */ 761 /* Calculate hash value for primary slot and store it in r28
756 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 762 * r3 = va, r5 = vsid
757 rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ 763 * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
758 xor r28,r5,r0 764 */
765 rldicl r0,r3,64-16,52
766 xor r28,r5,r0 /* hash */
759 b 4f 767 b 4f
760 768
7613: /* Calc vpn and put it in r29 */ 7693: /* Calc vpn and put it in r29 */
762 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 770 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
763 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 771 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
764 or r29,r28,r29 772 or r29,r28,r29
765
766 /* 773 /*
767 * calculate hash value for primary slot and 774 * calculate hash value for primary slot and
768 * store it in r28 for 1T segment 775 * store it in r28 for 1T segment
776 * r3 = va, r5 = vsid
769 */ 777 */
770 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 778 sldi r28,r5,25 /* vsid << 25 */
771 clrldi r5,r5,40 /* vsid & 0xffffff */ 779 /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
772 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 780 rldicl r0,r3,64-16,40
773 xor r28,r28,r5 781 xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
774 xor r28,r28,r0 /* hash */ 782 xor r28,r28,r0 /* hash */
775 783
776 /* Convert linux PTE bits into HW equivalents */ 784 /* Convert linux PTE bits into HW equivalents */
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 315f9495e9b2..f444b94935f5 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1)
52 for (pmc = 0; pmc < 4; pmc++) { 52 for (pmc = 0; pmc < 4; pmc++) {
53 psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK 53 psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
54 << (OPROFILE_MAX_PMC_NUM - pmc) 54 << (OPROFILE_MAX_PMC_NUM - pmc)
55 * OPROFILE_MAX_PMC_NUM); 55 * OPROFILE_PMSEL_FIELD_WIDTH);
56 psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) 56 psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
57 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; 57 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
58 unit = mmcr1 & (OPROFILE_PM_UNIT_MSK 58 unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e21f1a..fa476d50791f 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event)
1305 return event->hw.idx; 1305 return event->hw.idx;
1306} 1306}
1307 1307
1308ssize_t power_events_sysfs_show(struct device *dev,
1309 struct device_attribute *attr, char *page)
1310{
1311 struct perf_pmu_events_attr *pmu_attr;
1312
1313 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
1314
1315 return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
1316}
1317
1308struct pmu power_pmu = { 1318struct pmu power_pmu = {
1309 .pmu_enable = power_pmu_enable, 1319 .pmu_enable = power_pmu_enable,
1310 .pmu_disable = power_pmu_disable, 1320 .pmu_disable = power_pmu_disable,
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
1537 pr_info("%s performance monitor hardware support registered\n", 1547 pr_info("%s performance monitor hardware support registered\n",
1538 pmu->name); 1548 pmu->name);
1539 1549
1550 power_pmu.attr_groups = ppmu->attr_groups;
1551
1540#ifdef MSR_HV 1552#ifdef MSR_HV
1541 /* 1553 /*
1542 * Use FCHV to ignore kernel events if MSR.HV is set. 1554 * Use FCHV to ignore kernel events if MSR.HV is set.
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 2ee01e38d5e2..b554879bd31e 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -51,6 +51,18 @@
51#define MMCR1_PMCSEL_MSK 0xff 51#define MMCR1_PMCSEL_MSK 0xff
52 52
53/* 53/*
54 * Power7 event codes.
55 */
56#define PME_PM_CYC 0x1e
57#define PME_PM_GCT_NOSLOT_CYC 0x100f8
58#define PME_PM_CMPLU_STALL 0x4000a
59#define PME_PM_INST_CMPL 0x2
60#define PME_PM_LD_REF_L1 0xc880
61#define PME_PM_LD_MISS_L1 0x400f0
62#define PME_PM_BRU_FIN 0x10068
63#define PME_PM_BRU_MPRED 0x400f6
64
65/*
54 * Layout of constraint bits: 66 * Layout of constraint bits:
55 * 6666555555555544444444443333333333222222222211111111110000000000 67 * 6666555555555544444444443333333333222222222211111111110000000000
56 * 3210987654321098765432109876543210987654321098765432109876543210 68 * 3210987654321098765432109876543210987654321098765432109876543210
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
307} 319}
308 320
309static int power7_generic_events[] = { 321static int power7_generic_events[] = {
310 [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, 322 [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
311 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ 323 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
312 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ 324 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
313 [PERF_COUNT_HW_INSTRUCTIONS] = 2, 325 [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
314 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ 326 [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
315 [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ 327 [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
316 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ 328 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
317 [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ 329 [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED,
318}; 330};
319 331
320#define C(x) PERF_COUNT_HW_CACHE_##x 332#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
362 }, 374 },
363}; 375};
364 376
377
378GENERIC_EVENT_ATTR(cpu-cycles, CYC);
379GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
380GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
381GENERIC_EVENT_ATTR(instructions, INST_CMPL);
382GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
383GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
384GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
385GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED);
386
387POWER_EVENT_ATTR(CYC, CYC);
388POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
389POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
390POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
391POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
392POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
393POWER_EVENT_ATTR(BRU_FIN, BRU_FIN);
394POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED);
395
396static struct attribute *power7_events_attr[] = {
397 GENERIC_EVENT_PTR(CYC),
398 GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
399 GENERIC_EVENT_PTR(CMPLU_STALL),
400 GENERIC_EVENT_PTR(INST_CMPL),
401 GENERIC_EVENT_PTR(LD_REF_L1),
402 GENERIC_EVENT_PTR(LD_MISS_L1),
403 GENERIC_EVENT_PTR(BRU_FIN),
404 GENERIC_EVENT_PTR(BRU_MPRED),
405
406 POWER_EVENT_PTR(CYC),
407 POWER_EVENT_PTR(GCT_NOSLOT_CYC),
408 POWER_EVENT_PTR(CMPLU_STALL),
409 POWER_EVENT_PTR(INST_CMPL),
410 POWER_EVENT_PTR(LD_REF_L1),
411 POWER_EVENT_PTR(LD_MISS_L1),
412 POWER_EVENT_PTR(BRU_FIN),
413 POWER_EVENT_PTR(BRU_MPRED),
414 NULL
415};
416
417
418static struct attribute_group power7_pmu_events_group = {
419 .name = "events",
420 .attrs = power7_events_attr,
421};
422
423static const struct attribute_group *power7_pmu_attr_groups[] = {
424 &power7_pmu_events_group,
425 NULL,
426};
427
365static struct power_pmu power7_pmu = { 428static struct power_pmu power7_pmu = {
366 .name = "POWER7", 429 .name = "POWER7",
367 .n_counter = 6, 430 .n_counter = 6,
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = {
373 .get_alternatives = power7_get_alternatives, 436 .get_alternatives = power7_get_alternatives,
374 .disable_pmc = power7_disable_pmc, 437 .disable_pmc = power7_disable_pmc,
375 .flags = PPMU_ALT_SIPR, 438 .flags = PPMU_ALT_SIPR,
439 .attr_groups = power7_pmu_attr_groups,
376 .n_generic = ARRAY_SIZE(power7_generic_events), 440 .n_generic = ARRAY_SIZE(power7_generic_events),
377 .generic_events = power7_generic_events, 441 .generic_events = power7_generic_events,
378 .cache_events = &power7_cache_events, 442 .cache_events = &power7_cache_events,
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 25db92a8e1cf..49318385d4fa 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/sched/rt.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 95d00173029f..890f30e70f98 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -236,6 +236,13 @@ out:
236 236
237static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) 237static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
238{ 238{
239 /*
240 * We don't support CPU hotplug. Don't unmap after the system
241 * has already made it to a running state.
242 */
243 if (system_state != SYSTEM_BOOTING)
244 return 0;
245
239 if (sdcasr_mapbase) 246 if (sdcasr_mapbase)
240 iounmap(sdcasr_mapbase); 247 iounmap(sdcasr_mapbase);
241 if (sdcpwr_mapbase) 248 if (sdcpwr_mapbase)
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a7648543c59e..0cc0ac07a55d 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7;
57 */ 57 */
58static int dtl_buf_entries = N_DISPATCH_LOG; 58static int dtl_buf_entries = N_DISPATCH_LOG;
59 59
60#ifdef CONFIG_VIRT_CPU_ACCOUNTING 60#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
61struct dtl_ring { 61struct dtl_ring {
62 u64 write_index; 62 u64 write_index;
63 struct dtl_entry *write_ptr; 63 struct dtl_entry *write_ptr;
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl)
142 return per_cpu(dtl_rings, dtl->cpu).write_index; 142 return per_cpu(dtl_rings, dtl->cpu).write_index;
143} 143}
144 144
145#else /* CONFIG_VIRT_CPU_ACCOUNTING */ 145#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
146 146
147static int dtl_start(struct dtl *dtl) 147static int dtl_start(struct dtl *dtl)
148{ 148{
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl)
188{ 188{
189 return lppaca_of(dtl->cpu).dtl_idx; 189 return lppaca_of(dtl->cpu).dtl_idx;
190} 190}
191#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 191#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
192 192
193static int dtl_enable(struct dtl *dtl) 193static int dtl_enable(struct dtl *dtl)
194{ 194{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ca55882465d6..527e12c9573b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
281 281
282struct kmem_cache *dtl_cache; 282struct kmem_cache *dtl_cache;
283 283
284#ifdef CONFIG_VIRT_CPU_ACCOUNTING 284#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
285/* 285/*
286 * Allocate space for the dispatch trace log for all possible cpus 286 * Allocate space for the dispatch trace log for all possible cpus
287 * and register the buffers with the hypervisor. This is used for 287 * and register the buffers with the hypervisor. This is used for
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void)
332 332
333 return 0; 333 return 0;
334} 334}
335#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 335#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
336static inline int alloc_dispatch_logs(void) 336static inline int alloc_dispatch_logs(void)
337{ 337{
338 return 0; 338 return 0;
339} 339}
340#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 340#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
341 341
342static int alloc_dispatch_log_kmem_cache(void) 342static int alloc_dispatch_log_kmem_cache(void)
343{ 343{
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index d9130630f7ef..81c331481336 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -414,7 +414,7 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
414 goto error_sramclean; 414 goto error_sramclean;
415 } 415 }
416 416
417 if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma), 417 if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
418 DRIVER_NAME)) { 418 DRIVER_NAME)) {
419 printk(KERN_ERR DRIVER_NAME ": " 419 printk(KERN_ERR DRIVER_NAME ": "
420 "Can't request registers region\n"); 420 "Can't request registers region\n");
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b5ea38c25647..c15ba7d1be64 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,7 +78,6 @@ config S390
78 select HAVE_KVM if 64BIT 78 select HAVE_KVM if 64BIT
79 select HAVE_ARCH_TRACEHOOK 79 select HAVE_ARCH_TRACEHOOK
80 select INIT_ALL_POSSIBLE 80 select INIT_ALL_POSSIBLE
81 select HAVE_IRQ_WORK
82 select HAVE_PERF_EVENTS 81 select HAVE_PERF_EVENTS
83 select ARCH_HAVE_NMI_SAFE_CMPXCHG 82 select ARCH_HAVE_NMI_SAFE_CMPXCHG
84 select HAVE_DEBUG_KMEMLEAK 83 select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c1d7930a82f4..098adbb62660 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1365,6 +1365,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma,
1365 __pmd_idte(address, pmdp); 1365 __pmd_idte(address, pmdp);
1366} 1366}
1367 1367
1368#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1369static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1370 unsigned long address, pmd_t *pmdp)
1371{
1372 pmd_t pmd = *pmdp;
1373
1374 if (pmd_write(pmd)) {
1375 __pmd_idte(address, pmdp);
1376 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
1377 }
1378}
1379
1368static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) 1380static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1369{ 1381{
1370 pmd_t __pmd; 1382 pmd_t __pmd;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a5f4f5a1d24b..0aa98db8a80d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
120 nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); 120 nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
121 do_div(nsecs, 125); 121 do_div(nsecs, 125);
122 S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); 122 S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
123 /* Program the maximum value if we have an overflow (== year 2042) */
124 if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
125 S390_lowcore.clock_comparator = -1ULL;
123 set_clock_comparator(S390_lowcore.clock_comparator); 126 set_clock_comparator(S390_lowcore.clock_comparator);
124 return 0; 127 return 0;
125} 128}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b68444a..ce9cc5aa2033 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
127 * Update process times based on virtual cpu times stored by entry.S 127 * Update process times based on virtual cpu times stored by entry.S
128 * to the lowcore fields user_timer, system_timer & steal_clock. 128 * to the lowcore fields user_timer, system_timer & steal_clock.
129 */ 129 */
130void vtime_account(struct task_struct *tsk) 130void vtime_account_irq_enter(struct task_struct *tsk)
131{ 131{
132 struct thread_info *ti = task_thread_info(tsk); 132 struct thread_info *ti = task_thread_info(tsk);
133 u64 timer, system; 133 u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
145 145
146 virt_timer_forward(system); 146 virt_timer_forward(system);
147} 147}
148EXPORT_SYMBOL_GPL(vtime_account); 148EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
149 149
150void vtime_account_system(struct task_struct *tsk) 150void vtime_account_system(struct task_struct *tsk)
151__attribute__((alias("vtime_account"))); 151__attribute__((alias("vtime_account_irq_enter")));
152EXPORT_SYMBOL_GPL(vtime_account_system); 152EXPORT_SYMBOL_GPL(vtime_account_system);
153 153
154void __kprobes vtime_stop_cpu(void) 154void __kprobes vtime_stop_cpu(void)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index babc2b826c5c..9c833c585871 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
11 select HAVE_ARCH_TRACEHOOK 11 select HAVE_ARCH_TRACEHOOK
12 select HAVE_DMA_API_DEBUG 12 select HAVE_DMA_API_DEBUG
13 select HAVE_DMA_ATTRS 13 select HAVE_DMA_ATTRS
14 select HAVE_IRQ_WORK
15 select HAVE_PERF_EVENTS 14 select HAVE_PERF_EVENTS
16 select HAVE_DEBUG_BUGVERBOSE 15 select HAVE_DEBUG_BUGVERBOSE
17 select ARCH_HAVE_CUSTOM_GPIO_H 16 select ARCH_HAVE_CUSTOM_GPIO_H
@@ -91,9 +90,6 @@ config GENERIC_CSUM
91config GENERIC_HWEIGHT 90config GENERIC_HWEIGHT
92 def_bool y 91 def_bool y
93 92
94config IRQ_PER_CPU
95 def_bool y
96
97config GENERIC_GPIO 93config GENERIC_GPIO
98 def_bool n 94 def_bool n
99 95
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0c910163caa3..3d5a1b387cc0 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
22#include <asm/smp.h> 22#include <asm/smp.h>
23#include <asm/bl_bit.h> 23#include <asm/bl_bit.h>
24 24
25void (*pm_idle)(void); 25static void (*sh_idle)(void);
26 26
27static int hlt_counter; 27static int hlt_counter;
28 28
@@ -103,9 +103,9 @@ void cpu_idle(void)
103 /* Don't trace irqs off for idle */ 103 /* Don't trace irqs off for idle */
104 stop_critical_timings(); 104 stop_critical_timings();
105 if (cpuidle_idle_call()) 105 if (cpuidle_idle_call())
106 pm_idle(); 106 sh_idle();
107 /* 107 /*
108 * Sanity check to ensure that pm_idle() returns 108 * Sanity check to ensure that sh_idle() returns
109 * with IRQs enabled 109 * with IRQs enabled
110 */ 110 */
111 WARN_ON(irqs_disabled()); 111 WARN_ON(irqs_disabled());
@@ -123,13 +123,13 @@ void __init select_idle_routine(void)
123 /* 123 /*
124 * If a platform has set its own idle routine, leave it alone. 124 * If a platform has set its own idle routine, leave it alone.
125 */ 125 */
126 if (pm_idle) 126 if (sh_idle)
127 return; 127 return;
128 128
129 if (hlt_works()) 129 if (hlt_works())
130 pm_idle = default_idle; 130 sh_idle = default_idle;
131 else 131 else
132 pm_idle = poll_idle; 132 sh_idle = poll_idle;
133} 133}
134 134
135void stop_this_cpu(void *unused) 135void stop_this_cpu(void *unused)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9f2edb5c5551..9bff3db17c8c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,7 +23,6 @@ config SPARC
23 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 23 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
24 select RTC_CLASS 24 select RTC_CLASS
25 select RTC_DRV_M48T59 25 select RTC_DRV_M48T59
26 select HAVE_IRQ_WORK
27 select HAVE_DMA_ATTRS 26 select HAVE_DMA_ATTRS
28 select HAVE_DMA_API_DEBUG 27 select HAVE_DMA_API_DEBUG
29 select HAVE_ARCH_JUMP_LABEL 28 select HAVE_ARCH_JUMP_LABEL
@@ -61,6 +60,7 @@ config SPARC64
61 select HAVE_MEMBLOCK 60 select HAVE_MEMBLOCK
62 select HAVE_MEMBLOCK_NODE_MAP 61 select HAVE_MEMBLOCK_NODE_MAP
63 select HAVE_SYSCALL_WRAPPERS 62 select HAVE_SYSCALL_WRAPPERS
63 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
64 select HAVE_DYNAMIC_FTRACE 64 select HAVE_DYNAMIC_FTRACE
65 select HAVE_FTRACE_MCOUNT_RECORD 65 select HAVE_FTRACE_MCOUNT_RECORD
66 select HAVE_SYSCALL_TRACEPOINTS 66 select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7870be0f5adc..08fcce90316b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -71,7 +71,6 @@
71#define PMD_PADDR _AC(0xfffffffe,UL) 71#define PMD_PADDR _AC(0xfffffffe,UL)
72#define PMD_PADDR_SHIFT _AC(11,UL) 72#define PMD_PADDR_SHIFT _AC(11,UL)
73 73
74#ifdef CONFIG_TRANSPARENT_HUGEPAGE
75#define PMD_ISHUGE _AC(0x00000001,UL) 74#define PMD_ISHUGE _AC(0x00000001,UL)
76 75
77/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge 76/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge
@@ -86,7 +85,6 @@
86#define PMD_HUGE_ACCESSED _AC(0x00000080,UL) 85#define PMD_HUGE_ACCESSED _AC(0x00000080,UL)
87#define PMD_HUGE_EXEC _AC(0x00000040,UL) 86#define PMD_HUGE_EXEC _AC(0x00000040,UL)
88#define PMD_HUGE_SPLITTING _AC(0x00000020,UL) 87#define PMD_HUGE_SPLITTING _AC(0x00000020,UL)
89#endif
90 88
91/* PGDs point to PMD tables which are 8K aligned. */ 89/* PGDs point to PMD tables which are 8K aligned. */
92#define PGD_PADDR _AC(0xfffffffc,UL) 90#define PGD_PADDR _AC(0xfffffffc,UL)
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte)
628 return pte_val(pte) & _PAGE_SPECIAL; 626 return pte_val(pte) & _PAGE_SPECIAL;
629} 627}
630 628
629static inline int pmd_large(pmd_t pmd)
630{
631 return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
632 (PMD_ISHUGE | PMD_HUGE_PRESENT);
633}
634
631#ifdef CONFIG_TRANSPARENT_HUGEPAGE 635#ifdef CONFIG_TRANSPARENT_HUGEPAGE
632static inline int pmd_young(pmd_t pmd) 636static inline int pmd_young(pmd_t pmd)
633{ 637{
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
646 return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); 650 return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
647} 651}
648 652
649static inline int pmd_large(pmd_t pmd)
650{
651 return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
652 (PMD_ISHUGE | PMD_HUGE_PRESENT);
653}
654
655static inline int pmd_trans_splitting(pmd_t pmd) 653static inline int pmd_trans_splitting(pmd_t pmd)
656{ 654{
657 return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == 655 return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index c1e01914fd98..2c7baa4c4505 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *);
118extern struct task_struct *last_task_used_math; 118extern struct task_struct *last_task_used_math;
119 119
120#define cpu_relax() barrier() 120#define cpu_relax() barrier()
121extern void (*sparc_idle)(void);
121 122
122#endif 123#endif
123 124
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 348fa1aeabce..eefda32b595e 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -20,6 +20,7 @@
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21#include <asm/auxio.h> 21#include <asm/auxio.h>
22#include <asm/apc.h> 22#include <asm/apc.h>
23#include <asm/processor.h>
23 24
24/* Debugging 25/* Debugging
25 * 26 *
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op)
158 159
159 /* Assign power management IDLE handler */ 160 /* Assign power management IDLE handler */
160 if (!apc_no_idle) 161 if (!apc_no_idle)
161 pm_idle = apc_swift_idle; 162 sparc_idle = apc_swift_idle;
162 163
163 printk(KERN_INFO "%s: power management initialized%s\n", 164 printk(KERN_INFO "%s: power management initialized%s\n",
164 APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : ""); 165 APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 4e174321097d..708bca435219 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -9,6 +9,7 @@
9#include <asm/leon_amba.h> 9#include <asm/leon_amba.h>
10#include <asm/cpu_type.h> 10#include <asm/cpu_type.h>
11#include <asm/leon.h> 11#include <asm/leon.h>
12#include <asm/processor.h>
12 13
13/* List of Systems that need fixup instructions around power-down instruction */ 14/* List of Systems that need fixup instructions around power-down instruction */
14unsigned int pmc_leon_fixup_ids[] = { 15unsigned int pmc_leon_fixup_ids[] = {
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void)
69 if (sparc_cpu_model == sparc_leon) { 70 if (sparc_cpu_model == sparc_leon) {
70 /* Assign power management IDLE handler */ 71 /* Assign power management IDLE handler */
71 if (pmc_leon_need_fixup()) 72 if (pmc_leon_need_fixup())
72 pm_idle = pmc_leon_idle_fixup; 73 sparc_idle = pmc_leon_idle_fixup;
73 else 74 else
74 pm_idle = pmc_leon_idle; 75 sparc_idle = pmc_leon_idle;
75 76
76 printk(KERN_INFO "leon: power management initialized\n"); 77 printk(KERN_INFO "leon: power management initialized\n");
77 } 78 }
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index dcbb62f63068..8b7297faca79 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -17,6 +17,7 @@
17#include <asm/oplib.h> 17#include <asm/oplib.h>
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/auxio.h> 19#include <asm/auxio.h>
20#include <asm/processor.h>
20 21
21/* Debug 22/* Debug
22 * 23 *
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op)
63 64
64#ifndef PMC_NO_IDLE 65#ifndef PMC_NO_IDLE
65 /* Assign power management IDLE handler */ 66 /* Assign power management IDLE handler */
66 pm_idle = pmc_swift_idle; 67 sparc_idle = pmc_swift_idle;
67#endif 68#endif
68 69
69 printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); 70 printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index be8e862badaf..62eede13831a 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -43,8 +43,7 @@
43 * Power management idle function 43 * Power management idle function
44 * Set in pm platform drivers (apc.c and pmc.c) 44 * Set in pm platform drivers (apc.c and pmc.c)
45 */ 45 */
46void (*pm_idle)(void); 46void (*sparc_idle)(void);
47EXPORT_SYMBOL(pm_idle);
48 47
49/* 48/*
50 * Power-off handler instantiation for pm.h compliance 49 * Power-off handler instantiation for pm.h compliance
@@ -75,8 +74,8 @@ void cpu_idle(void)
75 /* endless idle loop with no priority at all */ 74 /* endless idle loop with no priority at all */
76 for (;;) { 75 for (;;) {
77 while (!need_resched()) { 76 while (!need_resched()) {
78 if (pm_idle) 77 if (sparc_idle)
79 (*pm_idle)(); 78 (*sparc_idle)();
80 else 79 else
81 cpu_relax(); 80 cpu_relax();
82 } 81 }
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 1303021748c8..9f20566b0773 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -64,7 +64,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
64 err = -ENODEV; 64 err = -ENODEV;
65 65
66 mutex_lock(&of_set_property_mutex); 66 mutex_lock(&of_set_property_mutex);
67 write_lock(&devtree_lock); 67 raw_spin_lock(&devtree_lock);
68 prevp = &dp->properties; 68 prevp = &dp->properties;
69 while (*prevp) { 69 while (*prevp) {
70 struct property *prop = *prevp; 70 struct property *prop = *prevp;
@@ -91,7 +91,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
91 } 91 }
92 prevp = &(*prevp)->next; 92 prevp = &(*prevp)->next;
93 } 93 }
94 write_unlock(&devtree_lock); 94 raw_spin_unlock(&devtree_lock);
95 mutex_unlock(&of_set_property_mutex); 95 mutex_unlock(&of_set_property_mutex);
96 96
97 /* XXX Upate procfs if necessary... */ 97 /* XXX Upate procfs if necessary... */
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 1271b3a27d4e..be5bdf93c767 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
554 regs = pr->phys_addr; 554 regs = pr->phys_addr;
555 555
556 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); 556 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
557 if (!iommu)
558 goto fatal_memory_error;
559 strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); 557 strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
560 if (!strbuf) 558 if (!iommu || !strbuf)
561 goto fatal_memory_error; 559 goto fatal_memory_error;
562 560
563 op->dev.archdata.iommu = iommu; 561 op->dev.archdata.iommu = iommu;
@@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
656 return; 654 return;
657 655
658fatal_memory_error: 656fatal_memory_error:
657 kfree(iommu);
658 kfree(strbuf);
659 prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); 659 prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
660} 660}
661 661
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 42c55df3aec3..01ee23dd724d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
66 return 1; 66 return 1;
67} 67}
68 68
69static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
70 unsigned long end, int write, struct page **pages,
71 int *nr)
72{
73 struct page *head, *page, *tail;
74 u32 mask;
75 int refs;
76
77 mask = PMD_HUGE_PRESENT;
78 if (write)
79 mask |= PMD_HUGE_WRITE;
80 if ((pmd_val(pmd) & mask) != mask)
81 return 0;
82
83 refs = 0;
84 head = pmd_page(pmd);
85 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86 tail = page;
87 do {
88 VM_BUG_ON(compound_head(page) != head);
89 pages[*nr] = page;
90 (*nr)++;
91 page++;
92 refs++;
93 } while (addr += PAGE_SIZE, addr != end);
94
95 if (!page_cache_add_speculative(head, refs)) {
96 *nr -= refs;
97 return 0;
98 }
99
100 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
101 *nr -= refs;
102 while (refs--)
103 put_page(head);
104 return 0;
105 }
106
107 /* Any tail page need their mapcount reference taken before we
108 * return.
109 */
110 while (refs--) {
111 if (PageTail(tail))
112 get_huge_page_tail(tail);
113 tail++;
114 }
115
116 return 1;
117}
118
69static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, 119static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
70 int write, struct page **pages, int *nr) 120 int write, struct page **pages, int *nr)
71{ 121{
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
77 pmd_t pmd = *pmdp; 127 pmd_t pmd = *pmdp;
78 128
79 next = pmd_addr_end(addr, end); 129 next = pmd_addr_end(addr, end);
80 if (pmd_none(pmd)) 130 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
81 return 0; 131 return 0;
82 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) 132 if (unlikely(pmd_large(pmd))) {
133 if (!gup_huge_pmd(pmdp, pmd, addr, next,
134 write, pages, nr))
135 return 0;
136 } else if (!gup_pte_range(pmd, addr, next, write,
137 pages, nr))
83 return 0; 138 return 0;
84 } while (pmdp++, addr = next, addr != end); 139 } while (pmdp++, addr = next, addr != end);
85 140
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 875d008828b8..1bb7ad4aeff4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
140 140
141source "init/Kconfig" 141source "init/Kconfig"
142 142
143source "kernel/Kconfig.freezer"
144
143menu "Tilera-specific configuration" 145menu "Tilera-specific configuration"
144 146
145config NR_CPUS 147config NR_CPUS
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 2a9b293fece6..31672918064c 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
250#define iowrite32 writel 250#define iowrite32 writel
251#define iowrite64 writeq 251#define iowrite64 writeq
252 252
253static inline void memset_io(void *dst, int val, size_t len) 253#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
254
255static inline void memset_io(volatile void *dst, int val, size_t len)
254{ 256{
255 int x; 257 int x;
256 BUG_ON((unsigned long)dst & 0x3); 258 BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
277 writel(*(u32 *)(src + x), dst + x); 279 writel(*(u32 *)(src + x), dst + x);
278} 280}
279 281
282#endif
283
280/* 284/*
281 * The Tile architecture does not support IOPORT, even with PCI. 285 * The Tile architecture does not support IOPORT, even with PCI.
282 * Unfortunately we can't yet simply not declare these methods, 286 * Unfortunately we can't yet simply not declare these methods,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index b4e96fef2cf8..241c0bb60b12 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,32 +18,20 @@
18#include <arch/interrupts.h> 18#include <arch/interrupts.h>
19#include <arch/chip.h> 19#include <arch/chip.h>
20 20
21#if !defined(__tilegx__) && defined(__ASSEMBLY__)
22
23/* 21/*
24 * The set of interrupts we want to allow when interrupts are nominally 22 * The set of interrupts we want to allow when interrupts are nominally
25 * disabled. The remainder are effectively "NMI" interrupts from 23 * disabled. The remainder are effectively "NMI" interrupts from
26 * the point of view of the generic Linux code. Note that synchronous 24 * the point of view of the generic Linux code. Note that synchronous
27 * interrupts (aka "non-queued") are not blocked by the mask in any case. 25 * interrupts (aka "non-queued") are not blocked by the mask in any case.
28 */ 26 */
29#if CHIP_HAS_AUX_PERF_COUNTERS()
30#define LINUX_MASKABLE_INTERRUPTS_HI \
31 (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
32#else
33#define LINUX_MASKABLE_INTERRUPTS_HI \
34 (~(INT_MASK_HI(INT_PERF_COUNT)))
35#endif
36
37#else
38
39#if CHIP_HAS_AUX_PERF_COUNTERS()
40#define LINUX_MASKABLE_INTERRUPTS \
41 (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
42#else
43#define LINUX_MASKABLE_INTERRUPTS \ 27#define LINUX_MASKABLE_INTERRUPTS \
44 (~(INT_MASK(INT_PERF_COUNT))) 28 (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
45#endif
46 29
30#if CHIP_HAS_SPLIT_INTR_MASK()
31/* The same macro, but for the two 32-bit SPRs separately. */
32#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
33#define LINUX_MASKABLE_INTERRUPTS_HI \
34 (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
47#endif 35#endif
48 36
49#ifndef __ASSEMBLY__ 37#ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
126 * to know our current state. 114 * to know our current state.
127 */ 115 */
128DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); 116DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
129#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) 117#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
130 118
131/* Disable interrupts. */ 119/* Disable interrupts. */
132#define arch_local_irq_disable() \ 120#define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
165 153
166/* Prevent the given interrupt from being enabled next time we enable irqs. */ 154/* Prevent the given interrupt from being enabled next time we enable irqs. */
167#define arch_local_irq_mask(interrupt) \ 155#define arch_local_irq_mask(interrupt) \
168 (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) 156 (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
169 157
170/* Prevent the given interrupt from being enabled immediately. */ 158/* Prevent the given interrupt from being enabled immediately. */
171#define arch_local_irq_mask_now(interrupt) do { \ 159#define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
175 163
176/* Allow the given interrupt to be enabled next time we enable irqs. */ 164/* Allow the given interrupt to be enabled next time we enable irqs. */
177#define arch_local_irq_unmask(interrupt) \ 165#define arch_local_irq_unmask(interrupt) \
178 (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) 166 (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
179 167
180/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ 168/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
181#define arch_local_irq_unmask_now(interrupt) do { \ 169#define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
250/* Disable interrupts. */ 238/* Disable interrupts. */
251#define IRQ_DISABLE(tmp0, tmp1) \ 239#define IRQ_DISABLE(tmp0, tmp1) \
252 { \ 240 { \
253 movei tmp0, -1; \ 241 movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
254 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ 242 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
255 }; \ 243 }; \
256 { \ 244 { \
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h
index 96b5710505b6..2efe3f68b2d6 100644
--- a/arch/tile/include/uapi/arch/interrupts_32.h
+++ b/arch/tile/include/uapi/arch/interrupts_32.h
@@ -15,6 +15,7 @@
15#ifndef __ARCH_INTERRUPTS_H__ 15#ifndef __ARCH_INTERRUPTS_H__
16#define __ARCH_INTERRUPTS_H__ 16#define __ARCH_INTERRUPTS_H__
17 17
18#ifndef __KERNEL__
18/** Mask for an interrupt. */ 19/** Mask for an interrupt. */
19/* Note: must handle breaking interrupts into high and low words manually. */ 20/* Note: must handle breaking interrupts into high and low words manually. */
20#define INT_MASK_LO(intno) (1 << (intno)) 21#define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
23#ifndef __ASSEMBLER__ 24#ifndef __ASSEMBLER__
24#define INT_MASK(intno) (1ULL << (intno)) 25#define INT_MASK(intno) (1ULL << (intno))
25#endif 26#endif
27#endif
26 28
27 29
28/** Where a given interrupt executes */ 30/** Where a given interrupt executes */
@@ -92,216 +94,216 @@
92 94
93#ifndef __ASSEMBLER__ 95#ifndef __ASSEMBLER__
94#define QUEUED_INTERRUPTS ( \ 96#define QUEUED_INTERRUPTS ( \
95 INT_MASK(INT_MEM_ERROR) | \ 97 (1ULL << INT_MEM_ERROR) | \
96 INT_MASK(INT_DMATLB_MISS) | \ 98 (1ULL << INT_DMATLB_MISS) | \
97 INT_MASK(INT_DMATLB_ACCESS) | \ 99 (1ULL << INT_DMATLB_ACCESS) | \
98 INT_MASK(INT_SNITLB_MISS) | \ 100 (1ULL << INT_SNITLB_MISS) | \
99 INT_MASK(INT_SN_NOTIFY) | \ 101 (1ULL << INT_SN_NOTIFY) | \
100 INT_MASK(INT_SN_FIREWALL) | \ 102 (1ULL << INT_SN_FIREWALL) | \
101 INT_MASK(INT_IDN_FIREWALL) | \ 103 (1ULL << INT_IDN_FIREWALL) | \
102 INT_MASK(INT_UDN_FIREWALL) | \ 104 (1ULL << INT_UDN_FIREWALL) | \
103 INT_MASK(INT_TILE_TIMER) | \ 105 (1ULL << INT_TILE_TIMER) | \
104 INT_MASK(INT_IDN_TIMER) | \ 106 (1ULL << INT_IDN_TIMER) | \
105 INT_MASK(INT_UDN_TIMER) | \ 107 (1ULL << INT_UDN_TIMER) | \
106 INT_MASK(INT_DMA_NOTIFY) | \ 108 (1ULL << INT_DMA_NOTIFY) | \
107 INT_MASK(INT_IDN_CA) | \ 109 (1ULL << INT_IDN_CA) | \
108 INT_MASK(INT_UDN_CA) | \ 110 (1ULL << INT_UDN_CA) | \
109 INT_MASK(INT_IDN_AVAIL) | \ 111 (1ULL << INT_IDN_AVAIL) | \
110 INT_MASK(INT_UDN_AVAIL) | \ 112 (1ULL << INT_UDN_AVAIL) | \
111 INT_MASK(INT_PERF_COUNT) | \ 113 (1ULL << INT_PERF_COUNT) | \
112 INT_MASK(INT_INTCTRL_3) | \ 114 (1ULL << INT_INTCTRL_3) | \
113 INT_MASK(INT_INTCTRL_2) | \ 115 (1ULL << INT_INTCTRL_2) | \
114 INT_MASK(INT_INTCTRL_1) | \ 116 (1ULL << INT_INTCTRL_1) | \
115 INT_MASK(INT_INTCTRL_0) | \ 117 (1ULL << INT_INTCTRL_0) | \
116 INT_MASK(INT_BOOT_ACCESS) | \ 118 (1ULL << INT_BOOT_ACCESS) | \
117 INT_MASK(INT_WORLD_ACCESS) | \ 119 (1ULL << INT_WORLD_ACCESS) | \
118 INT_MASK(INT_I_ASID) | \ 120 (1ULL << INT_I_ASID) | \
119 INT_MASK(INT_D_ASID) | \ 121 (1ULL << INT_D_ASID) | \
120 INT_MASK(INT_DMA_ASID) | \ 122 (1ULL << INT_DMA_ASID) | \
121 INT_MASK(INT_SNI_ASID) | \ 123 (1ULL << INT_SNI_ASID) | \
122 INT_MASK(INT_DMA_CPL) | \ 124 (1ULL << INT_DMA_CPL) | \
123 INT_MASK(INT_SN_CPL) | \ 125 (1ULL << INT_SN_CPL) | \
124 INT_MASK(INT_DOUBLE_FAULT) | \ 126 (1ULL << INT_DOUBLE_FAULT) | \
125 INT_MASK(INT_AUX_PERF_COUNT) | \ 127 (1ULL << INT_AUX_PERF_COUNT) | \
126 0) 128 0)
127#define NONQUEUED_INTERRUPTS ( \ 129#define NONQUEUED_INTERRUPTS ( \
128 INT_MASK(INT_ITLB_MISS) | \ 130 (1ULL << INT_ITLB_MISS) | \
129 INT_MASK(INT_ILL) | \ 131 (1ULL << INT_ILL) | \
130 INT_MASK(INT_GPV) | \ 132 (1ULL << INT_GPV) | \
131 INT_MASK(INT_SN_ACCESS) | \ 133 (1ULL << INT_SN_ACCESS) | \
132 INT_MASK(INT_IDN_ACCESS) | \ 134 (1ULL << INT_IDN_ACCESS) | \
133 INT_MASK(INT_UDN_ACCESS) | \ 135 (1ULL << INT_UDN_ACCESS) | \
134 INT_MASK(INT_IDN_REFILL) | \ 136 (1ULL << INT_IDN_REFILL) | \
135 INT_MASK(INT_UDN_REFILL) | \ 137 (1ULL << INT_UDN_REFILL) | \
136 INT_MASK(INT_IDN_COMPLETE) | \ 138 (1ULL << INT_IDN_COMPLETE) | \
137 INT_MASK(INT_UDN_COMPLETE) | \ 139 (1ULL << INT_UDN_COMPLETE) | \
138 INT_MASK(INT_SWINT_3) | \ 140 (1ULL << INT_SWINT_3) | \
139 INT_MASK(INT_SWINT_2) | \ 141 (1ULL << INT_SWINT_2) | \
140 INT_MASK(INT_SWINT_1) | \ 142 (1ULL << INT_SWINT_1) | \
141 INT_MASK(INT_SWINT_0) | \ 143 (1ULL << INT_SWINT_0) | \
142 INT_MASK(INT_UNALIGN_DATA) | \ 144 (1ULL << INT_UNALIGN_DATA) | \
143 INT_MASK(INT_DTLB_MISS) | \ 145 (1ULL << INT_DTLB_MISS) | \
144 INT_MASK(INT_DTLB_ACCESS) | \ 146 (1ULL << INT_DTLB_ACCESS) | \
145 INT_MASK(INT_SN_STATIC_ACCESS) | \ 147 (1ULL << INT_SN_STATIC_ACCESS) | \
146 0) 148 0)
147#define CRITICAL_MASKED_INTERRUPTS ( \ 149#define CRITICAL_MASKED_INTERRUPTS ( \
148 INT_MASK(INT_MEM_ERROR) | \ 150 (1ULL << INT_MEM_ERROR) | \
149 INT_MASK(INT_DMATLB_MISS) | \ 151 (1ULL << INT_DMATLB_MISS) | \
150 INT_MASK(INT_DMATLB_ACCESS) | \ 152 (1ULL << INT_DMATLB_ACCESS) | \
151 INT_MASK(INT_SNITLB_MISS) | \ 153 (1ULL << INT_SNITLB_MISS) | \
152 INT_MASK(INT_SN_NOTIFY) | \ 154 (1ULL << INT_SN_NOTIFY) | \
153 INT_MASK(INT_SN_FIREWALL) | \ 155 (1ULL << INT_SN_FIREWALL) | \
154 INT_MASK(INT_IDN_FIREWALL) | \ 156 (1ULL << INT_IDN_FIREWALL) | \
155 INT_MASK(INT_UDN_FIREWALL) | \ 157 (1ULL << INT_UDN_FIREWALL) | \
156 INT_MASK(INT_TILE_TIMER) | \ 158 (1ULL << INT_TILE_TIMER) | \
157 INT_MASK(INT_IDN_TIMER) | \ 159 (1ULL << INT_IDN_TIMER) | \
158 INT_MASK(INT_UDN_TIMER) | \ 160 (1ULL << INT_UDN_TIMER) | \
159 INT_MASK(INT_DMA_NOTIFY) | \ 161 (1ULL << INT_DMA_NOTIFY) | \
160 INT_MASK(INT_IDN_CA) | \ 162 (1ULL << INT_IDN_CA) | \
161 INT_MASK(INT_UDN_CA) | \ 163 (1ULL << INT_UDN_CA) | \
162 INT_MASK(INT_IDN_AVAIL) | \ 164 (1ULL << INT_IDN_AVAIL) | \
163 INT_MASK(INT_UDN_AVAIL) | \ 165 (1ULL << INT_UDN_AVAIL) | \
164 INT_MASK(INT_PERF_COUNT) | \ 166 (1ULL << INT_PERF_COUNT) | \
165 INT_MASK(INT_INTCTRL_3) | \ 167 (1ULL << INT_INTCTRL_3) | \
166 INT_MASK(INT_INTCTRL_2) | \ 168 (1ULL << INT_INTCTRL_2) | \
167 INT_MASK(INT_INTCTRL_1) | \ 169 (1ULL << INT_INTCTRL_1) | \
168 INT_MASK(INT_INTCTRL_0) | \ 170 (1ULL << INT_INTCTRL_0) | \
169 INT_MASK(INT_AUX_PERF_COUNT) | \ 171 (1ULL << INT_AUX_PERF_COUNT) | \
170 0) 172 0)
171#define CRITICAL_UNMASKED_INTERRUPTS ( \ 173#define CRITICAL_UNMASKED_INTERRUPTS ( \
172 INT_MASK(INT_ITLB_MISS) | \ 174 (1ULL << INT_ITLB_MISS) | \
173 INT_MASK(INT_ILL) | \ 175 (1ULL << INT_ILL) | \
174 INT_MASK(INT_GPV) | \ 176 (1ULL << INT_GPV) | \
175 INT_MASK(INT_SN_ACCESS) | \ 177 (1ULL << INT_SN_ACCESS) | \
176 INT_MASK(INT_IDN_ACCESS) | \ 178 (1ULL << INT_IDN_ACCESS) | \
177 INT_MASK(INT_UDN_ACCESS) | \ 179 (1ULL << INT_UDN_ACCESS) | \
178 INT_MASK(INT_IDN_REFILL) | \ 180 (1ULL << INT_IDN_REFILL) | \
179 INT_MASK(INT_UDN_REFILL) | \ 181 (1ULL << INT_UDN_REFILL) | \
180 INT_MASK(INT_IDN_COMPLETE) | \ 182 (1ULL << INT_IDN_COMPLETE) | \
181 INT_MASK(INT_UDN_COMPLETE) | \ 183 (1ULL << INT_UDN_COMPLETE) | \
182 INT_MASK(INT_SWINT_3) | \ 184 (1ULL << INT_SWINT_3) | \
183 INT_MASK(INT_SWINT_2) | \ 185 (1ULL << INT_SWINT_2) | \
184 INT_MASK(INT_SWINT_1) | \ 186 (1ULL << INT_SWINT_1) | \
185 INT_MASK(INT_SWINT_0) | \ 187 (1ULL << INT_SWINT_0) | \
186 INT_MASK(INT_UNALIGN_DATA) | \ 188 (1ULL << INT_UNALIGN_DATA) | \
187 INT_MASK(INT_DTLB_MISS) | \ 189 (1ULL << INT_DTLB_MISS) | \
188 INT_MASK(INT_DTLB_ACCESS) | \ 190 (1ULL << INT_DTLB_ACCESS) | \
189 INT_MASK(INT_BOOT_ACCESS) | \ 191 (1ULL << INT_BOOT_ACCESS) | \
190 INT_MASK(INT_WORLD_ACCESS) | \ 192 (1ULL << INT_WORLD_ACCESS) | \
191 INT_MASK(INT_I_ASID) | \ 193 (1ULL << INT_I_ASID) | \
192 INT_MASK(INT_D_ASID) | \ 194 (1ULL << INT_D_ASID) | \
193 INT_MASK(INT_DMA_ASID) | \ 195 (1ULL << INT_DMA_ASID) | \
194 INT_MASK(INT_SNI_ASID) | \ 196 (1ULL << INT_SNI_ASID) | \
195 INT_MASK(INT_DMA_CPL) | \ 197 (1ULL << INT_DMA_CPL) | \
196 INT_MASK(INT_SN_CPL) | \ 198 (1ULL << INT_SN_CPL) | \
197 INT_MASK(INT_DOUBLE_FAULT) | \ 199 (1ULL << INT_DOUBLE_FAULT) | \
198 INT_MASK(INT_SN_STATIC_ACCESS) | \ 200 (1ULL << INT_SN_STATIC_ACCESS) | \
199 0) 201 0)
200#define MASKABLE_INTERRUPTS ( \ 202#define MASKABLE_INTERRUPTS ( \
201 INT_MASK(INT_MEM_ERROR) | \ 203 (1ULL << INT_MEM_ERROR) | \
202 INT_MASK(INT_IDN_REFILL) | \ 204 (1ULL << INT_IDN_REFILL) | \
203 INT_MASK(INT_UDN_REFILL) | \ 205 (1ULL << INT_UDN_REFILL) | \
204 INT_MASK(INT_IDN_COMPLETE) | \ 206 (1ULL << INT_IDN_COMPLETE) | \
205 INT_MASK(INT_UDN_COMPLETE) | \ 207 (1ULL << INT_UDN_COMPLETE) | \
206 INT_MASK(INT_DMATLB_MISS) | \ 208 (1ULL << INT_DMATLB_MISS) | \
207 INT_MASK(INT_DMATLB_ACCESS) | \ 209 (1ULL << INT_DMATLB_ACCESS) | \
208 INT_MASK(INT_SNITLB_MISS) | \ 210 (1ULL << INT_SNITLB_MISS) | \
209 INT_MASK(INT_SN_NOTIFY) | \ 211 (1ULL << INT_SN_NOTIFY) | \
210 INT_MASK(INT_SN_FIREWALL) | \ 212 (1ULL << INT_SN_FIREWALL) | \
211 INT_MASK(INT_IDN_FIREWALL) | \ 213 (1ULL << INT_IDN_FIREWALL) | \
212 INT_MASK(INT_UDN_FIREWALL) | \ 214 (1ULL << INT_UDN_FIREWALL) | \
213 INT_MASK(INT_TILE_TIMER) | \ 215 (1ULL << INT_TILE_TIMER) | \
214 INT_MASK(INT_IDN_TIMER) | \ 216 (1ULL << INT_IDN_TIMER) | \
215 INT_MASK(INT_UDN_TIMER) | \ 217 (1ULL << INT_UDN_TIMER) | \
216 INT_MASK(INT_DMA_NOTIFY) | \ 218 (1ULL << INT_DMA_NOTIFY) | \
217 INT_MASK(INT_IDN_CA) | \ 219 (1ULL << INT_IDN_CA) | \
218 INT_MASK(INT_UDN_CA) | \ 220 (1ULL << INT_UDN_CA) | \
219 INT_MASK(INT_IDN_AVAIL) | \ 221 (1ULL << INT_IDN_AVAIL) | \
220 INT_MASK(INT_UDN_AVAIL) | \ 222 (1ULL << INT_UDN_AVAIL) | \
221 INT_MASK(INT_PERF_COUNT) | \ 223 (1ULL << INT_PERF_COUNT) | \
222 INT_MASK(INT_INTCTRL_3) | \ 224 (1ULL << INT_INTCTRL_3) | \
223 INT_MASK(INT_INTCTRL_2) | \ 225 (1ULL << INT_INTCTRL_2) | \
224 INT_MASK(INT_INTCTRL_1) | \ 226 (1ULL << INT_INTCTRL_1) | \
225 INT_MASK(INT_INTCTRL_0) | \ 227 (1ULL << INT_INTCTRL_0) | \
226 INT_MASK(INT_AUX_PERF_COUNT) | \ 228 (1ULL << INT_AUX_PERF_COUNT) | \
227 0) 229 0)
228#define UNMASKABLE_INTERRUPTS ( \ 230#define UNMASKABLE_INTERRUPTS ( \
229 INT_MASK(INT_ITLB_MISS) | \ 231 (1ULL << INT_ITLB_MISS) | \
230 INT_MASK(INT_ILL) | \ 232 (1ULL << INT_ILL) | \
231 INT_MASK(INT_GPV) | \ 233 (1ULL << INT_GPV) | \
232 INT_MASK(INT_SN_ACCESS) | \ 234 (1ULL << INT_SN_ACCESS) | \
233 INT_MASK(INT_IDN_ACCESS) | \ 235 (1ULL << INT_IDN_ACCESS) | \
234 INT_MASK(INT_UDN_ACCESS) | \ 236 (1ULL << INT_UDN_ACCESS) | \
235 INT_MASK(INT_SWINT_3) | \ 237 (1ULL << INT_SWINT_3) | \
236 INT_MASK(INT_SWINT_2) | \ 238 (1ULL << INT_SWINT_2) | \
237 INT_MASK(INT_SWINT_1) | \ 239 (1ULL << INT_SWINT_1) | \
238 INT_MASK(INT_SWINT_0) | \ 240 (1ULL << INT_SWINT_0) | \
239 INT_MASK(INT_UNALIGN_DATA) | \ 241 (1ULL << INT_UNALIGN_DATA) | \
240 INT_MASK(INT_DTLB_MISS) | \ 242 (1ULL << INT_DTLB_MISS) | \
241 INT_MASK(INT_DTLB_ACCESS) | \ 243 (1ULL << INT_DTLB_ACCESS) | \
242 INT_MASK(INT_BOOT_ACCESS) | \ 244 (1ULL << INT_BOOT_ACCESS) | \
243 INT_MASK(INT_WORLD_ACCESS) | \ 245 (1ULL << INT_WORLD_ACCESS) | \
244 INT_MASK(INT_I_ASID) | \ 246 (1ULL << INT_I_ASID) | \
245 INT_MASK(INT_D_ASID) | \ 247 (1ULL << INT_D_ASID) | \
246 INT_MASK(INT_DMA_ASID) | \ 248 (1ULL << INT_DMA_ASID) | \
247 INT_MASK(INT_SNI_ASID) | \ 249 (1ULL << INT_SNI_ASID) | \
248 INT_MASK(INT_DMA_CPL) | \ 250 (1ULL << INT_DMA_CPL) | \
249 INT_MASK(INT_SN_CPL) | \ 251 (1ULL << INT_SN_CPL) | \
250 INT_MASK(INT_DOUBLE_FAULT) | \ 252 (1ULL << INT_DOUBLE_FAULT) | \
251 INT_MASK(INT_SN_STATIC_ACCESS) | \ 253 (1ULL << INT_SN_STATIC_ACCESS) | \
252 0) 254 0)
253#define SYNC_INTERRUPTS ( \ 255#define SYNC_INTERRUPTS ( \
254 INT_MASK(INT_ITLB_MISS) | \ 256 (1ULL << INT_ITLB_MISS) | \
255 INT_MASK(INT_ILL) | \ 257 (1ULL << INT_ILL) | \
256 INT_MASK(INT_GPV) | \ 258 (1ULL << INT_GPV) | \
257 INT_MASK(INT_SN_ACCESS) | \ 259 (1ULL << INT_SN_ACCESS) | \
258 INT_MASK(INT_IDN_ACCESS) | \ 260 (1ULL << INT_IDN_ACCESS) | \
259 INT_MASK(INT_UDN_ACCESS) | \ 261 (1ULL << INT_UDN_ACCESS) | \
260 INT_MASK(INT_IDN_REFILL) | \ 262 (1ULL << INT_IDN_REFILL) | \
261 INT_MASK(INT_UDN_REFILL) | \ 263 (1ULL << INT_UDN_REFILL) | \
262 INT_MASK(INT_IDN_COMPLETE) | \ 264 (1ULL << INT_IDN_COMPLETE) | \
263 INT_MASK(INT_UDN_COMPLETE) | \ 265 (1ULL << INT_UDN_COMPLETE) | \
264 INT_MASK(INT_SWINT_3) | \ 266 (1ULL << INT_SWINT_3) | \
265 INT_MASK(INT_SWINT_2) | \ 267 (1ULL << INT_SWINT_2) | \
266 INT_MASK(INT_SWINT_1) | \ 268 (1ULL << INT_SWINT_1) | \
267 INT_MASK(INT_SWINT_0) | \ 269 (1ULL << INT_SWINT_0) | \
268 INT_MASK(INT_UNALIGN_DATA) | \ 270 (1ULL << INT_UNALIGN_DATA) | \
269 INT_MASK(INT_DTLB_MISS) | \ 271 (1ULL << INT_DTLB_MISS) | \
270 INT_MASK(INT_DTLB_ACCESS) | \ 272 (1ULL << INT_DTLB_ACCESS) | \
271 INT_MASK(INT_SN_STATIC_ACCESS) | \ 273 (1ULL << INT_SN_STATIC_ACCESS) | \
272 0) 274 0)
273#define NON_SYNC_INTERRUPTS ( \ 275#define NON_SYNC_INTERRUPTS ( \
274 INT_MASK(INT_MEM_ERROR) | \ 276 (1ULL << INT_MEM_ERROR) | \
275 INT_MASK(INT_DMATLB_MISS) | \ 277 (1ULL << INT_DMATLB_MISS) | \
276 INT_MASK(INT_DMATLB_ACCESS) | \ 278 (1ULL << INT_DMATLB_ACCESS) | \
277 INT_MASK(INT_SNITLB_MISS) | \ 279 (1ULL << INT_SNITLB_MISS) | \
278 INT_MASK(INT_SN_NOTIFY) | \ 280 (1ULL << INT_SN_NOTIFY) | \
279 INT_MASK(INT_SN_FIREWALL) | \ 281 (1ULL << INT_SN_FIREWALL) | \
280 INT_MASK(INT_IDN_FIREWALL) | \ 282 (1ULL << INT_IDN_FIREWALL) | \
281 INT_MASK(INT_UDN_FIREWALL) | \ 283 (1ULL << INT_UDN_FIREWALL) | \
282 INT_MASK(INT_TILE_TIMER) | \ 284 (1ULL << INT_TILE_TIMER) | \
283 INT_MASK(INT_IDN_TIMER) | \ 285 (1ULL << INT_IDN_TIMER) | \
284 INT_MASK(INT_UDN_TIMER) | \ 286 (1ULL << INT_UDN_TIMER) | \
285 INT_MASK(INT_DMA_NOTIFY) | \ 287 (1ULL << INT_DMA_NOTIFY) | \
286 INT_MASK(INT_IDN_CA) | \ 288 (1ULL << INT_IDN_CA) | \
287 INT_MASK(INT_UDN_CA) | \ 289 (1ULL << INT_UDN_CA) | \
288 INT_MASK(INT_IDN_AVAIL) | \ 290 (1ULL << INT_IDN_AVAIL) | \
289 INT_MASK(INT_UDN_AVAIL) | \ 291 (1ULL << INT_UDN_AVAIL) | \
290 INT_MASK(INT_PERF_COUNT) | \ 292 (1ULL << INT_PERF_COUNT) | \
291 INT_MASK(INT_INTCTRL_3) | \ 293 (1ULL << INT_INTCTRL_3) | \
292 INT_MASK(INT_INTCTRL_2) | \ 294 (1ULL << INT_INTCTRL_2) | \
293 INT_MASK(INT_INTCTRL_1) | \ 295 (1ULL << INT_INTCTRL_1) | \
294 INT_MASK(INT_INTCTRL_0) | \ 296 (1ULL << INT_INTCTRL_0) | \
295 INT_MASK(INT_BOOT_ACCESS) | \ 297 (1ULL << INT_BOOT_ACCESS) | \
296 INT_MASK(INT_WORLD_ACCESS) | \ 298 (1ULL << INT_WORLD_ACCESS) | \
297 INT_MASK(INT_I_ASID) | \ 299 (1ULL << INT_I_ASID) | \
298 INT_MASK(INT_D_ASID) | \ 300 (1ULL << INT_D_ASID) | \
299 INT_MASK(INT_DMA_ASID) | \ 301 (1ULL << INT_DMA_ASID) | \
300 INT_MASK(INT_SNI_ASID) | \ 302 (1ULL << INT_SNI_ASID) | \
301 INT_MASK(INT_DMA_CPL) | \ 303 (1ULL << INT_DMA_CPL) | \
302 INT_MASK(INT_SN_CPL) | \ 304 (1ULL << INT_SN_CPL) | \
303 INT_MASK(INT_DOUBLE_FAULT) | \ 305 (1ULL << INT_DOUBLE_FAULT) | \
304 INT_MASK(INT_AUX_PERF_COUNT) | \ 306 (1ULL << INT_AUX_PERF_COUNT) | \
305 0) 307 0)
306#endif /* !__ASSEMBLER__ */ 308#endif /* !__ASSEMBLER__ */
307#endif /* !__ARCH_INTERRUPTS_H__ */ 309#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h
index 5bb58b2e4e6f..13c9f9182348 100644
--- a/arch/tile/include/uapi/arch/interrupts_64.h
+++ b/arch/tile/include/uapi/arch/interrupts_64.h
@@ -15,6 +15,7 @@
15#ifndef __ARCH_INTERRUPTS_H__ 15#ifndef __ARCH_INTERRUPTS_H__
16#define __ARCH_INTERRUPTS_H__ 16#define __ARCH_INTERRUPTS_H__
17 17
18#ifndef __KERNEL__
18/** Mask for an interrupt. */ 19/** Mask for an interrupt. */
19#ifdef __ASSEMBLER__ 20#ifdef __ASSEMBLER__
20/* Note: must handle breaking interrupts into high and low words manually. */ 21/* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
22#else 23#else
23#define INT_MASK(intno) (1ULL << (intno)) 24#define INT_MASK(intno) (1ULL << (intno))
24#endif 25#endif
26#endif
25 27
26 28
27/** Where a given interrupt executes */ 29/** Where a given interrupt executes */
@@ -85,192 +87,192 @@
85 87
86#ifndef __ASSEMBLER__ 88#ifndef __ASSEMBLER__
87#define QUEUED_INTERRUPTS ( \ 89#define QUEUED_INTERRUPTS ( \
88 INT_MASK(INT_MEM_ERROR) | \ 90 (1ULL << INT_MEM_ERROR) | \
89 INT_MASK(INT_IDN_COMPLETE) | \ 91 (1ULL << INT_IDN_COMPLETE) | \
90 INT_MASK(INT_UDN_COMPLETE) | \ 92 (1ULL << INT_UDN_COMPLETE) | \
91 INT_MASK(INT_IDN_FIREWALL) | \ 93 (1ULL << INT_IDN_FIREWALL) | \
92 INT_MASK(INT_UDN_FIREWALL) | \ 94 (1ULL << INT_UDN_FIREWALL) | \
93 INT_MASK(INT_TILE_TIMER) | \ 95 (1ULL << INT_TILE_TIMER) | \
94 INT_MASK(INT_AUX_TILE_TIMER) | \ 96 (1ULL << INT_AUX_TILE_TIMER) | \
95 INT_MASK(INT_IDN_TIMER) | \ 97 (1ULL << INT_IDN_TIMER) | \
96 INT_MASK(INT_UDN_TIMER) | \ 98 (1ULL << INT_UDN_TIMER) | \
97 INT_MASK(INT_IDN_AVAIL) | \ 99 (1ULL << INT_IDN_AVAIL) | \
98 INT_MASK(INT_UDN_AVAIL) | \ 100 (1ULL << INT_UDN_AVAIL) | \
99 INT_MASK(INT_IPI_3) | \ 101 (1ULL << INT_IPI_3) | \
100 INT_MASK(INT_IPI_2) | \ 102 (1ULL << INT_IPI_2) | \
101 INT_MASK(INT_IPI_1) | \ 103 (1ULL << INT_IPI_1) | \
102 INT_MASK(INT_IPI_0) | \ 104 (1ULL << INT_IPI_0) | \
103 INT_MASK(INT_PERF_COUNT) | \ 105 (1ULL << INT_PERF_COUNT) | \
104 INT_MASK(INT_AUX_PERF_COUNT) | \ 106 (1ULL << INT_AUX_PERF_COUNT) | \
105 INT_MASK(INT_INTCTRL_3) | \ 107 (1ULL << INT_INTCTRL_3) | \
106 INT_MASK(INT_INTCTRL_2) | \ 108 (1ULL << INT_INTCTRL_2) | \
107 INT_MASK(INT_INTCTRL_1) | \ 109 (1ULL << INT_INTCTRL_1) | \
108 INT_MASK(INT_INTCTRL_0) | \ 110 (1ULL << INT_INTCTRL_0) | \
109 INT_MASK(INT_BOOT_ACCESS) | \ 111 (1ULL << INT_BOOT_ACCESS) | \
110 INT_MASK(INT_WORLD_ACCESS) | \ 112 (1ULL << INT_WORLD_ACCESS) | \
111 INT_MASK(INT_I_ASID) | \ 113 (1ULL << INT_I_ASID) | \
112 INT_MASK(INT_D_ASID) | \ 114 (1ULL << INT_D_ASID) | \
113 INT_MASK(INT_DOUBLE_FAULT) | \ 115 (1ULL << INT_DOUBLE_FAULT) | \
114 0) 116 0)
115#define NONQUEUED_INTERRUPTS ( \ 117#define NONQUEUED_INTERRUPTS ( \
116 INT_MASK(INT_SINGLE_STEP_3) | \ 118 (1ULL << INT_SINGLE_STEP_3) | \
117 INT_MASK(INT_SINGLE_STEP_2) | \ 119 (1ULL << INT_SINGLE_STEP_2) | \
118 INT_MASK(INT_SINGLE_STEP_1) | \ 120 (1ULL << INT_SINGLE_STEP_1) | \
119 INT_MASK(INT_SINGLE_STEP_0) | \ 121 (1ULL << INT_SINGLE_STEP_0) | \
120 INT_MASK(INT_ITLB_MISS) | \ 122 (1ULL << INT_ITLB_MISS) | \
121 INT_MASK(INT_ILL) | \ 123 (1ULL << INT_ILL) | \
122 INT_MASK(INT_GPV) | \ 124 (1ULL << INT_GPV) | \
123 INT_MASK(INT_IDN_ACCESS) | \ 125 (1ULL << INT_IDN_ACCESS) | \
124 INT_MASK(INT_UDN_ACCESS) | \ 126 (1ULL << INT_UDN_ACCESS) | \
125 INT_MASK(INT_SWINT_3) | \ 127 (1ULL << INT_SWINT_3) | \
126 INT_MASK(INT_SWINT_2) | \ 128 (1ULL << INT_SWINT_2) | \
127 INT_MASK(INT_SWINT_1) | \ 129 (1ULL << INT_SWINT_1) | \
128 INT_MASK(INT_SWINT_0) | \ 130 (1ULL << INT_SWINT_0) | \
129 INT_MASK(INT_ILL_TRANS) | \ 131 (1ULL << INT_ILL_TRANS) | \
130 INT_MASK(INT_UNALIGN_DATA) | \ 132 (1ULL << INT_UNALIGN_DATA) | \
131 INT_MASK(INT_DTLB_MISS) | \ 133 (1ULL << INT_DTLB_MISS) | \
132 INT_MASK(INT_DTLB_ACCESS) | \ 134 (1ULL << INT_DTLB_ACCESS) | \
133 0) 135 0)
134#define CRITICAL_MASKED_INTERRUPTS ( \ 136#define CRITICAL_MASKED_INTERRUPTS ( \
135 INT_MASK(INT_MEM_ERROR) | \ 137 (1ULL << INT_MEM_ERROR) | \
136 INT_MASK(INT_SINGLE_STEP_3) | \ 138 (1ULL << INT_SINGLE_STEP_3) | \
137 INT_MASK(INT_SINGLE_STEP_2) | \ 139 (1ULL << INT_SINGLE_STEP_2) | \
138 INT_MASK(INT_SINGLE_STEP_1) | \ 140 (1ULL << INT_SINGLE_STEP_1) | \
139 INT_MASK(INT_SINGLE_STEP_0) | \ 141 (1ULL << INT_SINGLE_STEP_0) | \
140 INT_MASK(INT_IDN_COMPLETE) | \ 142 (1ULL << INT_IDN_COMPLETE) | \
141 INT_MASK(INT_UDN_COMPLETE) | \ 143 (1ULL << INT_UDN_COMPLETE) | \
142 INT_MASK(INT_IDN_FIREWALL) | \ 144 (1ULL << INT_IDN_FIREWALL) | \
143 INT_MASK(INT_UDN_FIREWALL) | \ 145 (1ULL << INT_UDN_FIREWALL) | \
144 INT_MASK(INT_TILE_TIMER) | \ 146 (1ULL << INT_TILE_TIMER) | \
145 INT_MASK(INT_AUX_TILE_TIMER) | \ 147 (1ULL << INT_AUX_TILE_TIMER) | \
146 INT_MASK(INT_IDN_TIMER) | \ 148 (1ULL << INT_IDN_TIMER) | \
147 INT_MASK(INT_UDN_TIMER) | \ 149 (1ULL << INT_UDN_TIMER) | \
148 INT_MASK(INT_IDN_AVAIL) | \ 150 (1ULL << INT_IDN_AVAIL) | \
149 INT_MASK(INT_UDN_AVAIL) | \ 151 (1ULL << INT_UDN_AVAIL) | \
150 INT_MASK(INT_IPI_3) | \ 152 (1ULL << INT_IPI_3) | \
151 INT_MASK(INT_IPI_2) | \ 153 (1ULL << INT_IPI_2) | \
152 INT_MASK(INT_IPI_1) | \ 154 (1ULL << INT_IPI_1) | \
153 INT_MASK(INT_IPI_0) | \ 155 (1ULL << INT_IPI_0) | \
154 INT_MASK(INT_PERF_COUNT) | \ 156 (1ULL << INT_PERF_COUNT) | \
155 INT_MASK(INT_AUX_PERF_COUNT) | \ 157 (1ULL << INT_AUX_PERF_COUNT) | \
156 INT_MASK(INT_INTCTRL_3) | \ 158 (1ULL << INT_INTCTRL_3) | \
157 INT_MASK(INT_INTCTRL_2) | \ 159 (1ULL << INT_INTCTRL_2) | \
158 INT_MASK(INT_INTCTRL_1) | \ 160 (1ULL << INT_INTCTRL_1) | \
159 INT_MASK(INT_INTCTRL_0) | \ 161 (1ULL << INT_INTCTRL_0) | \
160 0) 162 0)
161#define CRITICAL_UNMASKED_INTERRUPTS ( \ 163#define CRITICAL_UNMASKED_INTERRUPTS ( \
162 INT_MASK(INT_ITLB_MISS) | \ 164 (1ULL << INT_ITLB_MISS) | \
163 INT_MASK(INT_ILL) | \ 165 (1ULL << INT_ILL) | \
164 INT_MASK(INT_GPV) | \ 166 (1ULL << INT_GPV) | \
165 INT_MASK(INT_IDN_ACCESS) | \ 167 (1ULL << INT_IDN_ACCESS) | \
166 INT_MASK(INT_UDN_ACCESS) | \ 168 (1ULL << INT_UDN_ACCESS) | \
167 INT_MASK(INT_SWINT_3) | \ 169 (1ULL << INT_SWINT_3) | \
168 INT_MASK(INT_SWINT_2) | \ 170 (1ULL << INT_SWINT_2) | \
169 INT_MASK(INT_SWINT_1) | \ 171 (1ULL << INT_SWINT_1) | \
170 INT_MASK(INT_SWINT_0) | \ 172 (1ULL << INT_SWINT_0) | \
171 INT_MASK(INT_ILL_TRANS) | \ 173 (1ULL << INT_ILL_TRANS) | \
172 INT_MASK(INT_UNALIGN_DATA) | \ 174 (1ULL << INT_UNALIGN_DATA) | \
173 INT_MASK(INT_DTLB_MISS) | \ 175 (1ULL << INT_DTLB_MISS) | \
174 INT_MASK(INT_DTLB_ACCESS) | \ 176 (1ULL << INT_DTLB_ACCESS) | \
175 INT_MASK(INT_BOOT_ACCESS) | \ 177 (1ULL << INT_BOOT_ACCESS) | \
176 INT_MASK(INT_WORLD_ACCESS) | \ 178 (1ULL << INT_WORLD_ACCESS) | \
177 INT_MASK(INT_I_ASID) | \ 179 (1ULL << INT_I_ASID) | \
178 INT_MASK(INT_D_ASID) | \ 180 (1ULL << INT_D_ASID) | \
179 INT_MASK(INT_DOUBLE_FAULT) | \ 181 (1ULL << INT_DOUBLE_FAULT) | \
180 0) 182 0)
181#define MASKABLE_INTERRUPTS ( \ 183#define MASKABLE_INTERRUPTS ( \
182 INT_MASK(INT_MEM_ERROR) | \ 184 (1ULL << INT_MEM_ERROR) | \
183 INT_MASK(INT_SINGLE_STEP_3) | \ 185 (1ULL << INT_SINGLE_STEP_3) | \
184 INT_MASK(INT_SINGLE_STEP_2) | \ 186 (1ULL << INT_SINGLE_STEP_2) | \
185 INT_MASK(INT_SINGLE_STEP_1) | \ 187 (1ULL << INT_SINGLE_STEP_1) | \
186 INT_MASK(INT_SINGLE_STEP_0) | \ 188 (1ULL << INT_SINGLE_STEP_0) | \
187 INT_MASK(INT_IDN_COMPLETE) | \ 189 (1ULL << INT_IDN_COMPLETE) | \
188 INT_MASK(INT_UDN_COMPLETE) | \ 190 (1ULL << INT_UDN_COMPLETE) | \
189 INT_MASK(INT_IDN_FIREWALL) | \ 191 (1ULL << INT_IDN_FIREWALL) | \
190 INT_MASK(INT_UDN_FIREWALL) | \ 192 (1ULL << INT_UDN_FIREWALL) | \
191 INT_MASK(INT_TILE_TIMER) | \ 193 (1ULL << INT_TILE_TIMER) | \
192 INT_MASK(INT_AUX_TILE_TIMER) | \ 194 (1ULL << INT_AUX_TILE_TIMER) | \
193 INT_MASK(INT_IDN_TIMER) | \ 195 (1ULL << INT_IDN_TIMER) | \
194 INT_MASK(INT_UDN_TIMER) | \ 196 (1ULL << INT_UDN_TIMER) | \
195 INT_MASK(INT_IDN_AVAIL) | \ 197 (1ULL << INT_IDN_AVAIL) | \
196 INT_MASK(INT_UDN_AVAIL) | \ 198 (1ULL << INT_UDN_AVAIL) | \
197 INT_MASK(INT_IPI_3) | \ 199 (1ULL << INT_IPI_3) | \
198 INT_MASK(INT_IPI_2) | \ 200 (1ULL << INT_IPI_2) | \
199 INT_MASK(INT_IPI_1) | \ 201 (1ULL << INT_IPI_1) | \
200 INT_MASK(INT_IPI_0) | \ 202 (1ULL << INT_IPI_0) | \
201 INT_MASK(INT_PERF_COUNT) | \ 203 (1ULL << INT_PERF_COUNT) | \
202 INT_MASK(INT_AUX_PERF_COUNT) | \ 204 (1ULL << INT_AUX_PERF_COUNT) | \
203 INT_MASK(INT_INTCTRL_3) | \ 205 (1ULL << INT_INTCTRL_3) | \
204 INT_MASK(INT_INTCTRL_2) | \ 206 (1ULL << INT_INTCTRL_2) | \
205 INT_MASK(INT_INTCTRL_1) | \ 207 (1ULL << INT_INTCTRL_1) | \
206 INT_MASK(INT_INTCTRL_0) | \ 208 (1ULL << INT_INTCTRL_0) | \
207 0) 209 0)
208#define UNMASKABLE_INTERRUPTS ( \ 210#define UNMASKABLE_INTERRUPTS ( \
209 INT_MASK(INT_ITLB_MISS) | \ 211 (1ULL << INT_ITLB_MISS) | \
210 INT_MASK(INT_ILL) | \ 212 (1ULL << INT_ILL) | \
211 INT_MASK(INT_GPV) | \ 213 (1ULL << INT_GPV) | \
212 INT_MASK(INT_IDN_ACCESS) | \ 214 (1ULL << INT_IDN_ACCESS) | \
213 INT_MASK(INT_UDN_ACCESS) | \ 215 (1ULL << INT_UDN_ACCESS) | \
214 INT_MASK(INT_SWINT_3) | \ 216 (1ULL << INT_SWINT_3) | \
215 INT_MASK(INT_SWINT_2) | \ 217 (1ULL << INT_SWINT_2) | \
216 INT_MASK(INT_SWINT_1) | \ 218 (1ULL << INT_SWINT_1) | \
217 INT_MASK(INT_SWINT_0) | \ 219 (1ULL << INT_SWINT_0) | \
218 INT_MASK(INT_ILL_TRANS) | \ 220 (1ULL << INT_ILL_TRANS) | \
219 INT_MASK(INT_UNALIGN_DATA) | \ 221 (1ULL << INT_UNALIGN_DATA) | \
220 INT_MASK(INT_DTLB_MISS) | \ 222 (1ULL << INT_DTLB_MISS) | \
221 INT_MASK(INT_DTLB_ACCESS) | \ 223 (1ULL << INT_DTLB_ACCESS) | \
222 INT_MASK(INT_BOOT_ACCESS) | \ 224 (1ULL << INT_BOOT_ACCESS) | \
223 INT_MASK(INT_WORLD_ACCESS) | \ 225 (1ULL << INT_WORLD_ACCESS) | \
224 INT_MASK(INT_I_ASID) | \ 226 (1ULL << INT_I_ASID) | \
225 INT_MASK(INT_D_ASID) | \ 227 (1ULL << INT_D_ASID) | \
226 INT_MASK(INT_DOUBLE_FAULT) | \ 228 (1ULL << INT_DOUBLE_FAULT) | \
227 0) 229 0)
228#define SYNC_INTERRUPTS ( \ 230#define SYNC_INTERRUPTS ( \
229 INT_MASK(INT_SINGLE_STEP_3) | \ 231 (1ULL << INT_SINGLE_STEP_3) | \
230 INT_MASK(INT_SINGLE_STEP_2) | \ 232 (1ULL << INT_SINGLE_STEP_2) | \
231 INT_MASK(INT_SINGLE_STEP_1) | \ 233 (1ULL << INT_SINGLE_STEP_1) | \
232 INT_MASK(INT_SINGLE_STEP_0) | \ 234 (1ULL << INT_SINGLE_STEP_0) | \
233 INT_MASK(INT_IDN_COMPLETE) | \ 235 (1ULL << INT_IDN_COMPLETE) | \
234 INT_MASK(INT_UDN_COMPLETE) | \ 236 (1ULL << INT_UDN_COMPLETE) | \
235 INT_MASK(INT_ITLB_MISS) | \ 237 (1ULL << INT_ITLB_MISS) | \
236 INT_MASK(INT_ILL) | \ 238 (1ULL << INT_ILL) | \
237 INT_MASK(INT_GPV) | \ 239 (1ULL << INT_GPV) | \
238 INT_MASK(INT_IDN_ACCESS) | \ 240 (1ULL << INT_IDN_ACCESS) | \
239 INT_MASK(INT_UDN_ACCESS) | \ 241 (1ULL << INT_UDN_ACCESS) | \
240 INT_MASK(INT_SWINT_3) | \ 242 (1ULL << INT_SWINT_3) | \
241 INT_MASK(INT_SWINT_2) | \ 243 (1ULL << INT_SWINT_2) | \
242 INT_MASK(INT_SWINT_1) | \ 244 (1ULL << INT_SWINT_1) | \
243 INT_MASK(INT_SWINT_0) | \ 245 (1ULL << INT_SWINT_0) | \
244 INT_MASK(INT_ILL_TRANS) | \ 246 (1ULL << INT_ILL_TRANS) | \
245 INT_MASK(INT_UNALIGN_DATA) | \ 247 (1ULL << INT_UNALIGN_DATA) | \
246 INT_MASK(INT_DTLB_MISS) | \ 248 (1ULL << INT_DTLB_MISS) | \
247 INT_MASK(INT_DTLB_ACCESS) | \ 249 (1ULL << INT_DTLB_ACCESS) | \
248 0) 250 0)
249#define NON_SYNC_INTERRUPTS ( \ 251#define NON_SYNC_INTERRUPTS ( \
250 INT_MASK(INT_MEM_ERROR) | \ 252 (1ULL << INT_MEM_ERROR) | \
251 INT_MASK(INT_IDN_FIREWALL) | \ 253 (1ULL << INT_IDN_FIREWALL) | \
252 INT_MASK(INT_UDN_FIREWALL) | \ 254 (1ULL << INT_UDN_FIREWALL) | \
253 INT_MASK(INT_TILE_TIMER) | \ 255 (1ULL << INT_TILE_TIMER) | \
254 INT_MASK(INT_AUX_TILE_TIMER) | \ 256 (1ULL << INT_AUX_TILE_TIMER) | \
255 INT_MASK(INT_IDN_TIMER) | \ 257 (1ULL << INT_IDN_TIMER) | \
256 INT_MASK(INT_UDN_TIMER) | \ 258 (1ULL << INT_UDN_TIMER) | \
257 INT_MASK(INT_IDN_AVAIL) | \ 259 (1ULL << INT_IDN_AVAIL) | \
258 INT_MASK(INT_UDN_AVAIL) | \ 260 (1ULL << INT_UDN_AVAIL) | \
259 INT_MASK(INT_IPI_3) | \ 261 (1ULL << INT_IPI_3) | \
260 INT_MASK(INT_IPI_2) | \ 262 (1ULL << INT_IPI_2) | \
261 INT_MASK(INT_IPI_1) | \ 263 (1ULL << INT_IPI_1) | \
262 INT_MASK(INT_IPI_0) | \ 264 (1ULL << INT_IPI_0) | \
263 INT_MASK(INT_PERF_COUNT) | \ 265 (1ULL << INT_PERF_COUNT) | \
264 INT_MASK(INT_AUX_PERF_COUNT) | \ 266 (1ULL << INT_AUX_PERF_COUNT) | \
265 INT_MASK(INT_INTCTRL_3) | \ 267 (1ULL << INT_INTCTRL_3) | \
266 INT_MASK(INT_INTCTRL_2) | \ 268 (1ULL << INT_INTCTRL_2) | \
267 INT_MASK(INT_INTCTRL_1) | \ 269 (1ULL << INT_INTCTRL_1) | \
268 INT_MASK(INT_INTCTRL_0) | \ 270 (1ULL << INT_INTCTRL_0) | \
269 INT_MASK(INT_BOOT_ACCESS) | \ 271 (1ULL << INT_BOOT_ACCESS) | \
270 INT_MASK(INT_WORLD_ACCESS) | \ 272 (1ULL << INT_WORLD_ACCESS) | \
271 INT_MASK(INT_I_ASID) | \ 273 (1ULL << INT_I_ASID) | \
272 INT_MASK(INT_D_ASID) | \ 274 (1ULL << INT_D_ASID) | \
273 INT_MASK(INT_DOUBLE_FAULT) | \ 275 (1ULL << INT_DOUBLE_FAULT) | \
274 0) 276 0)
275#endif /* !__ASSEMBLER__ */ 277#endif /* !__ASSEMBLER__ */
276#endif /* !__ARCH_INTERRUPTS_H__ */ 278#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 54bc9a6678e8..4ea080902654 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1035,7 +1035,9 @@ handle_syscall:
1035 /* Ensure that the syscall number is within the legal range. */ 1035 /* Ensure that the syscall number is within the legal range. */
1036 { 1036 {
1037 moveli r20, hw2(sys_call_table) 1037 moveli r20, hw2(sys_call_table)
1038#ifdef CONFIG_COMPAT
1038 blbs r30, .Lcompat_syscall 1039 blbs r30, .Lcompat_syscall
1040#endif
1039 } 1041 }
1040 { 1042 {
1041 cmpltu r21, TREG_SYSCALL_NR_NAME, r21 1043 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
1093 j .Lresume_userspace /* jump into middle of interrupt_return */ 1095 j .Lresume_userspace /* jump into middle of interrupt_return */
1094 } 1096 }
1095 1097
1098#ifdef CONFIG_COMPAT
1096.Lcompat_syscall: 1099.Lcompat_syscall:
1097 /* 1100 /*
1098 * Load the base of the compat syscall table in r20, and 1101 * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
1117 { move r15, r4; addxi r4, r4, 0 } 1120 { move r15, r4; addxi r4, r4, 0 }
1118 { move r16, r5; addxi r5, r5, 0 } 1121 { move r16, r5; addxi r5, r5, 0 }
1119 j .Lload_syscall_pointer 1122 j .Lload_syscall_pointer
1123#endif
1120 1124
1121.Linvalid_syscall: 1125.Linvalid_syscall:
1122 /* Report an invalid syscall back to the user program */ 1126 /* Report an invalid syscall back to the user program */
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0e5661e7d00d..caf93ae11793 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
159int copy_thread(unsigned long clone_flags, unsigned long sp, 159int copy_thread(unsigned long clone_flags, unsigned long sp,
160 unsigned long arg, struct task_struct *p) 160 unsigned long arg, struct task_struct *p)
161{ 161{
162 struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); 162 struct pt_regs *childregs = task_pt_regs(p);
163 unsigned long ksp; 163 unsigned long ksp;
164 unsigned long *callee_regs; 164 unsigned long *callee_regs;
165 165
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index baa3d905fee2..d1b5c913ae72 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -16,6 +16,7 @@
16#include <linux/reboot.h> 16#include <linux/reboot.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/pm.h> 18#include <linux/pm.h>
19#include <linux/export.h>
19#include <asm/page.h> 20#include <asm/page.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <hv/hypervisor.h> 22#include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
49 50
50/* No interesting distinction to be made here. */ 51/* No interesting distinction to be made here. */
51void (*pm_power_off)(void) = NULL; 52void (*pm_power_off)(void) = NULL;
53EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6a649a4462d3..d1e15f7b59c6 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -31,6 +31,7 @@
31#include <linux/timex.h> 31#include <linux/timex.h>
32#include <linux/hugetlb.h> 32#include <linux/hugetlb.h>
33#include <linux/start_kernel.h> 33#include <linux/start_kernel.h>
34#include <linux/screen_info.h>
34#include <asm/setup.h> 35#include <asm/setup.h>
35#include <asm/sections.h> 36#include <asm/sections.h>
36#include <asm/cacheflush.h> 37#include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
49/* Chip information */ 50/* Chip information */
50char chip_model[64] __write_once; 51char chip_model[64] __write_once;
51 52
53#ifdef CONFIG_VT
54struct screen_info screen_info;
55#endif
56
52struct pglist_data node_data[MAX_NUMNODES] __read_mostly; 57struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
53EXPORT_SYMBOL(node_data); 58EXPORT_SYMBOL(node_data);
54 59
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b2f44c28dda6..ed258b8ae320 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
112 p->pc, p->sp, p->ex1); 112 p->pc, p->sp, p->ex1);
113 p = NULL; 113 p = NULL;
114 } 114 }
115 if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) 115 if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
116 return p; 116 return p;
117 return NULL; 117 return NULL;
118} 118}
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
484{ 484{
485 save_stack_trace_tsk(NULL, trace); 485 save_stack_trace_tsk(NULL, trace);
486} 486}
487EXPORT_SYMBOL_GPL(save_stack_trace);
487 488
488#endif 489#endif
489 490
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index db4fb89e12d8..8f8ad814b139 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -12,6 +12,7 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#include <linux/export.h>
15#include <asm/page.h> 16#include <asm/page.h>
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
17#include <arch/icache.h> 18#include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
165 __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); 166 __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
166#endif 167#endif
167} 168}
169EXPORT_SYMBOL_GPL(finv_buffer_remote);
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
index fdc403614d12..75947edccb26 100644
--- a/arch/tile/lib/cpumask.c
+++ b/arch/tile/lib/cpumask.c
@@ -16,6 +16,7 @@
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/export.h>
19 20
20/* 21/*
21 * Allow cropping out bits beyond the end of the array. 22 * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
50 } while (*bp != '\0' && *bp != '\n'); 51 } while (*bp != '\0' && *bp != '\n');
51 return 0; 52 return 0;
52} 53}
54EXPORT_SYMBOL(bitmap_parselist_crop);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index dd5f0a33fdaf..4385cb6fa00a 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
55EXPORT_SYMBOL(hv_dev_close); 55EXPORT_SYMBOL(hv_dev_close);
56EXPORT_SYMBOL(hv_sysconf); 56EXPORT_SYMBOL(hv_sysconf);
57EXPORT_SYMBOL(hv_confstr); 57EXPORT_SYMBOL(hv_confstr);
58EXPORT_SYMBOL(hv_get_rtc);
59EXPORT_SYMBOL(hv_set_rtc);
58 60
59/* libgcc.a */ 61/* libgcc.a */
60uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); 62uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 5f7868dcd6d4..1ae911939a18 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
408 __set_pte(ptep, pte_set_home(pteval, home)); 408 __set_pte(ptep, pte_set_home(pteval, home));
409 } 409 }
410} 410}
411EXPORT_SYMBOL(homecache_change_page_home);
411 412
412struct page *homecache_alloc_pages(gfp_t gfp_mask, 413struct page *homecache_alloc_pages(gfp_t gfp_mask,
413 unsigned int order, int home) 414 unsigned int order, int home)
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 62bad9fed03e..872d7e22d847 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = {
45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" 45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
46}; 46};
47 47
48/*
49 * The idle thread, has rather strange semantics for calling pm_idle,
50 * but this is what x86 does and we need to do the same, so that
51 * things like cpuidle get called in the same way.
52 */
53void cpu_idle(void) 48void cpu_idle(void)
54{ 49{
55 /* endless idle loop with no priority at all */ 50 /* endless idle loop with no priority at all */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 79795af59810..f7a27fdb5098 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1,7 +1,7 @@
1# Select 32 or 64 bit 1# Select 32 or 64 bit
2config 64BIT 2config 64BIT
3 bool "64-bit kernel" if ARCH = "x86" 3 bool "64-bit kernel" if ARCH = "x86"
4 default ARCH = "x86_64" 4 default ARCH != "i386"
5 ---help--- 5 ---help---
6 Say yes to build a 64-bit kernel - formerly known as x86_64 6 Say yes to build a 64-bit kernel - formerly known as x86_64
7 Say no to build a 32-bit kernel - formerly known as i386 7 Say no to build a 32-bit kernel - formerly known as i386
@@ -28,7 +28,6 @@ config X86
28 select HAVE_OPROFILE 28 select HAVE_OPROFILE
29 select HAVE_PCSPKR_PLATFORM 29 select HAVE_PCSPKR_PLATFORM
30 select HAVE_PERF_EVENTS 30 select HAVE_PERF_EVENTS
31 select HAVE_IRQ_WORK
32 select HAVE_IOREMAP_PROT 31 select HAVE_IOREMAP_PROT
33 select HAVE_KPROBES 32 select HAVE_KPROBES
34 select HAVE_MEMBLOCK 33 select HAVE_MEMBLOCK
@@ -40,10 +39,12 @@ config X86
40 select HAVE_DMA_CONTIGUOUS if !SWIOTLB 39 select HAVE_DMA_CONTIGUOUS if !SWIOTLB
41 select HAVE_KRETPROBES 40 select HAVE_KRETPROBES
42 select HAVE_OPTPROBES 41 select HAVE_OPTPROBES
42 select HAVE_KPROBES_ON_FTRACE
43 select HAVE_FTRACE_MCOUNT_RECORD 43 select HAVE_FTRACE_MCOUNT_RECORD
44 select HAVE_FENTRY if X86_64 44 select HAVE_FENTRY if X86_64
45 select HAVE_C_RECORDMCOUNT 45 select HAVE_C_RECORDMCOUNT
46 select HAVE_DYNAMIC_FTRACE 46 select HAVE_DYNAMIC_FTRACE
47 select HAVE_DYNAMIC_FTRACE_WITH_REGS
47 select HAVE_FUNCTION_TRACER 48 select HAVE_FUNCTION_TRACER
48 select HAVE_FUNCTION_GRAPH_TRACER 49 select HAVE_FUNCTION_GRAPH_TRACER
49 select HAVE_FUNCTION_GRAPH_FP_TEST 50 select HAVE_FUNCTION_GRAPH_FP_TEST
@@ -106,6 +107,7 @@ config X86
106 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) 107 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
107 select GENERIC_TIME_VSYSCALL if X86_64 108 select GENERIC_TIME_VSYSCALL if X86_64
108 select KTIME_SCALAR if X86_32 109 select KTIME_SCALAR if X86_32
110 select ALWAYS_USE_PERSISTENT_CLOCK
109 select GENERIC_STRNCPY_FROM_USER 111 select GENERIC_STRNCPY_FROM_USER
110 select GENERIC_STRNLEN_USER 112 select GENERIC_STRNLEN_USER
111 select HAVE_CONTEXT_TRACKING if X86_64 113 select HAVE_CONTEXT_TRACKING if X86_64
@@ -114,6 +116,7 @@ config X86
114 select MODULES_USE_ELF_RELA if X86_64 116 select MODULES_USE_ELF_RELA if X86_64
115 select CLONE_BACKWARDS if X86_32 117 select CLONE_BACKWARDS if X86_32
116 select GENERIC_SIGALTSTACK 118 select GENERIC_SIGALTSTACK
119 select ARCH_USE_BUILTIN_BSWAP
117 120
118config INSTRUCTION_DECODER 121config INSTRUCTION_DECODER
119 def_bool y 122 def_bool y
@@ -320,6 +323,10 @@ config X86_BIGSMP
320 ---help--- 323 ---help---
321 This option is needed for the systems that have more than 8 CPUs 324 This option is needed for the systems that have more than 8 CPUs
322 325
326config GOLDFISH
327 def_bool y
328 depends on X86_GOLDFISH
329
323if X86_32 330if X86_32
324config X86_EXTENDED_PLATFORM 331config X86_EXTENDED_PLATFORM
325 bool "Support for extended (non-PC) x86 platforms" 332 bool "Support for extended (non-PC) x86 platforms"
@@ -402,6 +409,14 @@ config X86_UV
402# Following is an alphabetically sorted list of 32 bit extended platforms 409# Following is an alphabetically sorted list of 32 bit extended platforms
403# Please maintain the alphabetic order if and when there are additions 410# Please maintain the alphabetic order if and when there are additions
404 411
412config X86_GOLDFISH
413 bool "Goldfish (Virtual Platform)"
414 depends on X86_32
415 ---help---
416 Enable support for the Goldfish virtual platform used primarily
417 for Android development. Unless you are building for the Android
418 Goldfish emulator say N here.
419
405config X86_INTEL_CE 420config X86_INTEL_CE
406 bool "CE4100 TV platform" 421 bool "CE4100 TV platform"
407 depends on PCI 422 depends on PCI
@@ -454,6 +469,16 @@ config X86_MDFLD
454 469
455endif 470endif
456 471
472config X86_INTEL_LPSS
473 bool "Intel Low Power Subsystem Support"
474 depends on ACPI
475 select COMMON_CLK
476 ---help---
477 Select to build support for Intel Low Power Subsystem such as
478 found on Intel Lynxpoint PCH. Selecting this option enables
479 things like clock tree (common clock framework) which are needed
480 by the LPSS peripheral drivers.
481
457config X86_RDC321X 482config X86_RDC321X
458 bool "RDC R-321x SoC" 483 bool "RDC R-321x SoC"
459 depends on X86_32 484 depends on X86_32
@@ -1912,6 +1937,7 @@ config APM_DO_ENABLE
1912 this feature. 1937 this feature.
1913 1938
1914config APM_CPU_IDLE 1939config APM_CPU_IDLE
1940 depends on CPU_IDLE
1915 bool "Make CPU Idle calls when idle" 1941 bool "Make CPU Idle calls when idle"
1916 ---help--- 1942 ---help---
1917 Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop. 1943 Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
@@ -2138,6 +2164,7 @@ config OLPC_XO1_RTC
2138config OLPC_XO1_SCI 2164config OLPC_XO1_SCI
2139 bool "OLPC XO-1 SCI extras" 2165 bool "OLPC XO-1 SCI extras"
2140 depends on OLPC && OLPC_XO1_PM 2166 depends on OLPC && OLPC_XO1_PM
2167 depends on INPUT=y
2141 select POWER_SUPPLY 2168 select POWER_SUPPLY
2142 select GPIO_CS5535 2169 select GPIO_CS5535
2143 select MFD_CORE 2170 select MFD_CORE
@@ -2187,6 +2214,15 @@ config GEOS
2187 ---help--- 2214 ---help---
2188 This option enables system support for the Traverse Technologies GEOS. 2215 This option enables system support for the Traverse Technologies GEOS.
2189 2216
2217config TS5500
2218 bool "Technologic Systems TS-5500 platform support"
2219 depends on MELAN
2220 select CHECK_SIGNATURE
2221 select NEW_LEDS
2222 select LEDS_CLASS
2223 ---help---
2224 This option enables system support for the Technologic Systems TS-5500.
2225
2190endif # X86_32 2226endif # X86_32
2191 2227
2192config AMD_NB 2228config AMD_NB
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index e71fc4279aab..5c477260294f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -2,7 +2,11 @@
2 2
3# select defconfig based on actual architecture 3# select defconfig based on actual architecture
4ifeq ($(ARCH),x86) 4ifeq ($(ARCH),x86)
5 ifeq ($(shell uname -m),x86_64)
6 KBUILD_DEFCONFIG := x86_64_defconfig
7 else
5 KBUILD_DEFCONFIG := i386_defconfig 8 KBUILD_DEFCONFIG := i386_defconfig
9 endif
6else 10else
7 KBUILD_DEFCONFIG := $(ARCH)_defconfig 11 KBUILD_DEFCONFIG := $(ARCH)_defconfig
8endif 12endif
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index ccce0ed67dde..379814bc41e3 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -71,7 +71,7 @@ GCOV_PROFILE := n
71$(obj)/bzImage: asflags-y := $(SVGA_MODE) 71$(obj)/bzImage: asflags-y := $(SVGA_MODE)
72 72
73quiet_cmd_image = BUILD $@ 73quiet_cmd_image = BUILD $@
74cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@ 74cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@
75 75
76$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE 76$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
77 $(call if_changed,image) 77 $(call if_changed,image)
@@ -92,7 +92,7 @@ targets += voffset.h
92$(obj)/voffset.h: vmlinux FORCE 92$(obj)/voffset.h: vmlinux FORCE
93 $(call if_changed,voffset) 93 $(call if_changed,voffset)
94 94
95sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' 95sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
96 96
97quiet_cmd_zoffset = ZOFFSET $@ 97quiet_cmd_zoffset = ZOFFSET $@
98 cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ 98 cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 18e329ca108e..f8fa41190c35 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
256 int i; 256 int i;
257 struct setup_data *data; 257 struct setup_data *data;
258 258
259 data = (struct setup_data *)params->hdr.setup_data; 259 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
260 260
261 while (data && data->next) 261 while (data && data->next)
262 data = (struct setup_data *)data->next; 262 data = (struct setup_data *)(unsigned long)data->next;
263 263
264 status = efi_call_phys5(sys_table->boottime->locate_handle, 264 status = efi_call_phys5(sys_table->boottime->locate_handle,
265 EFI_LOCATE_BY_PROTOCOL, &pci_proto, 265 EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
295 if (!pci) 295 if (!pci)
296 continue; 296 continue;
297 297
298#ifdef CONFIG_X86_64
298 status = efi_call_phys4(pci->attributes, pci, 299 status = efi_call_phys4(pci->attributes, pci,
299 EfiPciIoAttributeOperationGet, 0, 300 EfiPciIoAttributeOperationGet, 0,
300 &attributes); 301 &attributes);
301 302#else
303 status = efi_call_phys5(pci->attributes, pci,
304 EfiPciIoAttributeOperationGet, 0, 0,
305 &attributes);
306#endif
302 if (status != EFI_SUCCESS) 307 if (status != EFI_SUCCESS)
303 continue; 308 continue;
304 309
305 if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
306 continue;
307
308 if (!pci->romimage || !pci->romsize) 310 if (!pci->romimage || !pci->romsize)
309 continue; 311 continue;
310 312
@@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
345 memcpy(rom->romdata, pci->romimage, pci->romsize); 347 memcpy(rom->romdata, pci->romimage, pci->romsize);
346 348
347 if (data) 349 if (data)
348 data->next = (uint64_t)rom; 350 data->next = (unsigned long)rom;
349 else 351 else
350 params->hdr.setup_data = (uint64_t)rom; 352 params->hdr.setup_data = (unsigned long)rom;
351 353
352 data = (struct setup_data *)rom; 354 data = (struct setup_data *)rom;
353 355
@@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
432 * Once we've found a GOP supporting ConOut, 434 * Once we've found a GOP supporting ConOut,
433 * don't bother looking any further. 435 * don't bother looking any further.
434 */ 436 */
437 first_gop = gop;
435 if (conout_found) 438 if (conout_found)
436 break; 439 break;
437
438 first_gop = gop;
439 } 440 }
440 } 441 }
441 442
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index aa4aaf1b2380..1e3184f6072f 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -35,11 +35,11 @@ ENTRY(startup_32)
35#ifdef CONFIG_EFI_STUB 35#ifdef CONFIG_EFI_STUB
36 jmp preferred_addr 36 jmp preferred_addr
37 37
38 .balign 0x10
39 /* 38 /*
40 * We don't need the return address, so set up the stack so 39 * We don't need the return address, so set up the stack so
41 * efi_main() can find its arugments. 40 * efi_main() can find its arguments.
42 */ 41 */
42ENTRY(efi_pe_entry)
43 add $0x4, %esp 43 add $0x4, %esp
44 44
45 call make_boot_params 45 call make_boot_params
@@ -50,8 +50,10 @@ ENTRY(startup_32)
50 pushl %eax 50 pushl %eax
51 pushl %esi 51 pushl %esi
52 pushl %ecx 52 pushl %ecx
53 sub $0x4, %esp
53 54
54 .org 0x30,0x90 55ENTRY(efi_stub_entry)
56 add $0x4, %esp
55 call efi_main 57 call efi_main
56 cmpl $0, %eax 58 cmpl $0, %eax
57 movl %eax, %esi 59 movl %eax, %esi
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 2c4b171eec33..f5d1aaa0dec8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -201,12 +201,12 @@ ENTRY(startup_64)
201 */ 201 */
202#ifdef CONFIG_EFI_STUB 202#ifdef CONFIG_EFI_STUB
203 /* 203 /*
204 * The entry point for the PE/COFF executable is 0x210, so only 204 * The entry point for the PE/COFF executable is efi_pe_entry, so
205 * legacy boot loaders will execute this jmp. 205 * only legacy boot loaders will execute this jmp.
206 */ 206 */
207 jmp preferred_addr 207 jmp preferred_addr
208 208
209 .org 0x210 209ENTRY(efi_pe_entry)
210 mov %rcx, %rdi 210 mov %rcx, %rdi
211 mov %rdx, %rsi 211 mov %rdx, %rsi
212 pushq %rdi 212 pushq %rdi
@@ -218,7 +218,7 @@ ENTRY(startup_64)
218 popq %rsi 218 popq %rsi
219 popq %rdi 219 popq %rdi
220 220
221 .org 0x230,0x90 221ENTRY(efi_stub_entry)
222 call efi_main 222 call efi_main
223 movq %rax,%rsi 223 movq %rax,%rsi
224 cmpq $0,%rax 224 cmpq $0,%rax
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 88f7ff6da404..7cb56c6ca351 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -325,6 +325,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
325{ 325{
326 real_mode = rmode; 326 real_mode = rmode;
327 327
328 sanitize_boot_params(real_mode);
329
328 if (real_mode->screen_info.orig_video_mode == 7) { 330 if (real_mode->screen_info.orig_video_mode == 7) {
329 vidmem = (char *) 0xb0000; 331 vidmem = (char *) 0xb0000;
330 vidport = 0x3b4; 332 vidport = 0x3b4;
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 0e6dc0ee0eea..674019d8e235 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -18,6 +18,7 @@
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/boot.h> 19#include <asm/boot.h>
20#include <asm/bootparam.h> 20#include <asm/bootparam.h>
21#include <asm/bootparam_utils.h>
21 22
22#define BOOT_BOOT_H 23#define BOOT_BOOT_H
23#include "../ctype.h" 24#include "../ctype.h"
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8c132a625b94..944ce595f767 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -21,6 +21,7 @@
21#include <asm/e820.h> 21#include <asm/e820.h>
22#include <asm/page_types.h> 22#include <asm/page_types.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/bootparam.h>
24#include "boot.h" 25#include "boot.h"
25#include "voffset.h" 26#include "voffset.h"
26#include "zoffset.h" 27#include "zoffset.h"
@@ -255,6 +256,9 @@ section_table:
255 # header, from the old boot sector. 256 # header, from the old boot sector.
256 257
257 .section ".header", "a" 258 .section ".header", "a"
259 .globl sentinel
260sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */
261
258 .globl hdr 262 .globl hdr
259hdr: 263hdr:
260setup_sects: .byte 0 /* Filled in by build.c */ 264setup_sects: .byte 0 /* Filled in by build.c */
@@ -279,7 +283,7 @@ _start:
279 # Part 2 of the header, from the old setup.S 283 # Part 2 of the header, from the old setup.S
280 284
281 .ascii "HdrS" # header signature 285 .ascii "HdrS" # header signature
282 .word 0x020b # header version number (>= 0x0105) 286 .word 0x020c # header version number (>= 0x0105)
283 # or else old loadlin-1.5 will fail) 287 # or else old loadlin-1.5 will fail)
284 .globl realmode_swtch 288 .globl realmode_swtch
285realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 289realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -297,13 +301,7 @@ type_of_loader: .byte 0 # 0 means ancient bootloader, newer
297 301
298# flags, unused bits must be zero (RFU) bit within loadflags 302# flags, unused bits must be zero (RFU) bit within loadflags
299loadflags: 303loadflags:
300LOADED_HIGH = 1 # If set, the kernel is loaded high 304 .byte LOADED_HIGH # The kernel is to be loaded high
301CAN_USE_HEAP = 0x80 # If set, the loader also has set
302 # heap_end_ptr to tell how much
303 # space behind setup.S can be used for
304 # heap purposes.
305 # Only the loader knows what is free
306 .byte LOADED_HIGH
307 305
308setup_move_size: .word 0x8000 # size to move, when setup is not 306setup_move_size: .word 0x8000 # size to move, when setup is not
309 # loaded at 0x90000. We will move setup 307 # loaded at 0x90000. We will move setup
@@ -369,7 +367,23 @@ relocatable_kernel: .byte 1
369relocatable_kernel: .byte 0 367relocatable_kernel: .byte 0
370#endif 368#endif
371min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment 369min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment
372pad3: .word 0 370
371xloadflags:
372#ifdef CONFIG_X86_64
373# define XLF0 XLF_KERNEL_64 /* 64-bit kernel */
374#else
375# define XLF0 0
376#endif
377#ifdef CONFIG_EFI_STUB
378# ifdef CONFIG_X86_64
379# define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */
380# else
381# define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */
382# endif
383#else
384# define XLF23 0
385#endif
386 .word XLF0 | XLF23
373 387
374cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, 388cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
375 #added with boot protocol 389 #added with boot protocol
@@ -397,8 +411,13 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
397#define INIT_SIZE VO_INIT_SIZE 411#define INIT_SIZE VO_INIT_SIZE
398#endif 412#endif
399init_size: .long INIT_SIZE # kernel initialization size 413init_size: .long INIT_SIZE # kernel initialization size
400handover_offset: .long 0x30 # offset to the handover 414handover_offset:
415#ifdef CONFIG_EFI_STUB
416 .long 0x30 # offset to the handover
401 # protocol entry point 417 # protocol entry point
418#else
419 .long 0
420#endif
402 421
403# End of setup header ##################################################### 422# End of setup header #####################################################
404 423
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 03c0683636b6..96a6c7563538 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -13,7 +13,7 @@ SECTIONS
13 .bstext : { *(.bstext) } 13 .bstext : { *(.bstext) }
14 .bsdata : { *(.bsdata) } 14 .bsdata : { *(.bsdata) }
15 15
16 . = 497; 16 . = 495;
17 .header : { *(.header) } 17 .header : { *(.header) }
18 .entrytext : { *(.entrytext) } 18 .entrytext : { *(.entrytext) }
19 .inittext : { *(.inittext) } 19 .inittext : { *(.inittext) }
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 4b8e165ee572..94c544650020 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -52,6 +52,10 @@ int is_big_kernel;
52 52
53#define PECOFF_RELOC_RESERVE 0x20 53#define PECOFF_RELOC_RESERVE 0x20
54 54
55unsigned long efi_stub_entry;
56unsigned long efi_pe_entry;
57unsigned long startup_64;
58
55/*----------------------------------------------------------------------*/ 59/*----------------------------------------------------------------------*/
56 60
57static const u32 crctab32[] = { 61static const u32 crctab32[] = {
@@ -132,7 +136,7 @@ static void die(const char * str, ...)
132 136
133static void usage(void) 137static void usage(void)
134{ 138{
135 die("Usage: build setup system [> image]"); 139 die("Usage: build setup system [zoffset.h] [> image]");
136} 140}
137 141
138#ifdef CONFIG_EFI_STUB 142#ifdef CONFIG_EFI_STUB
@@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
206 */ 210 */
207 put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); 211 put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
208 212
209#ifdef CONFIG_X86_32
210 /* 213 /*
211 * Address of entry point. 214 * Address of entry point for PE/COFF executable
212 *
213 * The EFI stub entry point is +16 bytes from the start of
214 * the .text section.
215 */ 215 */
216 put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); 216 put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
217#else
218 /*
219 * Address of entry point. startup_32 is at the beginning and
220 * the 64-bit entry point (startup_64) is always 512 bytes
221 * after. The EFI stub entry point is 16 bytes after that, as
222 * the first instruction allows legacy loaders to jump over
223 * the EFI stub initialisation
224 */
225 put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
226#endif /* CONFIG_X86_32 */
227 217
228 update_pecoff_section_header(".text", text_start, text_sz); 218 update_pecoff_section_header(".text", text_start, text_sz);
229} 219}
230 220
231#endif /* CONFIG_EFI_STUB */ 221#endif /* CONFIG_EFI_STUB */
232 222
223
224/*
225 * Parse zoffset.h and find the entry points. We could just #include zoffset.h
226 * but that would mean tools/build would have to be rebuilt every time. It's
227 * not as if parsing it is hard...
228 */
229#define PARSE_ZOFS(p, sym) do { \
230 if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \
231 sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \
232} while (0)
233
234static void parse_zoffset(char *fname)
235{
236 FILE *file;
237 char *p;
238 int c;
239
240 file = fopen(fname, "r");
241 if (!file)
242 die("Unable to open `%s': %m", fname);
243 c = fread(buf, 1, sizeof(buf) - 1, file);
244 if (ferror(file))
245 die("read-error on `zoffset.h'");
246 buf[c] = 0;
247
248 p = (char *)buf;
249
250 while (p && *p) {
251 PARSE_ZOFS(p, efi_stub_entry);
252 PARSE_ZOFS(p, efi_pe_entry);
253 PARSE_ZOFS(p, startup_64);
254
255 p = strchr(p, '\n');
256 while (p && (*p == '\r' || *p == '\n'))
257 p++;
258 }
259}
260
233int main(int argc, char ** argv) 261int main(int argc, char ** argv)
234{ 262{
235 unsigned int i, sz, setup_sectors; 263 unsigned int i, sz, setup_sectors;
@@ -241,7 +269,19 @@ int main(int argc, char ** argv)
241 void *kernel; 269 void *kernel;
242 u32 crc = 0xffffffffUL; 270 u32 crc = 0xffffffffUL;
243 271
244 if (argc != 3) 272 /* Defaults for old kernel */
273#ifdef CONFIG_X86_32
274 efi_pe_entry = 0x10;
275 efi_stub_entry = 0x30;
276#else
277 efi_pe_entry = 0x210;
278 efi_stub_entry = 0x230;
279 startup_64 = 0x200;
280#endif
281
282 if (argc == 4)
283 parse_zoffset(argv[3]);
284 else if (argc != 3)
245 usage(); 285 usage();
246 286
247 /* Copy the setup code */ 287 /* Copy the setup code */
@@ -299,6 +339,11 @@ int main(int argc, char ** argv)
299 339
300#ifdef CONFIG_EFI_STUB 340#ifdef CONFIG_EFI_STUB
301 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 341 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
342
343#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
344 efi_stub_entry -= 0x200;
345#endif
346 put_unaligned_le32(efi_stub_entry, &buf[0x264]);
302#endif 347#endif
303 348
304 crc = partial_crc32(buf, i, crc); 349 crc = partial_crc32(buf, i, crc);
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5598547281a7..94447086e551 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,3 +1,4 @@
1# CONFIG_64BIT is not set
1CONFIG_EXPERIMENTAL=y 2CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 3# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 102ff7cb3e41..142c4ceff112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
207 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 207 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
208 jnz ia32_ret_from_sys_call 208 jnz ia32_ret_from_sys_call
209 TRACE_IRQS_ON 209 TRACE_IRQS_ON
210 sti 210 ENABLE_INTERRUPTS(CLBR_NONE)
211 movl %eax,%esi /* second arg, syscall return value */ 211 movl %eax,%esi /* second arg, syscall return value */
212 cmpl $-MAX_ERRNO,%eax /* is it an error ? */ 212 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
213 jbe 1f 213 jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
217 call __audit_syscall_exit 217 call __audit_syscall_exit
218 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ 218 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
219 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi 219 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
220 cli 220 DISABLE_INTERRUPTS(CLBR_NONE)
221 TRACE_IRQS_OFF 221 TRACE_IRQS_OFF
222 testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 222 testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
223 jz \exit 223 jz \exit
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d1789..b31bf97775fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,10 +49,6 @@
49 49
50/* Asm macros */ 50/* Asm macros */
51 51
52#define ACPI_ASM_MACROS
53#define BREAKPOINT3
54#define ACPI_DISABLE_IRQS() local_irq_disable()
55#define ACPI_ENABLE_IRQS() local_irq_enable()
56#define ACPI_FLUSH_CPU_CACHE() wbinvd() 52#define ACPI_FLUSH_CPU_CACHE() wbinvd()
57 53
58int __acpi_acquire_global_lock(unsigned int *lock); 54int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index b3341e9cd8fd..a54ee1d054d9 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
81 return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; 81 return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
82} 82}
83 83
84static inline u16 amd_get_node_id(struct pci_dev *pdev)
85{
86 struct pci_dev *misc;
87 int i;
88
89 for (i = 0; i != amd_nb_num(); i++) {
90 misc = node_to_amd_nb(i)->misc;
91
92 if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
93 PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
94 return i;
95 }
96
97 WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
98 return 0;
99}
100
84#else 101#else
85 102
86#define amd_nb_num(x) 0 103#define amd_nb_num(x) 0
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
new file mode 100644
index 000000000000..5b5e9cb774b5
--- /dev/null
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_X86_BOOTPARAM_UTILS_H
2#define _ASM_X86_BOOTPARAM_UTILS_H
3
4#include <asm/bootparam.h>
5
6/*
7 * This file is included from multiple environments. Do not
8 * add completing #includes to make it standalone.
9 */
10
11/*
12 * Deal with bootloaders which fail to initialize unknown fields in
13 * boot_params to zero. The list fields in this list are taken from
14 * analysis of kexec-tools; if other broken bootloaders initialize a
15 * different set of fields we will need to figure out how to disambiguate.
16 *
17 */
18static void sanitize_boot_params(struct boot_params *boot_params)
19{
20 if (boot_params->sentinel) {
21 /*fields in boot_params are not valid, clear them */
22 memset(&boot_params->olpc_ofw_header, 0,
23 (char *)&boot_params->alt_mem_k -
24 (char *)&boot_params->olpc_ofw_header);
25 memset(&boot_params->kbd_status, 0,
26 (char *)&boot_params->hdr -
27 (char *)&boot_params->kbd_status);
28 memset(&boot_params->_pad7[0], 0,
29 (char *)&boot_params->edd_mbr_sig_buffer[0] -
30 (char *)&boot_params->_pad7[0]);
31 memset(&boot_params->_pad8[0], 0,
32 (char *)&boot_params->eddbuf[0] -
33 (char *)&boot_params->_pad8[0]);
34 memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
35 }
36}
37
38#endif /* _ASM_X86_BOOTPARAM_UTILS_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 2d9075e863a0..93fe929d1cee 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -167,6 +167,7 @@
167#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ 167#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
168#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ 168#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
169#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ 169#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
170#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
170 171
171/* 172/*
172 * Auxiliary flags: Linux defined - For features scattered in various 173 * Auxiliary flags: Linux defined - For features scattered in various
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32];
309#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 310#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
310#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 311#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
311#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 312#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
313#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
312#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 314#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
313#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 315#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
314#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 316#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 6e8fdf5ad113..28677c55113f 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
94#endif /* CONFIG_X86_32 */ 94#endif /* CONFIG_X86_32 */
95 95
96extern int add_efi_memmap; 96extern int add_efi_memmap;
97extern unsigned long x86_efi_facility;
97extern void efi_set_executable(efi_memory_desc_t *md, bool executable); 98extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
98extern int efi_memblock_x86_reserve_range(void); 99extern int efi_memblock_x86_reserve_range(void);
99extern void efi_call_phys_prelog(void); 100extern void efi_call_phys_prelog(void);
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9a25b522d377..86cb51e1ca96 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -44,7 +44,6 @@
44 44
45#ifdef CONFIG_DYNAMIC_FTRACE 45#ifdef CONFIG_DYNAMIC_FTRACE
46#define ARCH_SUPPORTS_FTRACE_OPS 1 46#define ARCH_SUPPORTS_FTRACE_OPS 1
47#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
48#endif 47#endif
49 48
50#ifndef __ASSEMBLY__ 49#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 434e2106cc87..b18df579c0e9 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
80extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); 80extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
81 81
82#ifdef CONFIG_PCI_MSI 82#ifdef CONFIG_PCI_MSI
83extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); 83extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
84#else 84#else
85static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 85static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
86{ 86{
87 return -EINVAL; 87 return -EINVAL;
88} 88}
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
111static inline int hpet_enable(void) { return 0; } 111static inline int hpet_enable(void) { return 0; }
112static inline int is_hpet_enabled(void) { return 0; } 112static inline int is_hpet_enabled(void) { return 0; }
113#define hpet_readl(a) 0 113#define hpet_readl(a) 0
114#define default_setup_hpet_msi NULL
114 115
115#endif 116#endif
116#endif /* _ASM_X86_HPET_H */ 117#endif /* _ASM_X86_HPET_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eb92a6ed2be7..10a78c3d3d5a 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
101 irq_attr->polarity = polarity; 101 irq_attr->polarity = polarity;
102} 102}
103 103
104/* Intel specific interrupt remapping information */
104struct irq_2_iommu { 105struct irq_2_iommu {
105 struct intel_iommu *iommu; 106 struct intel_iommu *iommu;
106 u16 irte_index; 107 u16 irte_index;
@@ -108,6 +109,12 @@ struct irq_2_iommu {
108 u8 irte_mask; 109 u8 irte_mask;
109}; 110};
110 111
112/* AMD specific interrupt remapping information */
113struct irq_2_irte {
114 u16 devid; /* Device ID for IRTE table */
115 u16 index; /* Index into IRTE table*/
116};
117
111/* 118/*
112 * This is performance-critical, we want to do it O(1) 119 * This is performance-critical, we want to do it O(1)
113 * 120 *
@@ -120,7 +127,11 @@ struct irq_cfg {
120 u8 vector; 127 u8 vector;
121 u8 move_in_progress : 1; 128 u8 move_in_progress : 1;
122#ifdef CONFIG_IRQ_REMAP 129#ifdef CONFIG_IRQ_REMAP
123 struct irq_2_iommu irq_2_iommu; 130 u8 remapped : 1;
131 union {
132 struct irq_2_iommu irq_2_iommu;
133 struct irq_2_irte irq_2_irte;
134 };
124#endif 135#endif
125}; 136};
126 137
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b518c7509933..86095ed14135 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -25,6 +25,7 @@
25 25
26extern void init_hypervisor(struct cpuinfo_x86 *c); 26extern void init_hypervisor(struct cpuinfo_x86 *c);
27extern void init_hypervisor_platform(void); 27extern void init_hypervisor_platform(void);
28extern bool hypervisor_x2apic_available(void);
28 29
29/* 30/*
30 * x86 hypervisor information 31 * x86 hypervisor information
@@ -41,6 +42,9 @@ struct hypervisor_x86 {
41 42
42 /* Platform setup (run once per boot) */ 43 /* Platform setup (run once per boot) */
43 void (*init_platform)(void); 44 void (*init_platform)(void);
45
46 /* X2APIC detection (run once per boot) */
47 bool (*x2apic_available)(void);
44}; 48};
45 49
46extern const struct hypervisor_x86 *x86_hyper; 50extern const struct hypervisor_x86 *x86_hyper;
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
51extern const struct hypervisor_x86 x86_hyper_xen_hvm; 55extern const struct hypervisor_x86 x86_hyper_xen_hvm;
52extern const struct hypervisor_x86 x86_hyper_kvm; 56extern const struct hypervisor_x86 x86_hyper_kvm;
53 57
54static inline bool hypervisor_x2apic_available(void)
55{
56 if (kvm_para_available())
57 return true;
58 if (xen_x2apic_para_available())
59 return true;
60 return false;
61}
62
63#endif 58#endif
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 73d8c5398ea9..459e50a424d1 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -144,11 +144,24 @@ extern int timer_through_8259;
144 (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) 144 (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
145 145
146struct io_apic_irq_attr; 146struct io_apic_irq_attr;
147struct irq_cfg;
147extern int io_apic_set_pci_routing(struct device *dev, int irq, 148extern int io_apic_set_pci_routing(struct device *dev, int irq,
148 struct io_apic_irq_attr *irq_attr); 149 struct io_apic_irq_attr *irq_attr);
149void setup_IO_APIC_irq_extra(u32 gsi); 150void setup_IO_APIC_irq_extra(u32 gsi);
150extern void ioapic_insert_resources(void); 151extern void ioapic_insert_resources(void);
151 152
153extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
154 unsigned int, int,
155 struct io_apic_irq_attr *);
156extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
157 unsigned int, int,
158 struct io_apic_irq_attr *);
159extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
160
161extern void native_compose_msi_msg(struct pci_dev *pdev,
162 unsigned int irq, unsigned int dest,
163 struct msi_msg *msg, u8 hpet_id);
164extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
152int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); 165int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
153 166
154extern int save_ioapic_entries(void); 167extern int save_ioapic_entries(void);
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void);
179extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); 192extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
180extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); 193extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
181extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); 194extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
195extern void native_disable_io_apic(void);
196extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
197extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
198extern int native_ioapic_set_affinity(struct irq_data *,
199 const struct cpumask *,
200 bool);
182 201
183static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 202static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
184{ 203{
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
193{ 212{
194 x86_io_apic_ops.modify(apic, reg, value); 213 x86_io_apic_ops.modify(apic, reg, value);
195} 214}
215
216extern void io_apic_eoi(unsigned int apic, unsigned int vector);
217
196#else /* !CONFIG_X86_IO_APIC */ 218#else /* !CONFIG_X86_IO_APIC */
197 219
198#define io_apic_assign_pci_irqs 0 220#define io_apic_assign_pci_irqs 0
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { }
223#define native_io_apic_read NULL 245#define native_io_apic_read NULL
224#define native_io_apic_write NULL 246#define native_io_apic_write NULL
225#define native_io_apic_modify NULL 247#define native_io_apic_modify NULL
248#define native_disable_io_apic NULL
249#define native_io_apic_print_entries NULL
250#define native_ioapic_set_affinity NULL
251#define native_setup_ioapic_entry NULL
252#define native_compose_msi_msg NULL
253#define native_eoi_ioapic_pin NULL
226#endif 254#endif
227 255
228#endif /* _ASM_X86_IO_APIC_H */ 256#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5fb9bbbd2f14..95fd3527f632 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -26,8 +26,6 @@
26 26
27#ifdef CONFIG_IRQ_REMAP 27#ifdef CONFIG_IRQ_REMAP
28 28
29extern int irq_remapping_enabled;
30
31extern void setup_irq_remapping_ops(void); 29extern void setup_irq_remapping_ops(void);
32extern int irq_remapping_supported(void); 30extern int irq_remapping_supported(void);
33extern int irq_remapping_prepare(void); 31extern int irq_remapping_prepare(void);
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq,
40 unsigned int destination, 38 unsigned int destination,
41 int vector, 39 int vector,
42 struct io_apic_irq_attr *attr); 40 struct io_apic_irq_attr *attr);
43extern int set_remapped_irq_affinity(struct irq_data *data,
44 const struct cpumask *mask,
45 bool force);
46extern void free_remapped_irq(int irq); 41extern void free_remapped_irq(int irq);
47extern void compose_remapped_msi_msg(struct pci_dev *pdev, 42extern void compose_remapped_msi_msg(struct pci_dev *pdev,
48 unsigned int irq, unsigned int dest, 43 unsigned int irq, unsigned int dest,
49 struct msi_msg *msg, u8 hpet_id); 44 struct msi_msg *msg, u8 hpet_id);
50extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
51extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
52 int index, int sub_handle);
53extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id); 45extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
46extern void panic_if_irq_remap(const char *msg);
47extern bool setup_remapped_irq(int irq,
48 struct irq_cfg *cfg,
49 struct irq_chip *chip);
54 50
55#else /* CONFIG_IRQ_REMAP */ 51void irq_remap_modify_chip_defaults(struct irq_chip *chip);
56 52
57#define irq_remapping_enabled 0 53#else /* CONFIG_IRQ_REMAP */
58 54
59static inline void setup_irq_remapping_ops(void) { } 55static inline void setup_irq_remapping_ops(void) { }
60static inline int irq_remapping_supported(void) { return 0; } 56static inline int irq_remapping_supported(void) { return 0; }
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq,
71{ 67{
72 return -ENODEV; 68 return -ENODEV;
73} 69}
74static inline int set_remapped_irq_affinity(struct irq_data *data,
75 const struct cpumask *mask,
76 bool force)
77{
78 return 0;
79}
80static inline void free_remapped_irq(int irq) { } 70static inline void free_remapped_irq(int irq) { }
81static inline void compose_remapped_msi_msg(struct pci_dev *pdev, 71static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
82 unsigned int irq, unsigned int dest, 72 unsigned int irq, unsigned int dest,
83 struct msi_msg *msg, u8 hpet_id) 73 struct msi_msg *msg, u8 hpet_id)
84{ 74{
85} 75}
86static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) 76static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
87{ 77{
88 return -ENODEV; 78 return -ENODEV;
89} 79}
90static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, 80
91 int index, int sub_handle) 81static inline void panic_if_irq_remap(const char *msg)
82{
83}
84
85static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
92{ 86{
93 return -ENODEV;
94} 87}
95static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) 88
89static inline bool setup_remapped_irq(int irq,
90 struct irq_cfg *cfg,
91 struct irq_chip *chip)
96{ 92{
97 return -ENODEV; 93 return false;
98} 94}
99#endif /* CONFIG_IRQ_REMAP */ 95#endif /* CONFIG_IRQ_REMAP */
100 96
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 1508e518c7e3..aac5fa62a86c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -109,8 +109,8 @@
109 109
110#define UV_BAU_MESSAGE 0xf5 110#define UV_BAU_MESSAGE 0xf5
111 111
112/* Xen vector callback to receive events in a HVM domain */ 112/* Vector on which hypervisor callbacks will be delivered */
113#define XEN_HVM_EVTCHN_CALLBACK 0xf3 113#define HYPERVISOR_CALLBACK_VECTOR 0xf3
114 114
115/* 115/*
116 * Local APIC timer IRQ vector is on a different priority level, 116 * Local APIC timer IRQ vector is on a different priority level,
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 5ed1f16187be..65231e173baf 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
85 return ret; 85 return ret;
86} 86}
87 87
88static inline int kvm_para_available(void) 88static inline bool kvm_para_available(void)
89{ 89{
90 unsigned int eax, ebx, ecx, edx; 90 unsigned int eax, ebx, ecx, edx;
91 char signature[13]; 91 char signature[13];
92 92
93 if (boot_cpu_data.cpuid_level < 0) 93 if (boot_cpu_data.cpuid_level < 0)
94 return 0; /* So we don't blow up on old processors */ 94 return false; /* So we don't blow up on old processors */
95 95
96 if (cpu_has_hypervisor) { 96 if (cpu_has_hypervisor) {
97 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 97 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void)
101 signature[12] = 0; 101 signature[12] = 0;
102 102
103 if (strcmp(signature, "KVMKVMKVM") == 0) 103 if (strcmp(signature, "KVMKVMKVM") == 0)
104 return 1; 104 return true;
105 } 105 }
106 106
107 return 0; 107 return false;
108} 108}
109 109
110static inline unsigned int kvm_arch_para_features(void) 110static inline unsigned int kvm_arch_para_features(void)
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 48142971b25d..79327e9483a3 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -27,20 +27,20 @@
27#define __asmlinkage_protect0(ret) \ 27#define __asmlinkage_protect0(ret) \
28 __asmlinkage_protect_n(ret) 28 __asmlinkage_protect_n(ret)
29#define __asmlinkage_protect1(ret, arg1) \ 29#define __asmlinkage_protect1(ret, arg1) \
30 __asmlinkage_protect_n(ret, "g" (arg1)) 30 __asmlinkage_protect_n(ret, "m" (arg1))
31#define __asmlinkage_protect2(ret, arg1, arg2) \ 31#define __asmlinkage_protect2(ret, arg1, arg2) \
32 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2)) 32 __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
33#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ 33#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
34 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3)) 34 __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
35#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ 35#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
36 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ 36 __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
37 "g" (arg4)) 37 "m" (arg4))
38#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ 38#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
39 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ 39 __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
40 "g" (arg4), "g" (arg5)) 40 "m" (arg4), "m" (arg5))
41#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ 41#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
42 __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ 42 __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
43 "g" (arg4), "g" (arg5), "g" (arg6)) 43 "m" (arg4), "m" (arg5), "m" (arg6))
44 44
45#endif /* CONFIG_X86_32 */ 45#endif /* CONFIG_X86_32 */
46 46
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ecdfee60ee4a..f4076af1f4ed 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -3,6 +3,90 @@
3 3
4#include <uapi/asm/mce.h> 4#include <uapi/asm/mce.h>
5 5
6/*
7 * Machine Check support for x86
8 */
9
10/* MCG_CAP register defines */
11#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
12#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
13#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
14#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
15#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
16#define MCG_EXT_CNT_SHIFT 16
17#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
18#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
19
20/* MCG_STATUS register defines */
21#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
22#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
23#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
24
25/* MCi_STATUS register defines */
26#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
27#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
28#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
29#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
30#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
31#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
32#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
33#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
34#define MCI_STATUS_AR (1ULL<<55) /* Action required */
35#define MCACOD 0xffff /* MCA Error Code */
36
37/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
38#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
39#define MCACOD_SCRUBMSK 0xfff0
40#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
41#define MCACOD_DATA 0x0134 /* Data Load */
42#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
43
44/* MCi_MISC register defines */
45#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
46#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
47#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
48#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
49#define MCI_MISC_ADDR_PHYS 2 /* physical address */
50#define MCI_MISC_ADDR_MEM 3 /* memory address */
51#define MCI_MISC_ADDR_GENERIC 7 /* generic */
52
53/* CTL2 register defines */
54#define MCI_CTL2_CMCI_EN (1ULL << 30)
55#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
56
57#define MCJ_CTX_MASK 3
58#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
59#define MCJ_CTX_RANDOM 0 /* inject context: random */
60#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
61#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
62#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
63#define MCJ_EXCEPTION 0x8 /* raise as exception */
64#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
65
66#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
67
68/* Software defined banks */
69#define MCE_EXTENDED_BANK 128
70#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
72
73#define MCE_LOG_LEN 32
74#define MCE_LOG_SIGNATURE "MACHINECHECK"
75
76/*
77 * This structure contains all data related to the MCE log. Also
78 * carries a signature to make it easier to find from external
79 * debugging tools. Each entry is only valid when its finished flag
80 * is set.
81 */
82struct mce_log {
83 char signature[12]; /* "MACHINECHECK" */
84 unsigned len; /* = MCE_LOG_LEN */
85 unsigned next;
86 unsigned flags;
87 unsigned recordlen; /* length of struct mce */
88 struct mce entry[MCE_LOG_LEN];
89};
6 90
7struct mca_config { 91struct mca_config {
8 bool dont_log_ce; 92 bool dont_log_ce;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 79ce5685ab64..c2934be2446a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -11,4 +11,8 @@ struct ms_hyperv_info {
11 11
12extern struct ms_hyperv_info ms_hyperv; 12extern struct ms_hyperv_info ms_hyperv;
13 13
14void hyperv_callback_vector(void);
15void hyperv_vector_handler(struct pt_regs *regs);
16void hv_register_vmbus_handler(int irq, irq_handler_t handler);
17
14#endif 18#endif
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668c..2f366d0ac6b4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
4#define MWAIT_SUBSTATE_MASK 0xf 4#define MWAIT_SUBSTATE_MASK 0xf
5#define MWAIT_CSTATE_MASK 0xf 5#define MWAIT_CSTATE_MASK 0xf
6#define MWAIT_SUBSTATE_SIZE 4 6#define MWAIT_SUBSTATE_SIZE 4
7#define MWAIT_MAX_NUM_CSTATES 8 7#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
8#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
8 9
9#define CPUID_MWAIT_LEAF 5 10#define CPUID_MWAIT_LEAF 5
10#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 11#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index dba7805176bf..c28fd02f4bf7 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
121#define arch_teardown_msi_irq x86_teardown_msi_irq 121#define arch_teardown_msi_irq x86_teardown_msi_irq
122#define arch_restore_msi_irqs x86_restore_msi_irqs 122#define arch_restore_msi_irqs x86_restore_msi_irqs
123/* implemented in arch/x86/kernel/apic/io_apic. */ 123/* implemented in arch/x86/kernel/apic/io_apic. */
124struct msi_desc;
124int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 125int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
125void native_teardown_msi_irq(unsigned int irq); 126void native_teardown_msi_irq(unsigned int irq);
126void native_restore_msi_irqs(struct pci_dev *dev, int irq); 127void native_restore_msi_irqs(struct pci_dev *dev, int irq);
128int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
129 unsigned int irq_base, unsigned int irq_offset);
127/* default to the implementation in drivers/lib/msi.c */ 130/* default to the implementation in drivers/lib/msi.c */
128#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS 131#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
129#define HAVE_DEFAULT_MSI_RESTORE_IRQS 132#define HAVE_DEFAULT_MSI_RESTORE_IRQS
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 4fabcdf1cfa7..57cb63402213 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,8 +29,13 @@
29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) 29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL 30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
31 31
32#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) 32#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
33#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) 33#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
34#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
35
36#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
37#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
38 (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
34 39
35#define AMD64_EVENTSEL_EVENT \ 40#define AMD64_EVENTSEL_EVENT \
36 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) 41 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
@@ -46,8 +51,12 @@
46#define AMD64_RAW_EVENT_MASK \ 51#define AMD64_RAW_EVENT_MASK \
47 (X86_RAW_EVENT_MASK | \ 52 (X86_RAW_EVENT_MASK | \
48 AMD64_EVENTSEL_EVENT) 53 AMD64_EVENTSEL_EVENT)
54#define AMD64_RAW_EVENT_MASK_NB \
55 (AMD64_EVENTSEL_EVENT | \
56 ARCH_PERFMON_EVENTSEL_UMASK)
49#define AMD64_NUM_COUNTERS 4 57#define AMD64_NUM_COUNTERS 4
50#define AMD64_NUM_COUNTERS_CORE 6 58#define AMD64_NUM_COUNTERS_CORE 6
59#define AMD64_NUM_COUNTERS_NB 4
51 60
52#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c 61#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
53#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 62#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5199db2923d3..fc304279b559 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
142 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; 142 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
143} 143}
144 144
145static inline unsigned long pud_pfn(pud_t pud)
146{
147 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
148}
149
145#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 150#define pte_page(pte) pfn_to_page(pte_pfn(pte))
146 151
147static inline int pmd_large(pmd_t pte) 152static inline int pmd_large(pmd_t pte)
@@ -781,6 +786,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
781 memcpy(dst, src, count * sizeof(pgd_t)); 786 memcpy(dst, src, count * sizeof(pgd_t));
782} 787}
783 788
789/*
790 * The x86 doesn't have any external MMU info: the kernel page
791 * tables contain all the necessary information.
792 */
793static inline void update_mmu_cache(struct vm_area_struct *vma,
794 unsigned long addr, pte_t *ptep)
795{
796}
797static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
798 unsigned long addr, pmd_t *pmd)
799{
800}
784 801
785#include <asm-generic/pgtable.h> 802#include <asm-generic/pgtable.h>
786#endif /* __ASSEMBLY__ */ 803#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8faa215a503e..9ee322103c6d 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -66,13 +66,6 @@ do { \
66 __flush_tlb_one((vaddr)); \ 66 __flush_tlb_one((vaddr)); \
67} while (0) 67} while (0)
68 68
69/*
70 * The i386 doesn't have any external MMU info: the kernel page
71 * tables contain all the necessary information.
72 */
73#define update_mmu_cache(vma, address, ptep) do { } while (0)
74#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
75
76#endif /* !__ASSEMBLY__ */ 69#endif /* !__ASSEMBLY__ */
77 70
78/* 71/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 47356f9df82e..615b0c78449f 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
142#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) 142#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
143#define pte_unmap(pte) ((void)(pte))/* NOP */ 143#define pte_unmap(pte) ((void)(pte))/* NOP */
144 144
145#define update_mmu_cache(vma, address, ptep) do { } while (0)
146#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
147
148/* Encode and de-code a swap entry */ 145/* Encode and de-code a swap entry */
149#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE 146#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
150#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) 147#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b2fc85..d172588efae5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
89 char wp_works_ok; /* It doesn't on 386's */ 89 char wp_works_ok; /* It doesn't on 386's */
90 90
91 /* Problems on some 486Dx4's and old 386's: */ 91 /* Problems on some 486Dx4's and old 386's: */
92 char hlt_works_ok;
93 char hard_math; 92 char hard_math;
94 char rfu; 93 char rfu;
95 char fdiv_bug; 94 char fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
165 164
166extern const struct seq_operations cpuinfo_op; 165extern const struct seq_operations cpuinfo_op;
167 166
168static inline int hlt_works(int cpu)
169{
170#ifdef CONFIG_X86_32
171 return cpu_data(cpu).hlt_works_ok;
172#else
173 return 1;
174#endif
175}
176
177#define cache_line_size() (boot_cpu_data.x86_cache_alignment) 167#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
178 168
179extern void cpu_detect(struct cpuinfo_x86 *c); 169extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
725extern bool amd_e400_c1e_detected; 715extern bool amd_e400_c1e_detected;
726 716
727enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, 717enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
728 IDLE_POLL, IDLE_FORCE_MWAIT}; 718 IDLE_POLL};
729 719
730extern void enable_sep_cpu(void); 720extern void enable_sep_cpu(void);
731extern int sysenter_setup(void); 721extern int sysenter_setup(void);
@@ -943,7 +933,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
943extern int get_tsc_mode(unsigned long adr); 933extern int get_tsc_mode(unsigned long adr);
944extern int set_tsc_mode(unsigned int val); 934extern int set_tsc_mode(unsigned int val);
945 935
946extern int amd_get_nb_id(int cpu); 936extern u16 amd_get_nb_id(int cpu);
947 937
948struct aperfmperf { 938struct aperfmperf {
949 u64 aperf, mperf; 939 u64 aperf, mperf;
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
998extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 988extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
999 989
1000void default_idle(void); 990void default_idle(void);
1001bool set_pm_idle_to_default(void); 991#ifdef CONFIG_XEN
992bool xen_set_default_idle(void);
993#else
994#define xen_set_default_idle 0
995#endif
1002 996
1003void stop_this_cpu(void *dummy); 997void stop_this_cpu(void *dummy);
1004 998
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6c7fc25f2c34..5c6e4fb370f5 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,6 +47,12 @@
47# define NEED_NOPL 0 47# define NEED_NOPL 0
48#endif 48#endif
49 49
50#ifdef CONFIG_MATOM
51# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
52#else
53# define NEED_MOVBE 0
54#endif
55
50#ifdef CONFIG_X86_64 56#ifdef CONFIG_X86_64
51#ifdef CONFIG_PARAVIRT 57#ifdef CONFIG_PARAVIRT
52/* Paravirtualized systems may not have PSE or PGE available */ 58/* Paravirtualized systems may not have PSE or PGE available */
@@ -80,7 +86,7 @@
80 86
81#define REQUIRED_MASK2 0 87#define REQUIRED_MASK2 0
82#define REQUIRED_MASK3 (NEED_NOPL) 88#define REQUIRED_MASK3 (NEED_NOPL)
83#define REQUIRED_MASK4 0 89#define REQUIRED_MASK4 (NEED_MOVBE)
84#define REQUIRED_MASK5 0 90#define REQUIRED_MASK5 0
85#define REQUIRED_MASK6 0 91#define REQUIRED_MASK6 0
86#define REQUIRED_MASK7 0 92#define REQUIRED_MASK7 0
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index b47c2a82ff15..062921ef34e9 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -16,7 +16,7 @@ extern void uv_system_init(void);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 17 struct mm_struct *mm,
18 unsigned long start, 18 unsigned long start,
19 unsigned end, 19 unsigned long end,
20 unsigned int cpu); 20 unsigned int cpu);
21 21
22#else /* X86_UV */ 22#else /* X86_UV */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb8..2c32df95bb78 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV architectural definitions 6 * SGI UV architectural definitions
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_HUB_H 11#ifndef _ASM_X86_UV_UV_HUB_H
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
175 */ 175 */
176#define UV1_HUB_REVISION_BASE 1 176#define UV1_HUB_REVISION_BASE 1
177#define UV2_HUB_REVISION_BASE 3 177#define UV2_HUB_REVISION_BASE 3
178#define UV3_HUB_REVISION_BASE 5
178 179
179static inline int is_uv1_hub(void) 180static inline int is_uv1_hub(void)
180{ 181{
@@ -183,6 +184,23 @@ static inline int is_uv1_hub(void)
183 184
184static inline int is_uv2_hub(void) 185static inline int is_uv2_hub(void)
185{ 186{
187 return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
188 (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
189}
190
191static inline int is_uv3_hub(void)
192{
193 return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
194}
195
196static inline int is_uv_hub(void)
197{
198 return uv_hub_info->hub_revision;
199}
200
201/* code common to uv2 and uv3 only */
202static inline int is_uvx_hub(void)
203{
186 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE; 204 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
187} 205}
188 206
@@ -230,14 +248,23 @@ union uvh_apicid {
230#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024) 248#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
231#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) 249#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
232 250
233#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \ 251#define UV3_LOCAL_MMR_BASE 0xfa000000UL
234 : UV2_LOCAL_MMR_BASE) 252#define UV3_GLOBAL_MMR32_BASE 0xfc000000UL
235#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \ 253#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
236 : UV2_GLOBAL_MMR32_BASE) 254#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
237#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \ 255
238 UV2_LOCAL_MMR_SIZE) 256#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
257 (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
258 UV3_LOCAL_MMR_BASE))
259#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
260 (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
261 UV3_GLOBAL_MMR32_BASE))
262#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
263 (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
264 UV3_LOCAL_MMR_SIZE))
239#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\ 265#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
240 UV2_GLOBAL_MMR32_SIZE) 266 (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
267 UV3_GLOBAL_MMR32_SIZE))
241#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) 268#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
242 269
243#define UV_GLOBAL_GRU_MMR_BASE 0x4000000 270#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -599,6 +626,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
599 * 1 - UV1 rev 1.0 initial silicon 626 * 1 - UV1 rev 1.0 initial silicon
600 * 2 - UV1 rev 2.0 production silicon 627 * 2 - UV1 rev 2.0 production silicon
601 * 3 - UV2 rev 1.0 initial silicon 628 * 3 - UV2 rev 1.0 initial silicon
629 * 5 - UV3 rev 1.0 initial silicon
602 */ 630 */
603static inline int uv_get_min_hub_revision_id(void) 631static inline int uv_get_min_hub_revision_id(void)
604{ 632{
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index cf1d73643f60..bd5f80e58a23 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,16 +5,25 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
12#define _ASM_X86_UV_UV_MMRS_H 12#define _ASM_X86_UV_UV_MMRS_H
13 13
14/* 14/*
15 * This file contains MMR definitions for both UV1 & UV2 hubs. 15 * This file contains MMR definitions for all UV hubs types.
16 * 16 *
17 * In general, MMR addresses and structures are identical on both hubs. 17 * To minimize coding differences between hub types, the symbols are
18 * grouped by architecture types.
19 *
20 * UVH - definitions common to all UV hub types.
21 * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
22 * UV1H - definitions specific to UV type 1 hub.
23 * UV2H - definitions specific to UV type 2 hub.
24 * UV3H - definitions specific to UV type 3 hub.
25 *
26 * So in general, MMR addresses and structures are identical on all hubs types.
18 * These MMRs are identified as: 27 * These MMRs are identified as:
19 * #define UVH_xxx <address> 28 * #define UVH_xxx <address>
20 * union uvh_xxx { 29 * union uvh_xxx {
@@ -23,24 +32,36 @@
23 * } s; 32 * } s;
24 * }; 33 * };
25 * 34 *
26 * If the MMR exists on both hub type but has different addresses or 35 * If the MMR exists on all hub types but have different addresses:
27 * contents, the MMR definition is similar to: 36 * #define UV1Hxxx a
28 * #define UV1H_xxx <uv1 address> 37 * #define UV2Hxxx b
29 * #define UV2H_xxx <uv2address> 38 * #define UV3Hxxx c
30 * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx) 39 * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
40 * (is_uv2_hub() ? UV2Hxxx :
41 * UV3Hxxx))
42 *
43 * If the MMR exists on all hub types > 1 but have different addresses:
44 * #define UV2Hxxx b
45 * #define UV3Hxxx c
46 * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
47 * UV3Hxxx))
48 *
31 * union uvh_xxx { 49 * union uvh_xxx {
32 * unsigned long v; 50 * unsigned long v;
33 * struct uv1h_int_cmpd_s { (Common fields only) 51 * struct uvh_xxx_s { # Common fields only
34 * } s; 52 * } s;
35 * struct uv1h_int_cmpd_s { (Full UV1 definition) 53 * struct uv1h_xxx_s { # Full UV1 definition (*)
36 * } s1; 54 * } s1;
37 * struct uv2h_int_cmpd_s { (Full UV2 definition) 55 * struct uv2h_xxx_s { # Full UV2 definition (*)
38 * } s2; 56 * } s2;
57 * struct uv3h_xxx_s { # Full UV3 definition (*)
58 * } s3;
39 * }; 59 * };
60 * (* - if present and different than the common struct)
40 * 61 *
41 * Only essential difference are enumerated. For example, if the address is 62 * Only essential differences are enumerated. For example, if the address is
42 * the same for both UV1 & UV2, only a single #define is generated. Likewise, 63 * the same for all UV's, only a single #define is generated. Likewise,
43 * if the contents is the same for both hubs, only the "s" structure is 64 * if the contents is the same for all hubs, only the "s" structure is
44 * generated. 65 * generated.
45 * 66 *
46 * If the MMR exists on ONLY 1 type of hub, no generic definition is 67 * If the MMR exists on ONLY 1 type of hub, no generic definition is
@@ -51,6 +72,8 @@
51 * struct uvh_int_cmpd_s { 72 * struct uvh_int_cmpd_s {
52 * } sn; 73 * } sn;
53 * }; 74 * };
75 *
76 * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
54 */ 77 */
55 78
56#define UV_MMR_ENABLE (1UL << 63) 79#define UV_MMR_ENABLE (1UL << 63)
@@ -58,15 +81,18 @@
58#define UV1_HUB_PART_NUMBER 0x88a5 81#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8 82#define UV2_HUB_PART_NUMBER 0x8eb8
60#define UV2_HUB_PART_NUMBER_X 0x1111 83#define UV2_HUB_PART_NUMBER_X 0x1111
84#define UV3_HUB_PART_NUMBER 0x9578
85#define UV3_HUB_PART_NUMBER_X 0x4321
61 86
62/* Compat: if this #define is present, UV headers support UV2 */ 87/* Compat: Indicate which UV Hubs are supported. */
63#define UV2_HUB_IS_SUPPORTED 1 88#define UV2_HUB_IS_SUPPORTED 1
89#define UV3_HUB_IS_SUPPORTED 1
64 90
65/* ========================================================================= */ 91/* ========================================================================= */
66/* UVH_BAU_DATA_BROADCAST */ 92/* UVH_BAU_DATA_BROADCAST */
67/* ========================================================================= */ 93/* ========================================================================= */
68#define UVH_BAU_DATA_BROADCAST 0x61688UL 94#define UVH_BAU_DATA_BROADCAST 0x61688UL
69#define UVH_BAU_DATA_BROADCAST_32 0x440 95#define UVH_BAU_DATA_BROADCAST_32 0x440
70 96
71#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 97#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
72#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL 98#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u {
82/* ========================================================================= */ 108/* ========================================================================= */
83/* UVH_BAU_DATA_CONFIG */ 109/* UVH_BAU_DATA_CONFIG */
84/* ========================================================================= */ 110/* ========================================================================= */
85#define UVH_BAU_DATA_CONFIG 0x61680UL 111#define UVH_BAU_DATA_CONFIG 0x61680UL
86#define UVH_BAU_DATA_CONFIG_32 0x438 112#define UVH_BAU_DATA_CONFIG_32 0x438
87 113
88#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 114#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
89#define UVH_BAU_DATA_CONFIG_DM_SHFT 8 115#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u {
121/* ========================================================================= */ 147/* ========================================================================= */
122/* UVH_EVENT_OCCURRED0 */ 148/* UVH_EVENT_OCCURRED0 */
123/* ========================================================================= */ 149/* ========================================================================= */
124#define UVH_EVENT_OCCURRED0 0x70000UL 150#define UVH_EVENT_OCCURRED0 0x70000UL
125#define UVH_EVENT_OCCURRED0_32 0x5e8 151#define UVH_EVENT_OCCURRED0_32 0x5e8
152
153#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
154#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
155#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
156#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
126 157
127#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
128#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 158#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
129#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 159#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
130#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3 160#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u {
135#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 165#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
136#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 166#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
137#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 167#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
138#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
139#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 168#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
140#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 169#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
141#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 170#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u {
181#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54 210#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
182#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55 211#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
183#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 212#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
184#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
185#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL 213#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
186#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL 214#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
187#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL 215#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u {
192#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL 220#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
193#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL 221#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
194#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL 222#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
195#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
196#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL 223#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
197#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL 224#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
198#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL 225#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u {
239#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL 266#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
240#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL 267#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
241 268
242#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 269#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1
243#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 270#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
244#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2 271#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
245#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 272#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
246#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 273#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
247#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 274#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
248#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 275#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
249#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 276#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
250#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 277#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
251#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 278#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
252#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 279#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
253#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 280#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
254#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 281#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
255#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 282#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
256#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 283#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
257#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 284#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
258#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 285#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
259#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 286#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
260#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 287#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
261#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 288#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
262#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 289#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
263#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 290#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
264#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 291#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
265#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 292#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
266#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 293#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
267#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 294#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
268#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 295#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
269#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 296#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
270#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 297#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
271#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 298#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
272#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 299#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
273#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 300#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
274#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 301#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
275#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 302#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
276#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 303#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
277#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 304#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
278#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 305#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
279#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 306#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
280#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 307#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
281#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 308#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
282#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 309#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
283#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 310#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
284#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 311#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
285#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 312#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
286#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 313#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
287#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 314#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
288#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 315#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
289#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 316#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
290#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 317#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
291#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 318#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
292#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 319#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
293#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 320#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53
294#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 321#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
295#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 322#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
296#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 323#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
297#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 324#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
298#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 325#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
299#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 326#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
300#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 327#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
301#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL 328#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
302#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL 329#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
303#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL 330#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
304#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL 331#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
305#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL 332#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
306#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL 333#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
307#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL 334#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
308#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL 335#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
309#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL 336#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
310#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL 337#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
311#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL 338#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
312#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL 339#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
313#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL 340#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
314#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL 341#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
315#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL 342#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
316#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL 343#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
317#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL 344#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
318#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL 345#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
319#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL 346#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
320#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL 347#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
321#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL 348#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
322#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL 349#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
323#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL 350#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
324#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL 351#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
325#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL 352#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
326#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL 353#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
327#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL 354#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
328#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL 355#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
329#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL 356#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
330#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL 357#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
331#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL 358#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
332#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL 359#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
333#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL 360#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
334#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL 361#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
335#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL 362#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
336#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL 363#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
337#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL 364#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
338#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL 365#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
339#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL 366#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
340#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL 367#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
341#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL 368#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
342#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL 369#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
343#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL 370#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
344#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL 371#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
345#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL 372#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
346#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL 373#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
347#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL 374#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
348#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL 375#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
349#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL 376#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
350#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL 377#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
351#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL 378#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
352#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL 379#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
353#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL 380#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
354#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL 381#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
355#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL 382#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
356#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
357#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
358#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
359#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
360 383
361union uvh_event_occurred0_u { 384union uvh_event_occurred0_u {
362 unsigned long v; 385 unsigned long v;
363 struct uv1h_event_occurred0_s { 386 struct uvh_event_occurred0_s {
364 unsigned long lb_hcerr:1; /* RW, W1C */ 387 unsigned long lb_hcerr:1; /* RW, W1C */
365 unsigned long gr0_hcerr:1; /* RW, W1C */ 388 unsigned long rsvd_1_10:10;
366 unsigned long gr1_hcerr:1; /* RW, W1C */
367 unsigned long lh_hcerr:1; /* RW, W1C */
368 unsigned long rh_hcerr:1; /* RW, W1C */
369 unsigned long xn_hcerr:1; /* RW, W1C */
370 unsigned long si_hcerr:1; /* RW, W1C */
371 unsigned long lb_aoerr0:1; /* RW, W1C */
372 unsigned long gr0_aoerr0:1; /* RW, W1C */
373 unsigned long gr1_aoerr0:1; /* RW, W1C */
374 unsigned long lh_aoerr0:1; /* RW, W1C */
375 unsigned long rh_aoerr0:1; /* RW, W1C */ 389 unsigned long rh_aoerr0:1; /* RW, W1C */
376 unsigned long xn_aoerr0:1; /* RW, W1C */ 390 unsigned long rsvd_12_63:52;
377 unsigned long si_aoerr0:1; /* RW, W1C */ 391 } s;
378 unsigned long lb_aoerr1:1; /* RW, W1C */ 392 struct uvxh_event_occurred0_s {
379 unsigned long gr0_aoerr1:1; /* RW, W1C */
380 unsigned long gr1_aoerr1:1; /* RW, W1C */
381 unsigned long lh_aoerr1:1; /* RW, W1C */
382 unsigned long rh_aoerr1:1; /* RW, W1C */
383 unsigned long xn_aoerr1:1; /* RW, W1C */
384 unsigned long si_aoerr1:1; /* RW, W1C */
385 unsigned long rh_vpi_int:1; /* RW, W1C */
386 unsigned long system_shutdown_int:1; /* RW, W1C */
387 unsigned long lb_irq_int_0:1; /* RW, W1C */
388 unsigned long lb_irq_int_1:1; /* RW, W1C */
389 unsigned long lb_irq_int_2:1; /* RW, W1C */
390 unsigned long lb_irq_int_3:1; /* RW, W1C */
391 unsigned long lb_irq_int_4:1; /* RW, W1C */
392 unsigned long lb_irq_int_5:1; /* RW, W1C */
393 unsigned long lb_irq_int_6:1; /* RW, W1C */
394 unsigned long lb_irq_int_7:1; /* RW, W1C */
395 unsigned long lb_irq_int_8:1; /* RW, W1C */
396 unsigned long lb_irq_int_9:1; /* RW, W1C */
397 unsigned long lb_irq_int_10:1; /* RW, W1C */
398 unsigned long lb_irq_int_11:1; /* RW, W1C */
399 unsigned long lb_irq_int_12:1; /* RW, W1C */
400 unsigned long lb_irq_int_13:1; /* RW, W1C */
401 unsigned long lb_irq_int_14:1; /* RW, W1C */
402 unsigned long lb_irq_int_15:1; /* RW, W1C */
403 unsigned long l1_nmi_int:1; /* RW, W1C */
404 unsigned long stop_clock:1; /* RW, W1C */
405 unsigned long asic_to_l1:1; /* RW, W1C */
406 unsigned long l1_to_asic:1; /* RW, W1C */
407 unsigned long ltc_int:1; /* RW, W1C */
408 unsigned long la_seq_trigger:1; /* RW, W1C */
409 unsigned long ipi_int:1; /* RW, W1C */
410 unsigned long extio_int0:1; /* RW, W1C */
411 unsigned long extio_int1:1; /* RW, W1C */
412 unsigned long extio_int2:1; /* RW, W1C */
413 unsigned long extio_int3:1; /* RW, W1C */
414 unsigned long profile_int:1; /* RW, W1C */
415 unsigned long rtc0:1; /* RW, W1C */
416 unsigned long rtc1:1; /* RW, W1C */
417 unsigned long rtc2:1; /* RW, W1C */
418 unsigned long rtc3:1; /* RW, W1C */
419 unsigned long bau_data:1; /* RW, W1C */
420 unsigned long power_management_req:1; /* RW, W1C */
421 unsigned long rsvd_57_63:7;
422 } s1;
423 struct uv2h_event_occurred0_s {
424 unsigned long lb_hcerr:1; /* RW */ 393 unsigned long lb_hcerr:1; /* RW */
425 unsigned long qp_hcerr:1; /* RW */ 394 unsigned long qp_hcerr:1; /* RW */
426 unsigned long rh_hcerr:1; /* RW */ 395 unsigned long rh_hcerr:1; /* RW */
@@ -481,19 +450,20 @@ union uvh_event_occurred0_u {
481 unsigned long extio_int3:1; /* RW */ 450 unsigned long extio_int3:1; /* RW */
482 unsigned long profile_int:1; /* RW */ 451 unsigned long profile_int:1; /* RW */
483 unsigned long rsvd_59_63:5; 452 unsigned long rsvd_59_63:5;
484 } s2; 453 } sx;
485}; 454};
486 455
487/* ========================================================================= */ 456/* ========================================================================= */
488/* UVH_EVENT_OCCURRED0_ALIAS */ 457/* UVH_EVENT_OCCURRED0_ALIAS */
489/* ========================================================================= */ 458/* ========================================================================= */
490#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL 459#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
491#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0 460#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
461
492 462
493/* ========================================================================= */ 463/* ========================================================================= */
494/* UVH_GR0_TLB_INT0_CONFIG */ 464/* UVH_GR0_TLB_INT0_CONFIG */
495/* ========================================================================= */ 465/* ========================================================================= */
496#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL 466#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
497 467
498#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 468#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
499#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 469#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
@@ -531,7 +501,7 @@ union uvh_gr0_tlb_int0_config_u {
531/* ========================================================================= */ 501/* ========================================================================= */
532/* UVH_GR0_TLB_INT1_CONFIG */ 502/* UVH_GR0_TLB_INT1_CONFIG */
533/* ========================================================================= */ 503/* ========================================================================= */
534#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL 504#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
535 505
536#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 506#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
537#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 507#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
@@ -571,9 +541,11 @@ union uvh_gr0_tlb_int1_config_u {
571/* ========================================================================= */ 541/* ========================================================================= */
572#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL 542#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
573#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL 543#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
574#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \ 544#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
575 UV1H_GR0_TLB_MMR_CONTROL : \ 545#define UVH_GR0_TLB_MMR_CONTROL \
576 UV2H_GR0_TLB_MMR_CONTROL) 546 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
547 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
548 UV3H_GR0_TLB_MMR_CONTROL))
577 549
578#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 550#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
579#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 551#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -611,6 +583,21 @@ union uvh_gr0_tlb_int1_config_u {
611#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL 583#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
612#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL 584#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
613 585
586#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
587#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
588#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
589#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
590#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
591#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
592#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
593#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
594#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
595#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
596#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
597#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
598#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
599#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
600
614#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 601#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
615#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 602#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
616#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 603#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -630,6 +617,23 @@ union uvh_gr0_tlb_int1_config_u {
630#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL 617#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
631#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL 618#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
632 619
620#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
621#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
622#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
623#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
624#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
625#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
626#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
627#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
628#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
629#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
630#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
631#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
632#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
633#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
634#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
635#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
636
633union uvh_gr0_tlb_mmr_control_u { 637union uvh_gr0_tlb_mmr_control_u {
634 unsigned long v; 638 unsigned long v;
635 struct uvh_gr0_tlb_mmr_control_s { 639 struct uvh_gr0_tlb_mmr_control_s {
@@ -642,7 +646,9 @@ union uvh_gr0_tlb_mmr_control_u {
642 unsigned long rsvd_21_29:9; 646 unsigned long rsvd_21_29:9;
643 unsigned long mmr_write:1; /* WP */ 647 unsigned long mmr_write:1; /* WP */
644 unsigned long mmr_read:1; /* WP */ 648 unsigned long mmr_read:1; /* WP */
645 unsigned long rsvd_32_63:32; 649 unsigned long rsvd_32_48:17;
650 unsigned long rsvd_49_51:3;
651 unsigned long rsvd_52_63:12;
646 } s; 652 } s;
647 struct uv1h_gr0_tlb_mmr_control_s { 653 struct uv1h_gr0_tlb_mmr_control_s {
648 unsigned long index:12; /* RW */ 654 unsigned long index:12; /* RW */
@@ -666,6 +672,23 @@ union uvh_gr0_tlb_mmr_control_u {
666 unsigned long mmr_inj_tlblruv:1; /* RW */ 672 unsigned long mmr_inj_tlblruv:1; /* RW */
667 unsigned long rsvd_61_63:3; 673 unsigned long rsvd_61_63:3;
668 } s1; 674 } s1;
675 struct uvxh_gr0_tlb_mmr_control_s {
676 unsigned long index:12; /* RW */
677 unsigned long mem_sel:2; /* RW */
678 unsigned long rsvd_14_15:2;
679 unsigned long auto_valid_en:1; /* RW */
680 unsigned long rsvd_17_19:3;
681 unsigned long mmr_hash_index_en:1; /* RW */
682 unsigned long rsvd_21_29:9;
683 unsigned long mmr_write:1; /* WP */
684 unsigned long mmr_read:1; /* WP */
685 unsigned long mmr_op_done:1; /* RW */
686 unsigned long rsvd_33_47:15;
687 unsigned long rsvd_48:1;
688 unsigned long rsvd_49_51:3;
689 unsigned long rsvd_52:1;
690 unsigned long rsvd_53_63:11;
691 } sx;
669 struct uv2h_gr0_tlb_mmr_control_s { 692 struct uv2h_gr0_tlb_mmr_control_s {
670 unsigned long index:12; /* RW */ 693 unsigned long index:12; /* RW */
671 unsigned long mem_sel:2; /* RW */ 694 unsigned long mem_sel:2; /* RW */
@@ -683,6 +706,24 @@ union uvh_gr0_tlb_mmr_control_u {
683 unsigned long mmr_inj_tlbram:1; /* RW */ 706 unsigned long mmr_inj_tlbram:1; /* RW */
684 unsigned long rsvd_53_63:11; 707 unsigned long rsvd_53_63:11;
685 } s2; 708 } s2;
709 struct uv3h_gr0_tlb_mmr_control_s {
710 unsigned long index:12; /* RW */
711 unsigned long mem_sel:2; /* RW */
712 unsigned long rsvd_14_15:2;
713 unsigned long auto_valid_en:1; /* RW */
714 unsigned long rsvd_17_19:3;
715 unsigned long mmr_hash_index_en:1; /* RW */
716 unsigned long ecc_sel:1; /* RW */
717 unsigned long rsvd_22_29:8;
718 unsigned long mmr_write:1; /* WP */
719 unsigned long mmr_read:1; /* WP */
720 unsigned long mmr_op_done:1; /* RW */
721 unsigned long rsvd_33_47:15;
722 unsigned long undef_48:1; /* Undefined */
723 unsigned long rsvd_49_51:3;
724 unsigned long undef_52:1; /* Undefined */
725 unsigned long rsvd_53_63:11;
726 } s3;
686}; 727};
687 728
688/* ========================================================================= */ 729/* ========================================================================= */
@@ -690,9 +731,11 @@ union uvh_gr0_tlb_mmr_control_u {
690/* ========================================================================= */ 731/* ========================================================================= */
691#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL 732#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
692#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL 733#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
693#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ 734#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
694 UV1H_GR0_TLB_MMR_READ_DATA_HI : \ 735#define UVH_GR0_TLB_MMR_READ_DATA_HI \
695 UV2H_GR0_TLB_MMR_READ_DATA_HI) 736 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
737 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
738 UV3H_GR0_TLB_MMR_READ_DATA_HI))
696 739
697#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 740#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
698#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 741#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -703,6 +746,46 @@ union uvh_gr0_tlb_mmr_control_u {
703#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL 746#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
704#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL 747#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
705 748
749#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
750#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
751#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
752#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
753#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
754#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
755#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
756#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
757
758#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
759#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
760#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
761#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
762#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
763#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
764#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
765#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
766
767#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
768#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
769#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
770#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
771#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
772#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
773#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
774#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
775
776#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
777#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
778#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
779#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
780#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
781#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
782#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
783#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
784#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
785#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
786#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
787#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
788
706union uvh_gr0_tlb_mmr_read_data_hi_u { 789union uvh_gr0_tlb_mmr_read_data_hi_u {
707 unsigned long v; 790 unsigned long v;
708 struct uvh_gr0_tlb_mmr_read_data_hi_s { 791 struct uvh_gr0_tlb_mmr_read_data_hi_s {
@@ -712,6 +795,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
712 unsigned long larger:1; /* RO */ 795 unsigned long larger:1; /* RO */
713 unsigned long rsvd_45_63:19; 796 unsigned long rsvd_45_63:19;
714 } s; 797 } s;
798 struct uv1h_gr0_tlb_mmr_read_data_hi_s {
799 unsigned long pfn:41; /* RO */
800 unsigned long gaa:2; /* RO */
801 unsigned long dirty:1; /* RO */
802 unsigned long larger:1; /* RO */
803 unsigned long rsvd_45_63:19;
804 } s1;
805 struct uvxh_gr0_tlb_mmr_read_data_hi_s {
806 unsigned long pfn:41; /* RO */
807 unsigned long gaa:2; /* RO */
808 unsigned long dirty:1; /* RO */
809 unsigned long larger:1; /* RO */
810 unsigned long rsvd_45_63:19;
811 } sx;
812 struct uv2h_gr0_tlb_mmr_read_data_hi_s {
813 unsigned long pfn:41; /* RO */
814 unsigned long gaa:2; /* RO */
815 unsigned long dirty:1; /* RO */
816 unsigned long larger:1; /* RO */
817 unsigned long rsvd_45_63:19;
818 } s2;
819 struct uv3h_gr0_tlb_mmr_read_data_hi_s {
820 unsigned long pfn:41; /* RO */
821 unsigned long gaa:2; /* RO */
822 unsigned long dirty:1; /* RO */
823 unsigned long larger:1; /* RO */
824 unsigned long aa_ext:1; /* RO */
825 unsigned long undef_46_54:9; /* Undefined */
826 unsigned long way_ecc:9; /* RO */
827 } s3;
715}; 828};
716 829
717/* ========================================================================= */ 830/* ========================================================================= */
@@ -719,9 +832,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
719/* ========================================================================= */ 832/* ========================================================================= */
720#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL 833#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
721#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL 834#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
722#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ 835#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
723 UV1H_GR0_TLB_MMR_READ_DATA_LO : \ 836#define UVH_GR0_TLB_MMR_READ_DATA_LO \
724 UV2H_GR0_TLB_MMR_READ_DATA_LO) 837 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
838 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
839 UV3H_GR0_TLB_MMR_READ_DATA_LO))
725 840
726#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 841#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
727#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 842#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -730,6 +845,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
730#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL 845#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
731#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL 846#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
732 847
848#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
849#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
850#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
851#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
852#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
853#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
854
855#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
856#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
857#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
858#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
859#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
860#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
861
862#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
863#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
864#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
865#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
866#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
867#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
868
869#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
870#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
871#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
872#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
873#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
874#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
875
733union uvh_gr0_tlb_mmr_read_data_lo_u { 876union uvh_gr0_tlb_mmr_read_data_lo_u {
734 unsigned long v; 877 unsigned long v;
735 struct uvh_gr0_tlb_mmr_read_data_lo_s { 878 struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -737,12 +880,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
737 unsigned long asid:24; /* RO */ 880 unsigned long asid:24; /* RO */
738 unsigned long valid:1; /* RO */ 881 unsigned long valid:1; /* RO */
739 } s; 882 } s;
883 struct uv1h_gr0_tlb_mmr_read_data_lo_s {
884 unsigned long vpn:39; /* RO */
885 unsigned long asid:24; /* RO */
886 unsigned long valid:1; /* RO */
887 } s1;
888 struct uvxh_gr0_tlb_mmr_read_data_lo_s {
889 unsigned long vpn:39; /* RO */
890 unsigned long asid:24; /* RO */
891 unsigned long valid:1; /* RO */
892 } sx;
893 struct uv2h_gr0_tlb_mmr_read_data_lo_s {
894 unsigned long vpn:39; /* RO */
895 unsigned long asid:24; /* RO */
896 unsigned long valid:1; /* RO */
897 } s2;
898 struct uv3h_gr0_tlb_mmr_read_data_lo_s {
899 unsigned long vpn:39; /* RO */
900 unsigned long asid:24; /* RO */
901 unsigned long valid:1; /* RO */
902 } s3;
740}; 903};
741 904
742/* ========================================================================= */ 905/* ========================================================================= */
743/* UVH_GR1_TLB_INT0_CONFIG */ 906/* UVH_GR1_TLB_INT0_CONFIG */
744/* ========================================================================= */ 907/* ========================================================================= */
745#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL 908#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
746 909
747#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 910#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
748#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 911#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
@@ -780,7 +943,7 @@ union uvh_gr1_tlb_int0_config_u {
780/* ========================================================================= */ 943/* ========================================================================= */
781/* UVH_GR1_TLB_INT1_CONFIG */ 944/* UVH_GR1_TLB_INT1_CONFIG */
782/* ========================================================================= */ 945/* ========================================================================= */
783#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL 946#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
784 947
785#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 948#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
786#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 949#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
@@ -820,9 +983,11 @@ union uvh_gr1_tlb_int1_config_u {
820/* ========================================================================= */ 983/* ========================================================================= */
821#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL 984#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
822#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL 985#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
823#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \ 986#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
824 UV1H_GR1_TLB_MMR_CONTROL : \ 987#define UVH_GR1_TLB_MMR_CONTROL \
825 UV2H_GR1_TLB_MMR_CONTROL) 988 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
989 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
990 UV3H_GR1_TLB_MMR_CONTROL))
826 991
827#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 992#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
828#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 993#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -860,6 +1025,21 @@ union uvh_gr1_tlb_int1_config_u {
860#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL 1025#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
861#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL 1026#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
862 1027
1028#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
1029#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
1030#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
1031#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
1032#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
1033#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
1034#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
1035#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
1036#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
1037#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
1038#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
1039#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
1040#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
1041#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
1042
863#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 1043#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
864#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 1044#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
865#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 1045#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -879,6 +1059,23 @@ union uvh_gr1_tlb_int1_config_u {
879#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL 1059#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
880#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL 1060#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
881 1061
1062#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
1063#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
1064#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
1065#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
1066#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
1067#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
1068#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
1069#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
1070#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
1071#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
1072#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
1073#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
1074#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
1075#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
1076#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
1077#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
1078
882union uvh_gr1_tlb_mmr_control_u { 1079union uvh_gr1_tlb_mmr_control_u {
883 unsigned long v; 1080 unsigned long v;
884 struct uvh_gr1_tlb_mmr_control_s { 1081 struct uvh_gr1_tlb_mmr_control_s {
@@ -891,7 +1088,9 @@ union uvh_gr1_tlb_mmr_control_u {
891 unsigned long rsvd_21_29:9; 1088 unsigned long rsvd_21_29:9;
892 unsigned long mmr_write:1; /* WP */ 1089 unsigned long mmr_write:1; /* WP */
893 unsigned long mmr_read:1; /* WP */ 1090 unsigned long mmr_read:1; /* WP */
894 unsigned long rsvd_32_63:32; 1091 unsigned long rsvd_32_48:17;
1092 unsigned long rsvd_49_51:3;
1093 unsigned long rsvd_52_63:12;
895 } s; 1094 } s;
896 struct uv1h_gr1_tlb_mmr_control_s { 1095 struct uv1h_gr1_tlb_mmr_control_s {
897 unsigned long index:12; /* RW */ 1096 unsigned long index:12; /* RW */
@@ -915,6 +1114,23 @@ union uvh_gr1_tlb_mmr_control_u {
915 unsigned long mmr_inj_tlblruv:1; /* RW */ 1114 unsigned long mmr_inj_tlblruv:1; /* RW */
916 unsigned long rsvd_61_63:3; 1115 unsigned long rsvd_61_63:3;
917 } s1; 1116 } s1;
1117 struct uvxh_gr1_tlb_mmr_control_s {
1118 unsigned long index:12; /* RW */
1119 unsigned long mem_sel:2; /* RW */
1120 unsigned long rsvd_14_15:2;
1121 unsigned long auto_valid_en:1; /* RW */
1122 unsigned long rsvd_17_19:3;
1123 unsigned long mmr_hash_index_en:1; /* RW */
1124 unsigned long rsvd_21_29:9;
1125 unsigned long mmr_write:1; /* WP */
1126 unsigned long mmr_read:1; /* WP */
1127 unsigned long mmr_op_done:1; /* RW */
1128 unsigned long rsvd_33_47:15;
1129 unsigned long rsvd_48:1;
1130 unsigned long rsvd_49_51:3;
1131 unsigned long rsvd_52:1;
1132 unsigned long rsvd_53_63:11;
1133 } sx;
918 struct uv2h_gr1_tlb_mmr_control_s { 1134 struct uv2h_gr1_tlb_mmr_control_s {
919 unsigned long index:12; /* RW */ 1135 unsigned long index:12; /* RW */
920 unsigned long mem_sel:2; /* RW */ 1136 unsigned long mem_sel:2; /* RW */
@@ -932,6 +1148,24 @@ union uvh_gr1_tlb_mmr_control_u {
932 unsigned long mmr_inj_tlbram:1; /* RW */ 1148 unsigned long mmr_inj_tlbram:1; /* RW */
933 unsigned long rsvd_53_63:11; 1149 unsigned long rsvd_53_63:11;
934 } s2; 1150 } s2;
1151 struct uv3h_gr1_tlb_mmr_control_s {
1152 unsigned long index:12; /* RW */
1153 unsigned long mem_sel:2; /* RW */
1154 unsigned long rsvd_14_15:2;
1155 unsigned long auto_valid_en:1; /* RW */
1156 unsigned long rsvd_17_19:3;
1157 unsigned long mmr_hash_index_en:1; /* RW */
1158 unsigned long ecc_sel:1; /* RW */
1159 unsigned long rsvd_22_29:8;
1160 unsigned long mmr_write:1; /* WP */
1161 unsigned long mmr_read:1; /* WP */
1162 unsigned long mmr_op_done:1; /* RW */
1163 unsigned long rsvd_33_47:15;
1164 unsigned long undef_48:1; /* Undefined */
1165 unsigned long rsvd_49_51:3;
1166 unsigned long undef_52:1; /* Undefined */
1167 unsigned long rsvd_53_63:11;
1168 } s3;
935}; 1169};
936 1170
937/* ========================================================================= */ 1171/* ========================================================================= */
@@ -939,9 +1173,11 @@ union uvh_gr1_tlb_mmr_control_u {
939/* ========================================================================= */ 1173/* ========================================================================= */
940#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL 1174#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
941#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL 1175#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
942#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ 1176#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
943 UV1H_GR1_TLB_MMR_READ_DATA_HI : \ 1177#define UVH_GR1_TLB_MMR_READ_DATA_HI \
944 UV2H_GR1_TLB_MMR_READ_DATA_HI) 1178 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
1179 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
1180 UV3H_GR1_TLB_MMR_READ_DATA_HI))
945 1181
946#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 1182#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
947#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 1183#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -952,6 +1188,46 @@ union uvh_gr1_tlb_mmr_control_u {
952#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL 1188#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
953#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL 1189#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
954 1190
1191#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1192#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1193#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1194#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1195#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1196#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1197#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1198#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1199
1200#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1201#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1202#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1203#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1204#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1205#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1206#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1207#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1208
1209#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1210#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1211#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1212#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1213#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1214#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1215#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1216#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1217
1218#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1219#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1220#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1221#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1222#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
1223#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
1224#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1225#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1226#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1227#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1228#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
1229#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
1230
955union uvh_gr1_tlb_mmr_read_data_hi_u { 1231union uvh_gr1_tlb_mmr_read_data_hi_u {
956 unsigned long v; 1232 unsigned long v;
957 struct uvh_gr1_tlb_mmr_read_data_hi_s { 1233 struct uvh_gr1_tlb_mmr_read_data_hi_s {
@@ -961,6 +1237,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
961 unsigned long larger:1; /* RO */ 1237 unsigned long larger:1; /* RO */
962 unsigned long rsvd_45_63:19; 1238 unsigned long rsvd_45_63:19;
963 } s; 1239 } s;
1240 struct uv1h_gr1_tlb_mmr_read_data_hi_s {
1241 unsigned long pfn:41; /* RO */
1242 unsigned long gaa:2; /* RO */
1243 unsigned long dirty:1; /* RO */
1244 unsigned long larger:1; /* RO */
1245 unsigned long rsvd_45_63:19;
1246 } s1;
1247 struct uvxh_gr1_tlb_mmr_read_data_hi_s {
1248 unsigned long pfn:41; /* RO */
1249 unsigned long gaa:2; /* RO */
1250 unsigned long dirty:1; /* RO */
1251 unsigned long larger:1; /* RO */
1252 unsigned long rsvd_45_63:19;
1253 } sx;
1254 struct uv2h_gr1_tlb_mmr_read_data_hi_s {
1255 unsigned long pfn:41; /* RO */
1256 unsigned long gaa:2; /* RO */
1257 unsigned long dirty:1; /* RO */
1258 unsigned long larger:1; /* RO */
1259 unsigned long rsvd_45_63:19;
1260 } s2;
1261 struct uv3h_gr1_tlb_mmr_read_data_hi_s {
1262 unsigned long pfn:41; /* RO */
1263 unsigned long gaa:2; /* RO */
1264 unsigned long dirty:1; /* RO */
1265 unsigned long larger:1; /* RO */
1266 unsigned long aa_ext:1; /* RO */
1267 unsigned long undef_46_54:9; /* Undefined */
1268 unsigned long way_ecc:9; /* RO */
1269 } s3;
964}; 1270};
965 1271
966/* ========================================================================= */ 1272/* ========================================================================= */
@@ -968,9 +1274,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
968/* ========================================================================= */ 1274/* ========================================================================= */
969#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL 1275#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
970#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL 1276#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
971#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ 1277#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
972 UV1H_GR1_TLB_MMR_READ_DATA_LO : \ 1278#define UVH_GR1_TLB_MMR_READ_DATA_LO \
973 UV2H_GR1_TLB_MMR_READ_DATA_LO) 1279 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
1280 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
1281 UV3H_GR1_TLB_MMR_READ_DATA_LO))
974 1282
975#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 1283#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
976#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 1284#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -979,6 +1287,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
979#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL 1287#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
980#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL 1288#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
981 1289
1290#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1291#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1292#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1293#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1294#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1295#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1296
1297#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1298#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1299#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1300#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1301#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1302#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1303
1304#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1305#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1306#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1307#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1308#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1309#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1310
1311#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1312#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1313#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1314#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1315#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1316#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1317
982union uvh_gr1_tlb_mmr_read_data_lo_u { 1318union uvh_gr1_tlb_mmr_read_data_lo_u {
983 unsigned long v; 1319 unsigned long v;
984 struct uvh_gr1_tlb_mmr_read_data_lo_s { 1320 struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -986,12 +1322,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
986 unsigned long asid:24; /* RO */ 1322 unsigned long asid:24; /* RO */
987 unsigned long valid:1; /* RO */ 1323 unsigned long valid:1; /* RO */
988 } s; 1324 } s;
1325 struct uv1h_gr1_tlb_mmr_read_data_lo_s {
1326 unsigned long vpn:39; /* RO */
1327 unsigned long asid:24; /* RO */
1328 unsigned long valid:1; /* RO */
1329 } s1;
1330 struct uvxh_gr1_tlb_mmr_read_data_lo_s {
1331 unsigned long vpn:39; /* RO */
1332 unsigned long asid:24; /* RO */
1333 unsigned long valid:1; /* RO */
1334 } sx;
1335 struct uv2h_gr1_tlb_mmr_read_data_lo_s {
1336 unsigned long vpn:39; /* RO */
1337 unsigned long asid:24; /* RO */
1338 unsigned long valid:1; /* RO */
1339 } s2;
1340 struct uv3h_gr1_tlb_mmr_read_data_lo_s {
1341 unsigned long vpn:39; /* RO */
1342 unsigned long asid:24; /* RO */
1343 unsigned long valid:1; /* RO */
1344 } s3;
989}; 1345};
990 1346
991/* ========================================================================= */ 1347/* ========================================================================= */
992/* UVH_INT_CMPB */ 1348/* UVH_INT_CMPB */
993/* ========================================================================= */ 1349/* ========================================================================= */
994#define UVH_INT_CMPB 0x22080UL 1350#define UVH_INT_CMPB 0x22080UL
995 1351
996#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 1352#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
997#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL 1353#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
@@ -1007,10 +1363,13 @@ union uvh_int_cmpb_u {
1007/* ========================================================================= */ 1363/* ========================================================================= */
1008/* UVH_INT_CMPC */ 1364/* UVH_INT_CMPC */
1009/* ========================================================================= */ 1365/* ========================================================================= */
1010#define UVH_INT_CMPC 0x22100UL 1366#define UVH_INT_CMPC 0x22100UL
1367
1368#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
1369#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
1011 1370
1012#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0 1371#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
1013#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL 1372#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
1014 1373
1015union uvh_int_cmpc_u { 1374union uvh_int_cmpc_u {
1016 unsigned long v; 1375 unsigned long v;
@@ -1023,10 +1382,13 @@ union uvh_int_cmpc_u {
1023/* ========================================================================= */ 1382/* ========================================================================= */
1024/* UVH_INT_CMPD */ 1383/* UVH_INT_CMPD */
1025/* ========================================================================= */ 1384/* ========================================================================= */
1026#define UVH_INT_CMPD 0x22180UL 1385#define UVH_INT_CMPD 0x22180UL
1027 1386
1028#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 1387#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
1029#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL 1388#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
1389
1390#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
1391#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
1030 1392
1031union uvh_int_cmpd_u { 1393union uvh_int_cmpd_u {
1032 unsigned long v; 1394 unsigned long v;
@@ -1039,8 +1401,8 @@ union uvh_int_cmpd_u {
1039/* ========================================================================= */ 1401/* ========================================================================= */
1040/* UVH_IPI_INT */ 1402/* UVH_IPI_INT */
1041/* ========================================================================= */ 1403/* ========================================================================= */
1042#define UVH_IPI_INT 0x60500UL 1404#define UVH_IPI_INT 0x60500UL
1043#define UVH_IPI_INT_32 0x348 1405#define UVH_IPI_INT_32 0x348
1044 1406
1045#define UVH_IPI_INT_VECTOR_SHFT 0 1407#define UVH_IPI_INT_VECTOR_SHFT 0
1046#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 1408#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
@@ -1069,8 +1431,8 @@ union uvh_ipi_int_u {
1069/* ========================================================================= */ 1431/* ========================================================================= */
1070/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ 1432/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
1071/* ========================================================================= */ 1433/* ========================================================================= */
1072#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL 1434#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
1073#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 1435#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
1074 1436
1075#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 1437#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
1076#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 1438#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
@@ -1091,8 +1453,8 @@ union uvh_lb_bau_intd_payload_queue_first_u {
1091/* ========================================================================= */ 1453/* ========================================================================= */
1092/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ 1454/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
1093/* ========================================================================= */ 1455/* ========================================================================= */
1094#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL 1456#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
1095#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 1457#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
1096 1458
1097#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 1459#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
1098#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL 1460#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1109,8 +1471,8 @@ union uvh_lb_bau_intd_payload_queue_last_u {
1109/* ========================================================================= */ 1471/* ========================================================================= */
1110/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ 1472/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
1111/* ========================================================================= */ 1473/* ========================================================================= */
1112#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL 1474#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
1113#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 1475#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
1114 1476
1115#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 1477#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
1116#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL 1478#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1127,8 +1489,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
1127/* ========================================================================= */ 1489/* ========================================================================= */
1128/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ 1490/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
1129/* ========================================================================= */ 1491/* ========================================================================= */
1130#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL 1492#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
1131#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 1493#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
1132 1494
1133#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 1495#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
1134#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 1496#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
@@ -1189,14 +1551,21 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1189/* ========================================================================= */ 1551/* ========================================================================= */
1190/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ 1552/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
1191/* ========================================================================= */ 1553/* ========================================================================= */
1192#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL 1554#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
1193#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 1555#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
1556
1194 1557
1195/* ========================================================================= */ 1558/* ========================================================================= */
1196/* UVH_LB_BAU_MISC_CONTROL */ 1559/* UVH_LB_BAU_MISC_CONTROL */
1197/* ========================================================================= */ 1560/* ========================================================================= */
1198#define UVH_LB_BAU_MISC_CONTROL 0x320170UL 1561#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
1199#define UVH_LB_BAU_MISC_CONTROL_32 0xa10 1562#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
1563#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
1564#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
1565#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
1566#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
1567#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
1568#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
1200 1569
1201#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1570#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1202#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1571#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1213,6 +1582,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1213#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 1582#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1214#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 1583#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1215#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 1584#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1585#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1216#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 1586#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1217#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL 1587#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1218#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL 1588#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
@@ -1228,6 +1598,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1228#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL 1598#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1229#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL 1599#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1230#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1600#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1601#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1231 1602
1232#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1603#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1233#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1604#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1262,6 +1633,53 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1262#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1633#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1263#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL 1634#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1264 1635
1636#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1637#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1638#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
1639#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
1640#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
1641#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
1642#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
1643#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
1644#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
1645#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
1646#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
1647#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
1648#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1649#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1650#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1651#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
1652#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
1653#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
1654#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
1655#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
1656#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
1657#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
1658#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1659#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1660#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1661#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1662#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1663#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1664#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1665#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1666#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1667#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1668#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1669#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1670#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1671#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1672#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1673#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1674#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
1675#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
1676#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
1677#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
1678#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
1679#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
1680#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1681#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1682
1265#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1683#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1266#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1684#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1267#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 1685#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
@@ -1309,6 +1727,59 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1309#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL 1727#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1310#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL 1728#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1311 1729
1730#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1731#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1732#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
1733#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
1734#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
1735#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
1736#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
1737#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
1738#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
1739#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
1740#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
1741#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
1742#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1743#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1744#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1745#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
1746#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
1747#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
1748#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
1749#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
1750#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
1751#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
1752#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
1753#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
1754#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
1755#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1756#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1757#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1758#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1759#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1760#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1761#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1762#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1763#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1764#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1765#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1766#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1767#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1768#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1769#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1770#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1771#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
1772#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
1773#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
1774#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
1775#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
1776#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
1777#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1778#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
1779#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
1780#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
1781#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1782
1312union uvh_lb_bau_misc_control_u { 1783union uvh_lb_bau_misc_control_u {
1313 unsigned long v; 1784 unsigned long v;
1314 struct uvh_lb_bau_misc_control_s { 1785 struct uvh_lb_bau_misc_control_s {
@@ -1327,7 +1798,8 @@ union uvh_lb_bau_misc_control_u {
1327 unsigned long programmed_initial_priority:3; /* RW */ 1798 unsigned long programmed_initial_priority:3; /* RW */
1328 unsigned long use_incoming_priority:1; /* RW */ 1799 unsigned long use_incoming_priority:1; /* RW */
1329 unsigned long enable_programmed_initial_priority:1;/* RW */ 1800 unsigned long enable_programmed_initial_priority:1;/* RW */
1330 unsigned long rsvd_29_63:35; 1801 unsigned long rsvd_29_47:19;
1802 unsigned long fun:16; /* RW */
1331 } s; 1803 } s;
1332 struct uv1h_lb_bau_misc_control_s { 1804 struct uv1h_lb_bau_misc_control_s {
1333 unsigned long rejection_delay:8; /* RW */ 1805 unsigned long rejection_delay:8; /* RW */
@@ -1348,6 +1820,32 @@ union uvh_lb_bau_misc_control_u {
1348 unsigned long rsvd_29_47:19; 1820 unsigned long rsvd_29_47:19;
1349 unsigned long fun:16; /* RW */ 1821 unsigned long fun:16; /* RW */
1350 } s1; 1822 } s1;
1823 struct uvxh_lb_bau_misc_control_s {
1824 unsigned long rejection_delay:8; /* RW */
1825 unsigned long apic_mode:1; /* RW */
1826 unsigned long force_broadcast:1; /* RW */
1827 unsigned long force_lock_nop:1; /* RW */
1828 unsigned long qpi_agent_presence_vector:3; /* RW */
1829 unsigned long descriptor_fetch_mode:1; /* RW */
1830 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1831 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1832 unsigned long enable_dual_mapping_mode:1; /* RW */
1833 unsigned long vga_io_port_decode_enable:1; /* RW */
1834 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1835 unsigned long suppress_dest_registration:1; /* RW */
1836 unsigned long programmed_initial_priority:3; /* RW */
1837 unsigned long use_incoming_priority:1; /* RW */
1838 unsigned long enable_programmed_initial_priority:1;/* RW */
1839 unsigned long enable_automatic_apic_mode_selection:1;/* RW */
1840 unsigned long apic_mode_status:1; /* RO */
1841 unsigned long suppress_interrupts_to_self:1; /* RW */
1842 unsigned long enable_lock_based_system_flush:1;/* RW */
1843 unsigned long enable_extended_sb_status:1; /* RW */
1844 unsigned long suppress_int_prio_udt_to_self:1;/* RW */
1845 unsigned long use_legacy_descriptor_formats:1;/* RW */
1846 unsigned long rsvd_36_47:12;
1847 unsigned long fun:16; /* RW */
1848 } sx;
1351 struct uv2h_lb_bau_misc_control_s { 1849 struct uv2h_lb_bau_misc_control_s {
1352 unsigned long rejection_delay:8; /* RW */ 1850 unsigned long rejection_delay:8; /* RW */
1353 unsigned long apic_mode:1; /* RW */ 1851 unsigned long apic_mode:1; /* RW */
@@ -1374,13 +1872,42 @@ union uvh_lb_bau_misc_control_u {
1374 unsigned long rsvd_36_47:12; 1872 unsigned long rsvd_36_47:12;
1375 unsigned long fun:16; /* RW */ 1873 unsigned long fun:16; /* RW */
1376 } s2; 1874 } s2;
1875 struct uv3h_lb_bau_misc_control_s {
1876 unsigned long rejection_delay:8; /* RW */
1877 unsigned long apic_mode:1; /* RW */
1878 unsigned long force_broadcast:1; /* RW */
1879 unsigned long force_lock_nop:1; /* RW */
1880 unsigned long qpi_agent_presence_vector:3; /* RW */
1881 unsigned long descriptor_fetch_mode:1; /* RW */
1882 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1883 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1884 unsigned long enable_dual_mapping_mode:1; /* RW */
1885 unsigned long vga_io_port_decode_enable:1; /* RW */
1886 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1887 unsigned long suppress_dest_registration:1; /* RW */
1888 unsigned long programmed_initial_priority:3; /* RW */
1889 unsigned long use_incoming_priority:1; /* RW */
1890 unsigned long enable_programmed_initial_priority:1;/* RW */
1891 unsigned long enable_automatic_apic_mode_selection:1;/* RW */
1892 unsigned long apic_mode_status:1; /* RO */
1893 unsigned long suppress_interrupts_to_self:1; /* RW */
1894 unsigned long enable_lock_based_system_flush:1;/* RW */
1895 unsigned long enable_extended_sb_status:1; /* RW */
1896 unsigned long suppress_int_prio_udt_to_self:1;/* RW */
1897 unsigned long use_legacy_descriptor_formats:1;/* RW */
1898 unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
1899 unsigned long enable_intd_prefetch_hint:1; /* RW */
1900 unsigned long thread_kill_timebase:8; /* RW */
1901 unsigned long rsvd_46_47:2;
1902 unsigned long fun:16; /* RW */
1903 } s3;
1377}; 1904};
1378 1905
1379/* ========================================================================= */ 1906/* ========================================================================= */
1380/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 1907/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
1381/* ========================================================================= */ 1908/* ========================================================================= */
1382#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 1909#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
1383#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 1910#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
1384 1911
1385#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 1912#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
1386#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 1913#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
@@ -1402,8 +1929,8 @@ union uvh_lb_bau_sb_activation_control_u {
1402/* ========================================================================= */ 1929/* ========================================================================= */
1403/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ 1930/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
1404/* ========================================================================= */ 1931/* ========================================================================= */
1405#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL 1932#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
1406#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 1933#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
1407 1934
1408#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 1935#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
1409#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL 1936#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -1418,8 +1945,8 @@ union uvh_lb_bau_sb_activation_status_0_u {
1418/* ========================================================================= */ 1945/* ========================================================================= */
1419/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ 1946/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
1420/* ========================================================================= */ 1947/* ========================================================================= */
1421#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL 1948#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
1422#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 1949#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
1423 1950
1424#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 1951#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
1425#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL 1952#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -1434,8 +1961,8 @@ union uvh_lb_bau_sb_activation_status_1_u {
1434/* ========================================================================= */ 1961/* ========================================================================= */
1435/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ 1962/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
1436/* ========================================================================= */ 1963/* ========================================================================= */
1437#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL 1964#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
1438#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 1965#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
1439 1966
1440#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 1967#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
1441#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 1968#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
@@ -1456,7 +1983,10 @@ union uvh_lb_bau_sb_descriptor_base_u {
1456/* ========================================================================= */ 1983/* ========================================================================= */
1457/* UVH_NODE_ID */ 1984/* UVH_NODE_ID */
1458/* ========================================================================= */ 1985/* ========================================================================= */
1459#define UVH_NODE_ID 0x0UL 1986#define UVH_NODE_ID 0x0UL
1987#define UV1H_NODE_ID 0x0UL
1988#define UV2H_NODE_ID 0x0UL
1989#define UV3H_NODE_ID 0x0UL
1460 1990
1461#define UVH_NODE_ID_FORCE1_SHFT 0 1991#define UVH_NODE_ID_FORCE1_SHFT 0
1462#define UVH_NODE_ID_MANUFACTURER_SHFT 1 1992#define UVH_NODE_ID_MANUFACTURER_SHFT 1
@@ -1484,6 +2014,21 @@ union uvh_lb_bau_sb_descriptor_base_u {
1484#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL 2014#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
1485#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL 2015#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
1486 2016
2017#define UVXH_NODE_ID_FORCE1_SHFT 0
2018#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
2019#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
2020#define UVXH_NODE_ID_REVISION_SHFT 28
2021#define UVXH_NODE_ID_NODE_ID_SHFT 32
2022#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
2023#define UVXH_NODE_ID_NI_PORT_SHFT 57
2024#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
2025#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
2026#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
2027#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
2028#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
2029#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
2030#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
2031
1487#define UV2H_NODE_ID_FORCE1_SHFT 0 2032#define UV2H_NODE_ID_FORCE1_SHFT 0
1488#define UV2H_NODE_ID_MANUFACTURER_SHFT 1 2033#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
1489#define UV2H_NODE_ID_PART_NUMBER_SHFT 12 2034#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
@@ -1499,6 +2044,25 @@ union uvh_lb_bau_sb_descriptor_base_u {
1499#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL 2044#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
1500#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL 2045#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
1501 2046
2047#define UV3H_NODE_ID_FORCE1_SHFT 0
2048#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
2049#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
2050#define UV3H_NODE_ID_REVISION_SHFT 28
2051#define UV3H_NODE_ID_NODE_ID_SHFT 32
2052#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
2053#define UV3H_NODE_ID_RESERVED_2_SHFT 49
2054#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
2055#define UV3H_NODE_ID_NI_PORT_SHFT 57
2056#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
2057#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
2058#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
2059#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
2060#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
2061#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
2062#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
2063#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
2064#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
2065
1502union uvh_node_id_u { 2066union uvh_node_id_u {
1503 unsigned long v; 2067 unsigned long v;
1504 struct uvh_node_id_s { 2068 struct uvh_node_id_s {
@@ -1521,6 +2085,17 @@ union uvh_node_id_u {
1521 unsigned long ni_port:4; /* RO */ 2085 unsigned long ni_port:4; /* RO */
1522 unsigned long rsvd_60_63:4; 2086 unsigned long rsvd_60_63:4;
1523 } s1; 2087 } s1;
2088 struct uvxh_node_id_s {
2089 unsigned long force1:1; /* RO */
2090 unsigned long manufacturer:11; /* RO */
2091 unsigned long part_number:16; /* RO */
2092 unsigned long revision:4; /* RO */
2093 unsigned long node_id:15; /* RW */
2094 unsigned long rsvd_47_49:3;
2095 unsigned long nodes_per_bit:7; /* RO */
2096 unsigned long ni_port:5; /* RO */
2097 unsigned long rsvd_62_63:2;
2098 } sx;
1524 struct uv2h_node_id_s { 2099 struct uv2h_node_id_s {
1525 unsigned long force1:1; /* RO */ 2100 unsigned long force1:1; /* RO */
1526 unsigned long manufacturer:11; /* RO */ 2101 unsigned long manufacturer:11; /* RO */
@@ -1532,13 +2107,26 @@ union uvh_node_id_u {
1532 unsigned long ni_port:5; /* RO */ 2107 unsigned long ni_port:5; /* RO */
1533 unsigned long rsvd_62_63:2; 2108 unsigned long rsvd_62_63:2;
1534 } s2; 2109 } s2;
2110 struct uv3h_node_id_s {
2111 unsigned long force1:1; /* RO */
2112 unsigned long manufacturer:11; /* RO */
2113 unsigned long part_number:16; /* RO */
2114 unsigned long revision:4; /* RO */
2115 unsigned long node_id:15; /* RW */
2116 unsigned long rsvd_47:1;
2117 unsigned long router_select:1; /* RO */
2118 unsigned long rsvd_49:1;
2119 unsigned long nodes_per_bit:7; /* RO */
2120 unsigned long ni_port:5; /* RO */
2121 unsigned long rsvd_62_63:2;
2122 } s3;
1535}; 2123};
1536 2124
1537/* ========================================================================= */ 2125/* ========================================================================= */
1538/* UVH_NODE_PRESENT_TABLE */ 2126/* UVH_NODE_PRESENT_TABLE */
1539/* ========================================================================= */ 2127/* ========================================================================= */
1540#define UVH_NODE_PRESENT_TABLE 0x1400UL 2128#define UVH_NODE_PRESENT_TABLE 0x1400UL
1541#define UVH_NODE_PRESENT_TABLE_DEPTH 16 2129#define UVH_NODE_PRESENT_TABLE_DEPTH 16
1542 2130
1543#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 2131#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
1544#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL 2132#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
@@ -1553,7 +2141,7 @@ union uvh_node_present_table_u {
1553/* ========================================================================= */ 2141/* ========================================================================= */
1554/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ 2142/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
1555/* ========================================================================= */ 2143/* ========================================================================= */
1556#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL 2144#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
1557 2145
1558#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 2146#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
1559#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 2147#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -1577,7 +2165,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
1577/* ========================================================================= */ 2165/* ========================================================================= */
1578/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ 2166/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
1579/* ========================================================================= */ 2167/* ========================================================================= */
1580#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL 2168#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
1581 2169
1582#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 2170#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
1583#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 2171#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -1601,7 +2189,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
1601/* ========================================================================= */ 2189/* ========================================================================= */
1602/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ 2190/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
1603/* ========================================================================= */ 2191/* ========================================================================= */
1604#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL 2192#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
1605 2193
1606#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 2194#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
1607#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 2195#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -1625,7 +2213,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
1625/* ========================================================================= */ 2213/* ========================================================================= */
1626/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 2214/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
1627/* ========================================================================= */ 2215/* ========================================================================= */
1628#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 2216#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
1629 2217
1630#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 2218#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
1631#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2219#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1642,7 +2230,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
1642/* ========================================================================= */ 2230/* ========================================================================= */
1643/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ 2231/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
1644/* ========================================================================= */ 2232/* ========================================================================= */
1645#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL 2233#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
1646 2234
1647#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 2235#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
1648#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2236#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1659,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
1659/* ========================================================================= */ 2247/* ========================================================================= */
1660/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ 2248/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
1661/* ========================================================================= */ 2249/* ========================================================================= */
1662#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL 2250#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
1663 2251
1664#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 2252#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
1665#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2253#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1676,7 +2264,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1676/* ========================================================================= */ 2264/* ========================================================================= */
1677/* UVH_RH_GAM_CONFIG_MMR */ 2265/* UVH_RH_GAM_CONFIG_MMR */
1678/* ========================================================================= */ 2266/* ========================================================================= */
1679#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL 2267#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
2268#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
2269#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
2270#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
1680 2271
1681#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 2272#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1682#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 2273#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
@@ -1690,11 +2281,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1690#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 2281#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1691#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL 2282#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
1692 2283
2284#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
2285#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
2286#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
2287#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
2288
1693#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 2289#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1694#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 2290#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1695#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL 2291#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1696#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 2292#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1697 2293
2294#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
2295#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
2296#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
2297#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
2298
1698union uvh_rh_gam_config_mmr_u { 2299union uvh_rh_gam_config_mmr_u {
1699 unsigned long v; 2300 unsigned long v;
1700 struct uvh_rh_gam_config_mmr_s { 2301 struct uvh_rh_gam_config_mmr_s {
@@ -1709,20 +2310,37 @@ union uvh_rh_gam_config_mmr_u {
1709 unsigned long mmiol_cfg:1; /* RW */ 2310 unsigned long mmiol_cfg:1; /* RW */
1710 unsigned long rsvd_13_63:51; 2311 unsigned long rsvd_13_63:51;
1711 } s1; 2312 } s1;
2313 struct uvxh_rh_gam_config_mmr_s {
2314 unsigned long m_skt:6; /* RW */
2315 unsigned long n_skt:4; /* RW */
2316 unsigned long rsvd_10_63:54;
2317 } sx;
1712 struct uv2h_rh_gam_config_mmr_s { 2318 struct uv2h_rh_gam_config_mmr_s {
1713 unsigned long m_skt:6; /* RW */ 2319 unsigned long m_skt:6; /* RW */
1714 unsigned long n_skt:4; /* RW */ 2320 unsigned long n_skt:4; /* RW */
1715 unsigned long rsvd_10_63:54; 2321 unsigned long rsvd_10_63:54;
1716 } s2; 2322 } s2;
2323 struct uv3h_rh_gam_config_mmr_s {
2324 unsigned long m_skt:6; /* RW */
2325 unsigned long n_skt:4; /* RW */
2326 unsigned long rsvd_10_63:54;
2327 } s3;
1717}; 2328};
1718 2329
1719/* ========================================================================= */ 2330/* ========================================================================= */
1720/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 2331/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
1721/* ========================================================================= */ 2332/* ========================================================================= */
1722#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 2333#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2334#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2335#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2336#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
1723 2337
1724#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2338#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2339#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2340#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1725#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 2341#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2342#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2343#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1726 2344
1727#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2345#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1728#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48 2346#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
@@ -1733,6 +2351,13 @@ union uvh_rh_gam_config_mmr_u {
1733#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 2351#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1734#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2352#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1735 2353
2354#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2355#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2356#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2357#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2358#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2359#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2360
1736#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2361#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1737#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 2362#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
1738#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 2363#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -1740,12 +2365,23 @@ union uvh_rh_gam_config_mmr_u {
1740#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 2365#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1741#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2366#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1742 2367
2368#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2369#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2370#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
2371#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2372#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2373#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2374#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
2375#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2376
1743union uvh_rh_gam_gru_overlay_config_mmr_u { 2377union uvh_rh_gam_gru_overlay_config_mmr_u {
1744 unsigned long v; 2378 unsigned long v;
1745 struct uvh_rh_gam_gru_overlay_config_mmr_s { 2379 struct uvh_rh_gam_gru_overlay_config_mmr_s {
1746 unsigned long rsvd_0_27:28; 2380 unsigned long rsvd_0_27:28;
1747 unsigned long base:18; /* RW */ 2381 unsigned long base:18; /* RW */
1748 unsigned long rsvd_46_62:17; 2382 unsigned long rsvd_46_51:6;
2383 unsigned long n_gru:4; /* RW */
2384 unsigned long rsvd_56_62:7;
1749 unsigned long enable:1; /* RW */ 2385 unsigned long enable:1; /* RW */
1750 } s; 2386 } s;
1751 struct uv1h_rh_gam_gru_overlay_config_mmr_s { 2387 struct uv1h_rh_gam_gru_overlay_config_mmr_s {
@@ -1758,6 +2394,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
1758 unsigned long rsvd_56_62:7; 2394 unsigned long rsvd_56_62:7;
1759 unsigned long enable:1; /* RW */ 2395 unsigned long enable:1; /* RW */
1760 } s1; 2396 } s1;
2397 struct uvxh_rh_gam_gru_overlay_config_mmr_s {
2398 unsigned long rsvd_0_27:28;
2399 unsigned long base:18; /* RW */
2400 unsigned long rsvd_46_51:6;
2401 unsigned long n_gru:4; /* RW */
2402 unsigned long rsvd_56_62:7;
2403 unsigned long enable:1; /* RW */
2404 } sx;
1761 struct uv2h_rh_gam_gru_overlay_config_mmr_s { 2405 struct uv2h_rh_gam_gru_overlay_config_mmr_s {
1762 unsigned long rsvd_0_27:28; 2406 unsigned long rsvd_0_27:28;
1763 unsigned long base:18; /* RW */ 2407 unsigned long base:18; /* RW */
@@ -1766,12 +2410,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
1766 unsigned long rsvd_56_62:7; 2410 unsigned long rsvd_56_62:7;
1767 unsigned long enable:1; /* RW */ 2411 unsigned long enable:1; /* RW */
1768 } s2; 2412 } s2;
2413 struct uv3h_rh_gam_gru_overlay_config_mmr_s {
2414 unsigned long rsvd_0_27:28;
2415 unsigned long base:18; /* RW */
2416 unsigned long rsvd_46_51:6;
2417 unsigned long n_gru:4; /* RW */
2418 unsigned long rsvd_56_61:6;
2419 unsigned long mode:1; /* RW */
2420 unsigned long enable:1; /* RW */
2421 } s3;
1769}; 2422};
1770 2423
1771/* ========================================================================= */ 2424/* ========================================================================= */
1772/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ 2425/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1773/* ========================================================================= */ 2426/* ========================================================================= */
1774#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL 2427#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
2428#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1775 2429
1776#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30 2430#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1777#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 2431#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
@@ -1814,10 +2468,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1814/* ========================================================================= */ 2468/* ========================================================================= */
1815/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ 2469/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
1816/* ========================================================================= */ 2470/* ========================================================================= */
1817#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL 2471#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2472#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2473#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2474#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
1818 2475
1819#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2476#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2477#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1820#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 2478#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2479#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1821 2480
1822#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2481#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1823#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 2482#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
@@ -1826,11 +2485,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1826#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL 2485#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
1827#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2486#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1828 2487
2488#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2489#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2490#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2491#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2492
1829#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2493#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1830#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 2494#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1831#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 2495#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1832#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2496#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1833 2497
2498#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2499#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2500#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2501#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2502
1834union uvh_rh_gam_mmr_overlay_config_mmr_u { 2503union uvh_rh_gam_mmr_overlay_config_mmr_u {
1835 unsigned long v; 2504 unsigned long v;
1836 struct uvh_rh_gam_mmr_overlay_config_mmr_s { 2505 struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -1846,18 +2515,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
1846 unsigned long rsvd_47_62:16; 2515 unsigned long rsvd_47_62:16;
1847 unsigned long enable:1; /* RW */ 2516 unsigned long enable:1; /* RW */
1848 } s1; 2517 } s1;
2518 struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
2519 unsigned long rsvd_0_25:26;
2520 unsigned long base:20; /* RW */
2521 unsigned long rsvd_46_62:17;
2522 unsigned long enable:1; /* RW */
2523 } sx;
1849 struct uv2h_rh_gam_mmr_overlay_config_mmr_s { 2524 struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
1850 unsigned long rsvd_0_25:26; 2525 unsigned long rsvd_0_25:26;
1851 unsigned long base:20; /* RW */ 2526 unsigned long base:20; /* RW */
1852 unsigned long rsvd_46_62:17; 2527 unsigned long rsvd_46_62:17;
1853 unsigned long enable:1; /* RW */ 2528 unsigned long enable:1; /* RW */
1854 } s2; 2529 } s2;
2530 struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
2531 unsigned long rsvd_0_25:26;
2532 unsigned long base:20; /* RW */
2533 unsigned long rsvd_46_62:17;
2534 unsigned long enable:1; /* RW */
2535 } s3;
1855}; 2536};
1856 2537
1857/* ========================================================================= */ 2538/* ========================================================================= */
1858/* UVH_RTC */ 2539/* UVH_RTC */
1859/* ========================================================================= */ 2540/* ========================================================================= */
1860#define UVH_RTC 0x340000UL 2541#define UVH_RTC 0x340000UL
1861 2542
1862#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 2543#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
1863#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL 2544#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -1873,7 +2554,7 @@ union uvh_rtc_u {
1873/* ========================================================================= */ 2554/* ========================================================================= */
1874/* UVH_RTC1_INT_CONFIG */ 2555/* UVH_RTC1_INT_CONFIG */
1875/* ========================================================================= */ 2556/* ========================================================================= */
1876#define UVH_RTC1_INT_CONFIG 0x615c0UL 2557#define UVH_RTC1_INT_CONFIG 0x615c0UL
1877 2558
1878#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 2559#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1879#define UVH_RTC1_INT_CONFIG_DM_SHFT 8 2560#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
@@ -1911,8 +2592,8 @@ union uvh_rtc1_int_config_u {
1911/* ========================================================================= */ 2592/* ========================================================================= */
1912/* UVH_SCRATCH5 */ 2593/* UVH_SCRATCH5 */
1913/* ========================================================================= */ 2594/* ========================================================================= */
1914#define UVH_SCRATCH5 0x2d0200UL 2595#define UVH_SCRATCH5 0x2d0200UL
1915#define UVH_SCRATCH5_32 0x778 2596#define UVH_SCRATCH5_32 0x778
1916 2597
1917#define UVH_SCRATCH5_SCRATCH5_SHFT 0 2598#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1918#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL 2599#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
@@ -1925,79 +2606,79 @@ union uvh_scratch5_u {
1925}; 2606};
1926 2607
1927/* ========================================================================= */ 2608/* ========================================================================= */
1928/* UV2H_EVENT_OCCURRED2 */ 2609/* UVXH_EVENT_OCCURRED2 */
1929/* ========================================================================= */ 2610/* ========================================================================= */
1930#define UV2H_EVENT_OCCURRED2 0x70100UL 2611#define UVXH_EVENT_OCCURRED2 0x70100UL
1931#define UV2H_EVENT_OCCURRED2_32 0xb68 2612#define UVXH_EVENT_OCCURRED2_32 0xb68
1932 2613
1933#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 2614#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0
1934#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 2615#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1
1935#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 2616#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2
1936#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 2617#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3
1937#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 2618#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4
1938#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 2619#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5
1939#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 2620#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6
1940#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 2621#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7
1941#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 2622#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8
1942#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 2623#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9
1943#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 2624#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10
1944#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 2625#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11
1945#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 2626#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12
1946#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 2627#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13
1947#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 2628#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14
1948#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 2629#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15
1949#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 2630#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16
1950#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 2631#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17
1951#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 2632#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18
1952#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 2633#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19
1953#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 2634#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20
1954#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 2635#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21
1955#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 2636#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22
1956#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 2637#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23
1957#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 2638#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24
1958#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 2639#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25
1959#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 2640#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26
1960#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 2641#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27
1961#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 2642#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28
1962#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 2643#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29
1963#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 2644#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30
1964#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 2645#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31
1965#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL 2646#define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
1966#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL 2647#define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
1967#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL 2648#define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
1968#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL 2649#define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
1969#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL 2650#define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
1970#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL 2651#define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
1971#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL 2652#define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
1972#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL 2653#define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
1973#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL 2654#define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
1974#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL 2655#define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
1975#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL 2656#define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
1976#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL 2657#define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
1977#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL 2658#define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
1978#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL 2659#define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
1979#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL 2660#define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
1980#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL 2661#define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
1981#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL 2662#define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
1982#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL 2663#define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
1983#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL 2664#define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
1984#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL 2665#define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
1985#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL 2666#define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
1986#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL 2667#define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
1987#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL 2668#define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
1988#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL 2669#define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
1989#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL 2670#define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
1990#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL 2671#define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
1991#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL 2672#define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
1992#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL 2673#define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
1993#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL 2674#define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
1994#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL 2675#define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
1995#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL 2676#define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
1996#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL 2677#define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
1997 2678
1998union uv2h_event_occurred2_u { 2679union uvxh_event_occurred2_u {
1999 unsigned long v; 2680 unsigned long v;
2000 struct uv2h_event_occurred2_s { 2681 struct uvxh_event_occurred2_s {
2001 unsigned long rtc_0:1; /* RW */ 2682 unsigned long rtc_0:1; /* RW */
2002 unsigned long rtc_1:1; /* RW */ 2683 unsigned long rtc_1:1; /* RW */
2003 unsigned long rtc_2:1; /* RW */ 2684 unsigned long rtc_2:1; /* RW */
@@ -2031,29 +2712,46 @@ union uv2h_event_occurred2_u {
2031 unsigned long rtc_30:1; /* RW */ 2712 unsigned long rtc_30:1; /* RW */
2032 unsigned long rtc_31:1; /* RW */ 2713 unsigned long rtc_31:1; /* RW */
2033 unsigned long rsvd_32_63:32; 2714 unsigned long rsvd_32_63:32;
2034 } s1; 2715 } sx;
2035}; 2716};
2036 2717
2037/* ========================================================================= */ 2718/* ========================================================================= */
2038/* UV2H_EVENT_OCCURRED2_ALIAS */ 2719/* UVXH_EVENT_OCCURRED2_ALIAS */
2039/* ========================================================================= */ 2720/* ========================================================================= */
2040#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL 2721#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
2041#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70 2722#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
2723
2042 2724
2043/* ========================================================================= */ 2725/* ========================================================================= */
2044/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */ 2726/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
2045/* ========================================================================= */ 2727/* ========================================================================= */
2046#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL 2728#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2047#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 2729#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2730#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2731#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
2732#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
2733#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
2734
2735#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2736#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2048 2737
2049#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 2738#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2050#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL 2739#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2051 2740
2052union uv2h_lb_bau_sb_activation_status_2_u { 2741#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2742#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2743
2744union uvxh_lb_bau_sb_activation_status_2_u {
2053 unsigned long v; 2745 unsigned long v;
2746 struct uvxh_lb_bau_sb_activation_status_2_s {
2747 unsigned long aux_error:64; /* RW */
2748 } sx;
2054 struct uv2h_lb_bau_sb_activation_status_2_s { 2749 struct uv2h_lb_bau_sb_activation_status_2_s {
2055 unsigned long aux_error:64; /* RW */ 2750 unsigned long aux_error:64; /* RW */
2056 } s1; 2751 } s2;
2752 struct uv3h_lb_bau_sb_activation_status_2_s {
2753 unsigned long aux_error:64; /* RW */
2754 } s3;
2057}; 2755};
2058 2756
2059/* ========================================================================= */ 2757/* ========================================================================= */
@@ -2073,5 +2771,87 @@ union uv1h_lb_target_physical_apic_id_mask_u {
2073 } s1; 2771 } s1;
2074}; 2772};
2075 2773
2774/* ========================================================================= */
2775/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
2776/* ========================================================================= */
2777#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
2778
2779#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
2780#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
2781#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
2782#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
2783#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
2784#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
2785
2786union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
2787 unsigned long v;
2788 struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
2789 unsigned long rsvd_0_25:26;
2790 unsigned long base:20; /* RW */
2791 unsigned long m_io:6; /* RW */
2792 unsigned long n_io:4;
2793 unsigned long rsvd_56_62:7;
2794 unsigned long enable:1; /* RW */
2795 } s3;
2796};
2797
2798/* ========================================================================= */
2799/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
2800/* ========================================================================= */
2801#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
2802
2803#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
2804#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
2805#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
2806#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
2807#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
2808#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
2809
2810union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
2811 unsigned long v;
2812 struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
2813 unsigned long rsvd_0_25:26;
2814 unsigned long base:20; /* RW */
2815 unsigned long m_io:6; /* RW */
2816 unsigned long n_io:4;
2817 unsigned long rsvd_56_62:7;
2818 unsigned long enable:1; /* RW */
2819 } s3;
2820};
2821
2822/* ========================================================================= */
2823/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
2824/* ========================================================================= */
2825#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
2826#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
2827
2828#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
2829#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
2830
2831union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
2832 unsigned long v;
2833 struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
2834 unsigned long nasid:15; /* RW */
2835 unsigned long rsvd_15_63:49;
2836 } s3;
2837};
2838
2839/* ========================================================================= */
2840/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
2841/* ========================================================================= */
2842#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
2843#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
2844
2845#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
2846#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
2847
2848union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
2849 unsigned long v;
2850 struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
2851 unsigned long nasid:15; /* RW */
2852 unsigned long rsvd_15_63:49;
2853 } s3;
2854};
2855
2076 2856
2077#endif /* _ASM_X86_UV_UV_MMRS_H */ 2857#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 57693498519c..7669941cc9d2 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,19 +181,38 @@ struct x86_platform_ops {
181}; 181};
182 182
183struct pci_dev; 183struct pci_dev;
184struct msi_msg;
184 185
185struct x86_msi_ops { 186struct x86_msi_ops {
186 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 187 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
188 void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
189 unsigned int dest, struct msi_msg *msg,
190 u8 hpet_id);
187 void (*teardown_msi_irq)(unsigned int irq); 191 void (*teardown_msi_irq)(unsigned int irq);
188 void (*teardown_msi_irqs)(struct pci_dev *dev); 192 void (*teardown_msi_irqs)(struct pci_dev *dev);
189 void (*restore_msi_irqs)(struct pci_dev *dev, int irq); 193 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
194 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
190}; 195};
191 196
197struct IO_APIC_route_entry;
198struct io_apic_irq_attr;
199struct irq_data;
200struct cpumask;
201
192struct x86_io_apic_ops { 202struct x86_io_apic_ops {
193 void (*init) (void); 203 void (*init) (void);
194 unsigned int (*read) (unsigned int apic, unsigned int reg); 204 unsigned int (*read) (unsigned int apic, unsigned int reg);
195 void (*write) (unsigned int apic, unsigned int reg, unsigned int value); 205 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
196 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value); 206 void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
207 void (*disable)(void);
208 void (*print_entries)(unsigned int apic, unsigned int nr_entries);
209 int (*set_affinity)(struct irq_data *data,
210 const struct cpumask *mask,
211 bool force);
212 int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
213 unsigned int destination, int vector,
214 struct io_apic_irq_attr *attr);
215 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
197}; 216};
198 217
199extern struct x86_init_ops x86_init; 218extern struct x86_init_ops x86_init;
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index f8fde90bc45e..d8829751b3f8 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,10 +1,499 @@
1#ifdef CONFIG_KMEMCHECK 1#ifdef CONFIG_KMEMCHECK
2/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ 2/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
3# include <asm-generic/xor.h> 3# include <asm-generic/xor.h>
4#elif !defined(_ASM_X86_XOR_H)
5#define _ASM_X86_XOR_H
6
7/*
8 * Optimized RAID-5 checksumming functions for SSE.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * You should have received a copy of the GNU General Public License
16 * (for example /usr/src/linux/COPYING); if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/*
21 * Cache avoiding checksumming functions utilizing KNI instructions
22 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
23 */
24
25/*
26 * Based on
27 * High-speed RAID5 checksumming functions utilizing SSE instructions.
28 * Copyright (C) 1998 Ingo Molnar.
29 */
30
31/*
32 * x86-64 changes / gcc fixes from Andi Kleen.
33 * Copyright 2002 Andi Kleen, SuSE Labs.
34 *
35 * This hasn't been optimized for the hammer yet, but there are likely
36 * no advantages to be gotten from x86-64 here anyways.
37 */
38
39#include <asm/i387.h>
40
41#ifdef CONFIG_X86_32
42/* reduce register pressure */
43# define XOR_CONSTANT_CONSTRAINT "i"
4#else 44#else
45# define XOR_CONSTANT_CONSTRAINT "re"
46#endif
47
48#define OFFS(x) "16*("#x")"
49#define PF_OFFS(x) "256+16*("#x")"
50#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
51#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
52#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
53#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
54#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
55#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
56#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
57#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
58#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
59#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
60#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
61#define NOP(x)
62
63#define BLK64(pf, op, i) \
64 pf(i) \
65 op(i, 0) \
66 op(i + 1, 1) \
67 op(i + 2, 2) \
68 op(i + 3, 3)
69
70static void
71xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
72{
73 unsigned long lines = bytes >> 8;
74
75 kernel_fpu_begin();
76
77 asm volatile(
78#undef BLOCK
79#define BLOCK(i) \
80 LD(i, 0) \
81 LD(i + 1, 1) \
82 PF1(i) \
83 PF1(i + 2) \
84 LD(i + 2, 2) \
85 LD(i + 3, 3) \
86 PF0(i + 4) \
87 PF0(i + 6) \
88 XO1(i, 0) \
89 XO1(i + 1, 1) \
90 XO1(i + 2, 2) \
91 XO1(i + 3, 3) \
92 ST(i, 0) \
93 ST(i + 1, 1) \
94 ST(i + 2, 2) \
95 ST(i + 3, 3) \
96
97
98 PF0(0)
99 PF0(2)
100
101 " .align 32 ;\n"
102 " 1: ;\n"
103
104 BLOCK(0)
105 BLOCK(4)
106 BLOCK(8)
107 BLOCK(12)
108
109 " add %[inc], %[p1] ;\n"
110 " add %[inc], %[p2] ;\n"
111 " dec %[cnt] ;\n"
112 " jnz 1b ;\n"
113 : [cnt] "+r" (lines),
114 [p1] "+r" (p1), [p2] "+r" (p2)
115 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
116 : "memory");
117
118 kernel_fpu_end();
119}
120
121static void
122xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
123{
124 unsigned long lines = bytes >> 8;
125
126 kernel_fpu_begin();
127
128 asm volatile(
129#undef BLOCK
130#define BLOCK(i) \
131 BLK64(PF0, LD, i) \
132 BLK64(PF1, XO1, i) \
133 BLK64(NOP, ST, i) \
134
135 " .align 32 ;\n"
136 " 1: ;\n"
137
138 BLOCK(0)
139 BLOCK(4)
140 BLOCK(8)
141 BLOCK(12)
142
143 " add %[inc], %[p1] ;\n"
144 " add %[inc], %[p2] ;\n"
145 " dec %[cnt] ;\n"
146 " jnz 1b ;\n"
147 : [cnt] "+r" (lines),
148 [p1] "+r" (p1), [p2] "+r" (p2)
149 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
150 : "memory");
151
152 kernel_fpu_end();
153}
154
155static void
156xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
157 unsigned long *p3)
158{
159 unsigned long lines = bytes >> 8;
160
161 kernel_fpu_begin();
162
163 asm volatile(
164#undef BLOCK
165#define BLOCK(i) \
166 PF1(i) \
167 PF1(i + 2) \
168 LD(i, 0) \
169 LD(i + 1, 1) \
170 LD(i + 2, 2) \
171 LD(i + 3, 3) \
172 PF2(i) \
173 PF2(i + 2) \
174 PF0(i + 4) \
175 PF0(i + 6) \
176 XO1(i, 0) \
177 XO1(i + 1, 1) \
178 XO1(i + 2, 2) \
179 XO1(i + 3, 3) \
180 XO2(i, 0) \
181 XO2(i + 1, 1) \
182 XO2(i + 2, 2) \
183 XO2(i + 3, 3) \
184 ST(i, 0) \
185 ST(i + 1, 1) \
186 ST(i + 2, 2) \
187 ST(i + 3, 3) \
188
189
190 PF0(0)
191 PF0(2)
192
193 " .align 32 ;\n"
194 " 1: ;\n"
195
196 BLOCK(0)
197 BLOCK(4)
198 BLOCK(8)
199 BLOCK(12)
200
201 " add %[inc], %[p1] ;\n"
202 " add %[inc], %[p2] ;\n"
203 " add %[inc], %[p3] ;\n"
204 " dec %[cnt] ;\n"
205 " jnz 1b ;\n"
206 : [cnt] "+r" (lines),
207 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
208 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
209 : "memory");
210
211 kernel_fpu_end();
212}
213
214static void
215xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
216 unsigned long *p3)
217{
218 unsigned long lines = bytes >> 8;
219
220 kernel_fpu_begin();
221
222 asm volatile(
223#undef BLOCK
224#define BLOCK(i) \
225 BLK64(PF0, LD, i) \
226 BLK64(PF1, XO1, i) \
227 BLK64(PF2, XO2, i) \
228 BLK64(NOP, ST, i) \
229
230 " .align 32 ;\n"
231 " 1: ;\n"
232
233 BLOCK(0)
234 BLOCK(4)
235 BLOCK(8)
236 BLOCK(12)
237
238 " add %[inc], %[p1] ;\n"
239 " add %[inc], %[p2] ;\n"
240 " add %[inc], %[p3] ;\n"
241 " dec %[cnt] ;\n"
242 " jnz 1b ;\n"
243 : [cnt] "+r" (lines),
244 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
245 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
246 : "memory");
247
248 kernel_fpu_end();
249}
250
251static void
252xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
253 unsigned long *p3, unsigned long *p4)
254{
255 unsigned long lines = bytes >> 8;
256
257 kernel_fpu_begin();
258
259 asm volatile(
260#undef BLOCK
261#define BLOCK(i) \
262 PF1(i) \
263 PF1(i + 2) \
264 LD(i, 0) \
265 LD(i + 1, 1) \
266 LD(i + 2, 2) \
267 LD(i + 3, 3) \
268 PF2(i) \
269 PF2(i + 2) \
270 XO1(i, 0) \
271 XO1(i + 1, 1) \
272 XO1(i + 2, 2) \
273 XO1(i + 3, 3) \
274 PF3(i) \
275 PF3(i + 2) \
276 PF0(i + 4) \
277 PF0(i + 6) \
278 XO2(i, 0) \
279 XO2(i + 1, 1) \
280 XO2(i + 2, 2) \
281 XO2(i + 3, 3) \
282 XO3(i, 0) \
283 XO3(i + 1, 1) \
284 XO3(i + 2, 2) \
285 XO3(i + 3, 3) \
286 ST(i, 0) \
287 ST(i + 1, 1) \
288 ST(i + 2, 2) \
289 ST(i + 3, 3) \
290
291
292 PF0(0)
293 PF0(2)
294
295 " .align 32 ;\n"
296 " 1: ;\n"
297
298 BLOCK(0)
299 BLOCK(4)
300 BLOCK(8)
301 BLOCK(12)
302
303 " add %[inc], %[p1] ;\n"
304 " add %[inc], %[p2] ;\n"
305 " add %[inc], %[p3] ;\n"
306 " add %[inc], %[p4] ;\n"
307 " dec %[cnt] ;\n"
308 " jnz 1b ;\n"
309 : [cnt] "+r" (lines), [p1] "+r" (p1),
310 [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
311 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
312 : "memory");
313
314 kernel_fpu_end();
315}
316
317static void
318xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
319 unsigned long *p3, unsigned long *p4)
320{
321 unsigned long lines = bytes >> 8;
322
323 kernel_fpu_begin();
324
325 asm volatile(
326#undef BLOCK
327#define BLOCK(i) \
328 BLK64(PF0, LD, i) \
329 BLK64(PF1, XO1, i) \
330 BLK64(PF2, XO2, i) \
331 BLK64(PF3, XO3, i) \
332 BLK64(NOP, ST, i) \
333
334 " .align 32 ;\n"
335 " 1: ;\n"
336
337 BLOCK(0)
338 BLOCK(4)
339 BLOCK(8)
340 BLOCK(12)
341
342 " add %[inc], %[p1] ;\n"
343 " add %[inc], %[p2] ;\n"
344 " add %[inc], %[p3] ;\n"
345 " add %[inc], %[p4] ;\n"
346 " dec %[cnt] ;\n"
347 " jnz 1b ;\n"
348 : [cnt] "+r" (lines), [p1] "+r" (p1),
349 [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
350 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
351 : "memory");
352
353 kernel_fpu_end();
354}
355
356static void
357xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
358 unsigned long *p3, unsigned long *p4, unsigned long *p5)
359{
360 unsigned long lines = bytes >> 8;
361
362 kernel_fpu_begin();
363
364 asm volatile(
365#undef BLOCK
366#define BLOCK(i) \
367 PF1(i) \
368 PF1(i + 2) \
369 LD(i, 0) \
370 LD(i + 1, 1) \
371 LD(i + 2, 2) \
372 LD(i + 3, 3) \
373 PF2(i) \
374 PF2(i + 2) \
375 XO1(i, 0) \
376 XO1(i + 1, 1) \
377 XO1(i + 2, 2) \
378 XO1(i + 3, 3) \
379 PF3(i) \
380 PF3(i + 2) \
381 XO2(i, 0) \
382 XO2(i + 1, 1) \
383 XO2(i + 2, 2) \
384 XO2(i + 3, 3) \
385 PF4(i) \
386 PF4(i + 2) \
387 PF0(i + 4) \
388 PF0(i + 6) \
389 XO3(i, 0) \
390 XO3(i + 1, 1) \
391 XO3(i + 2, 2) \
392 XO3(i + 3, 3) \
393 XO4(i, 0) \
394 XO4(i + 1, 1) \
395 XO4(i + 2, 2) \
396 XO4(i + 3, 3) \
397 ST(i, 0) \
398 ST(i + 1, 1) \
399 ST(i + 2, 2) \
400 ST(i + 3, 3) \
401
402
403 PF0(0)
404 PF0(2)
405
406 " .align 32 ;\n"
407 " 1: ;\n"
408
409 BLOCK(0)
410 BLOCK(4)
411 BLOCK(8)
412 BLOCK(12)
413
414 " add %[inc], %[p1] ;\n"
415 " add %[inc], %[p2] ;\n"
416 " add %[inc], %[p3] ;\n"
417 " add %[inc], %[p4] ;\n"
418 " add %[inc], %[p5] ;\n"
419 " dec %[cnt] ;\n"
420 " jnz 1b ;\n"
421 : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
422 [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
423 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
424 : "memory");
425
426 kernel_fpu_end();
427}
428
429static void
430xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
431 unsigned long *p3, unsigned long *p4, unsigned long *p5)
432{
433 unsigned long lines = bytes >> 8;
434
435 kernel_fpu_begin();
436
437 asm volatile(
438#undef BLOCK
439#define BLOCK(i) \
440 BLK64(PF0, LD, i) \
441 BLK64(PF1, XO1, i) \
442 BLK64(PF2, XO2, i) \
443 BLK64(PF3, XO3, i) \
444 BLK64(PF4, XO4, i) \
445 BLK64(NOP, ST, i) \
446
447 " .align 32 ;\n"
448 " 1: ;\n"
449
450 BLOCK(0)
451 BLOCK(4)
452 BLOCK(8)
453 BLOCK(12)
454
455 " add %[inc], %[p1] ;\n"
456 " add %[inc], %[p2] ;\n"
457 " add %[inc], %[p3] ;\n"
458 " add %[inc], %[p4] ;\n"
459 " add %[inc], %[p5] ;\n"
460 " dec %[cnt] ;\n"
461 " jnz 1b ;\n"
462 : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
463 [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
464 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
465 : "memory");
466
467 kernel_fpu_end();
468}
469
470static struct xor_block_template xor_block_sse_pf64 = {
471 .name = "prefetch64-sse",
472 .do_2 = xor_sse_2_pf64,
473 .do_3 = xor_sse_3_pf64,
474 .do_4 = xor_sse_4_pf64,
475 .do_5 = xor_sse_5_pf64,
476};
477
478#undef LD
479#undef XO1
480#undef XO2
481#undef XO3
482#undef XO4
483#undef ST
484#undef NOP
485#undef BLK64
486#undef BLOCK
487
488#undef XOR_CONSTANT_CONSTRAINT
489
5#ifdef CONFIG_X86_32 490#ifdef CONFIG_X86_32
6# include <asm/xor_32.h> 491# include <asm/xor_32.h>
7#else 492#else
8# include <asm/xor_64.h> 493# include <asm/xor_64.h>
9#endif 494#endif
10#endif 495
496#define XOR_SELECT_TEMPLATE(FASTEST) \
497 AVX_SELECT(FASTEST)
498
499#endif /* _ASM_X86_XOR_H */
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index f79cb7ec0e06..ce05722e3c68 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -2,7 +2,7 @@
2#define _ASM_X86_XOR_32_H 2#define _ASM_X86_XOR_32_H
3 3
4/* 4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE. 5 * Optimized RAID-5 checksumming functions for MMX.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = {
529 .do_5 = xor_p5_mmx_5, 529 .do_5 = xor_p5_mmx_5,
530}; 530};
531 531
532/*
533 * Cache avoiding checksumming functions utilizing KNI instructions
534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
535 */
536
537#define OFFS(x) "16*("#x")"
538#define PF_OFFS(x) "256+16*("#x")"
539#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
540#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
541#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
542#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
543#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
544#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
545#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
546#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
547#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
548#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
549#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
550#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
551#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
552
553
554static void
555xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
556{
557 unsigned long lines = bytes >> 8;
558
559 kernel_fpu_begin();
560
561 asm volatile(
562#undef BLOCK
563#define BLOCK(i) \
564 LD(i, 0) \
565 LD(i + 1, 1) \
566 PF1(i) \
567 PF1(i + 2) \
568 LD(i + 2, 2) \
569 LD(i + 3, 3) \
570 PF0(i + 4) \
571 PF0(i + 6) \
572 XO1(i, 0) \
573 XO1(i + 1, 1) \
574 XO1(i + 2, 2) \
575 XO1(i + 3, 3) \
576 ST(i, 0) \
577 ST(i + 1, 1) \
578 ST(i + 2, 2) \
579 ST(i + 3, 3) \
580
581
582 PF0(0)
583 PF0(2)
584
585 " .align 32 ;\n"
586 " 1: ;\n"
587
588 BLOCK(0)
589 BLOCK(4)
590 BLOCK(8)
591 BLOCK(12)
592
593 " addl $256, %1 ;\n"
594 " addl $256, %2 ;\n"
595 " decl %0 ;\n"
596 " jnz 1b ;\n"
597 : "+r" (lines),
598 "+r" (p1), "+r" (p2)
599 :
600 : "memory");
601
602 kernel_fpu_end();
603}
604
605static void
606xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
607 unsigned long *p3)
608{
609 unsigned long lines = bytes >> 8;
610
611 kernel_fpu_begin();
612
613 asm volatile(
614#undef BLOCK
615#define BLOCK(i) \
616 PF1(i) \
617 PF1(i + 2) \
618 LD(i,0) \
619 LD(i + 1, 1) \
620 LD(i + 2, 2) \
621 LD(i + 3, 3) \
622 PF2(i) \
623 PF2(i + 2) \
624 PF0(i + 4) \
625 PF0(i + 6) \
626 XO1(i,0) \
627 XO1(i + 1, 1) \
628 XO1(i + 2, 2) \
629 XO1(i + 3, 3) \
630 XO2(i,0) \
631 XO2(i + 1, 1) \
632 XO2(i + 2, 2) \
633 XO2(i + 3, 3) \
634 ST(i,0) \
635 ST(i + 1, 1) \
636 ST(i + 2, 2) \
637 ST(i + 3, 3) \
638
639
640 PF0(0)
641 PF0(2)
642
643 " .align 32 ;\n"
644 " 1: ;\n"
645
646 BLOCK(0)
647 BLOCK(4)
648 BLOCK(8)
649 BLOCK(12)
650
651 " addl $256, %1 ;\n"
652 " addl $256, %2 ;\n"
653 " addl $256, %3 ;\n"
654 " decl %0 ;\n"
655 " jnz 1b ;\n"
656 : "+r" (lines),
657 "+r" (p1), "+r"(p2), "+r"(p3)
658 :
659 : "memory" );
660
661 kernel_fpu_end();
662}
663
664static void
665xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
666 unsigned long *p3, unsigned long *p4)
667{
668 unsigned long lines = bytes >> 8;
669
670 kernel_fpu_begin();
671
672 asm volatile(
673#undef BLOCK
674#define BLOCK(i) \
675 PF1(i) \
676 PF1(i + 2) \
677 LD(i,0) \
678 LD(i + 1, 1) \
679 LD(i + 2, 2) \
680 LD(i + 3, 3) \
681 PF2(i) \
682 PF2(i + 2) \
683 XO1(i,0) \
684 XO1(i + 1, 1) \
685 XO1(i + 2, 2) \
686 XO1(i + 3, 3) \
687 PF3(i) \
688 PF3(i + 2) \
689 PF0(i + 4) \
690 PF0(i + 6) \
691 XO2(i,0) \
692 XO2(i + 1, 1) \
693 XO2(i + 2, 2) \
694 XO2(i + 3, 3) \
695 XO3(i,0) \
696 XO3(i + 1, 1) \
697 XO3(i + 2, 2) \
698 XO3(i + 3, 3) \
699 ST(i,0) \
700 ST(i + 1, 1) \
701 ST(i + 2, 2) \
702 ST(i + 3, 3) \
703
704
705 PF0(0)
706 PF0(2)
707
708 " .align 32 ;\n"
709 " 1: ;\n"
710
711 BLOCK(0)
712 BLOCK(4)
713 BLOCK(8)
714 BLOCK(12)
715
716 " addl $256, %1 ;\n"
717 " addl $256, %2 ;\n"
718 " addl $256, %3 ;\n"
719 " addl $256, %4 ;\n"
720 " decl %0 ;\n"
721 " jnz 1b ;\n"
722 : "+r" (lines),
723 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
724 :
725 : "memory" );
726
727 kernel_fpu_end();
728}
729
730static void
731xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
732 unsigned long *p3, unsigned long *p4, unsigned long *p5)
733{
734 unsigned long lines = bytes >> 8;
735
736 kernel_fpu_begin();
737
738 /* Make sure GCC forgets anything it knows about p4 or p5,
739 such that it won't pass to the asm volatile below a
740 register that is shared with any other variable. That's
741 because we modify p4 and p5 there, but we can't mark them
742 as read/write, otherwise we'd overflow the 10-asm-operands
743 limit of GCC < 3.1. */
744 asm("" : "+r" (p4), "+r" (p5));
745
746 asm volatile(
747#undef BLOCK
748#define BLOCK(i) \
749 PF1(i) \
750 PF1(i + 2) \
751 LD(i,0) \
752 LD(i + 1, 1) \
753 LD(i + 2, 2) \
754 LD(i + 3, 3) \
755 PF2(i) \
756 PF2(i + 2) \
757 XO1(i,0) \
758 XO1(i + 1, 1) \
759 XO1(i + 2, 2) \
760 XO1(i + 3, 3) \
761 PF3(i) \
762 PF3(i + 2) \
763 XO2(i,0) \
764 XO2(i + 1, 1) \
765 XO2(i + 2, 2) \
766 XO2(i + 3, 3) \
767 PF4(i) \
768 PF4(i + 2) \
769 PF0(i + 4) \
770 PF0(i + 6) \
771 XO3(i,0) \
772 XO3(i + 1, 1) \
773 XO3(i + 2, 2) \
774 XO3(i + 3, 3) \
775 XO4(i,0) \
776 XO4(i + 1, 1) \
777 XO4(i + 2, 2) \
778 XO4(i + 3, 3) \
779 ST(i,0) \
780 ST(i + 1, 1) \
781 ST(i + 2, 2) \
782 ST(i + 3, 3) \
783
784
785 PF0(0)
786 PF0(2)
787
788 " .align 32 ;\n"
789 " 1: ;\n"
790
791 BLOCK(0)
792 BLOCK(4)
793 BLOCK(8)
794 BLOCK(12)
795
796 " addl $256, %1 ;\n"
797 " addl $256, %2 ;\n"
798 " addl $256, %3 ;\n"
799 " addl $256, %4 ;\n"
800 " addl $256, %5 ;\n"
801 " decl %0 ;\n"
802 " jnz 1b ;\n"
803 : "+r" (lines),
804 "+r" (p1), "+r" (p2), "+r" (p3)
805 : "r" (p4), "r" (p5)
806 : "memory");
807
808 /* p4 and p5 were modified, and now the variables are dead.
809 Clobber them just to be sure nobody does something stupid
810 like assuming they have some legal value. */
811 asm("" : "=r" (p4), "=r" (p5));
812
813 kernel_fpu_end();
814}
815
816static struct xor_block_template xor_block_pIII_sse = { 532static struct xor_block_template xor_block_pIII_sse = {
817 .name = "pIII_sse", 533 .name = "pIII_sse",
818 .do_2 = xor_sse_2, 534 .do_2 = xor_sse_2,
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = {
827/* Also try the generic routines. */ 543/* Also try the generic routines. */
828#include <asm-generic/xor.h> 544#include <asm-generic/xor.h>
829 545
546/* We force the use of the SSE xor block because it can write around L2.
547 We may also be able to load into the L1 only depending on how the cpu
548 deals with a load to a line that is being prefetched. */
830#undef XOR_TRY_TEMPLATES 549#undef XOR_TRY_TEMPLATES
831#define XOR_TRY_TEMPLATES \ 550#define XOR_TRY_TEMPLATES \
832do { \ 551do { \
833 xor_speed(&xor_block_8regs); \
834 xor_speed(&xor_block_8regs_p); \
835 xor_speed(&xor_block_32regs); \
836 xor_speed(&xor_block_32regs_p); \
837 AVX_XOR_SPEED; \ 552 AVX_XOR_SPEED; \
838 if (cpu_has_xmm) \ 553 if (cpu_has_xmm) { \
839 xor_speed(&xor_block_pIII_sse); \ 554 xor_speed(&xor_block_pIII_sse); \
840 if (cpu_has_mmx) { \ 555 xor_speed(&xor_block_sse_pf64); \
556 } else if (cpu_has_mmx) { \
841 xor_speed(&xor_block_pII_mmx); \ 557 xor_speed(&xor_block_pII_mmx); \
842 xor_speed(&xor_block_p5_mmx); \ 558 xor_speed(&xor_block_p5_mmx); \
559 } else { \
560 xor_speed(&xor_block_8regs); \
561 xor_speed(&xor_block_8regs_p); \
562 xor_speed(&xor_block_32regs); \
563 xor_speed(&xor_block_32regs_p); \
843 } \ 564 } \
844} while (0) 565} while (0)
845 566
846/* We force the use of the SSE xor block because it can write around L2.
847 We may also be able to load into the L1 only depending on how the cpu
848 deals with a load to a line that is being prefetched. */
849#define XOR_SELECT_TEMPLATE(FASTEST) \
850 AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
851
852#endif /* _ASM_X86_XOR_32_H */ 567#endif /* _ASM_X86_XOR_32_H */
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index 87ac522c4af5..546f1e3b87cc 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -1,301 +1,6 @@
1#ifndef _ASM_X86_XOR_64_H 1#ifndef _ASM_X86_XOR_64_H
2#define _ASM_X86_XOR_64_H 2#define _ASM_X86_XOR_64_H
3 3
4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17
18/*
19 * Cache avoiding checksumming functions utilizing KNI instructions
20 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
21 */
22
23/*
24 * Based on
25 * High-speed RAID5 checksumming functions utilizing SSE instructions.
26 * Copyright (C) 1998 Ingo Molnar.
27 */
28
29/*
30 * x86-64 changes / gcc fixes from Andi Kleen.
31 * Copyright 2002 Andi Kleen, SuSE Labs.
32 *
33 * This hasn't been optimized for the hammer yet, but there are likely
34 * no advantages to be gotten from x86-64 here anyways.
35 */
36
37#include <asm/i387.h>
38
39#define OFFS(x) "16*("#x")"
40#define PF_OFFS(x) "256+16*("#x")"
41#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
42#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
43#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
44#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
45#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
46#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
47#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
48#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
49#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
50#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
51#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
52#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
53#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
54
55
56static void
57xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
58{
59 unsigned int lines = bytes >> 8;
60
61 kernel_fpu_begin();
62
63 asm volatile(
64#undef BLOCK
65#define BLOCK(i) \
66 LD(i, 0) \
67 LD(i + 1, 1) \
68 PF1(i) \
69 PF1(i + 2) \
70 LD(i + 2, 2) \
71 LD(i + 3, 3) \
72 PF0(i + 4) \
73 PF0(i + 6) \
74 XO1(i, 0) \
75 XO1(i + 1, 1) \
76 XO1(i + 2, 2) \
77 XO1(i + 3, 3) \
78 ST(i, 0) \
79 ST(i + 1, 1) \
80 ST(i + 2, 2) \
81 ST(i + 3, 3) \
82
83
84 PF0(0)
85 PF0(2)
86
87 " .align 32 ;\n"
88 " 1: ;\n"
89
90 BLOCK(0)
91 BLOCK(4)
92 BLOCK(8)
93 BLOCK(12)
94
95 " addq %[inc], %[p1] ;\n"
96 " addq %[inc], %[p2] ;\n"
97 " decl %[cnt] ; jnz 1b"
98 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
99 : [inc] "r" (256UL)
100 : "memory");
101
102 kernel_fpu_end();
103}
104
/*
 * xor_sse_3 - XOR three buffers: p1[] ^= p2[] ^ p3[], storing into p1.
 *
 * Same scheme as xor_sse_2: 256 bytes per iteration (lines = bytes >> 8,
 * tail below 256 bytes ignored), 16-byte-aligned buffers required by
 * movaps/xorps, prefetchnta (PF0/PF1/PF2) to stream all three inputs,
 * and kernel_fpu_begin()/kernel_fpu_end() around the XMM usage.
 */
105static void
106xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
107 unsigned long *p3)
108{
109 unsigned int lines = bytes >> 8;
110
111 kernel_fpu_begin();
112 asm volatile(
#undef BLOCK
#define BLOCK(i) \
 PF1(i) \
 PF1(i + 2) \
 LD(i, 0) \
 LD(i + 1, 1) \
 LD(i + 2, 2) \
 LD(i + 3, 3) \
 PF2(i) \
 PF2(i + 2) \
 PF0(i + 4) \
 PF0(i + 6) \
 XO1(i, 0) \
 XO1(i + 1, 1) \
 XO1(i + 2, 2) \
 XO1(i + 3, 3) \
 XO2(i, 0) \
 XO2(i + 1, 1) \
 XO2(i + 2, 2) \
 XO2(i + 3, 3) \
 ST(i, 0) \
 ST(i + 1, 1) \
 ST(i + 2, 2) \
 ST(i + 3, 3) \
137
138
 /* Prime the prefetch queue before entering the loop. */
139 PF0(0)
140 PF0(2)
141
142 " .align 32 ;\n"
143 " 1: ;\n"
144
 /* Four BLOCKs of 4 x 16 bytes each = 256 bytes per pass. */
145 BLOCK(0)
146 BLOCK(4)
147 BLOCK(8)
148 BLOCK(12)
149
150 " addq %[inc], %[p1] ;\n"
151 " addq %[inc], %[p2] ;\n"
152 " addq %[inc], %[p3] ;\n"
153 " decl %[cnt] ; jnz 1b"
154 : [cnt] "+r" (lines),
155 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
156 : [inc] "r" (256UL)
157 : "memory");
158 kernel_fpu_end();
159}
160
/*
 * xor_sse_4 - XOR four buffers: p1[] ^= p2[] ^ p3[] ^ p4[], into p1.
 *
 * Same scheme as xor_sse_2/3: 256 bytes per iteration (lines =
 * bytes >> 8, tail below 256 bytes ignored), 16-byte-aligned buffers
 * required by movaps/xorps, prefetchnta streaming of all four inputs,
 * kernel_fpu_begin()/kernel_fpu_end() around the XMM usage.
 */
161static void
162xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
163 unsigned long *p3, unsigned long *p4)
164{
165 unsigned int lines = bytes >> 8;
166
167 kernel_fpu_begin();
168
169 asm volatile(
#undef BLOCK
#define BLOCK(i) \
 PF1(i) \
 PF1(i + 2) \
 LD(i, 0) \
 LD(i + 1, 1) \
 LD(i + 2, 2) \
 LD(i + 3, 3) \
 PF2(i) \
 PF2(i + 2) \
 XO1(i, 0) \
 XO1(i + 1, 1) \
 XO1(i + 2, 2) \
 XO1(i + 3, 3) \
 PF3(i) \
 PF3(i + 2) \
 PF0(i + 4) \
 PF0(i + 6) \
 XO2(i, 0) \
 XO2(i + 1, 1) \
 XO2(i + 2, 2) \
 XO2(i + 3, 3) \
 XO3(i, 0) \
 XO3(i + 1, 1) \
 XO3(i + 2, 2) \
 XO3(i + 3, 3) \
 ST(i, 0) \
 ST(i + 1, 1) \
 ST(i + 2, 2) \
 ST(i + 3, 3) \
200
201
 /* Prime the prefetch queue before entering the loop. */
202 PF0(0)
203 PF0(2)
204
205 " .align 32 ;\n"
206 " 1: ;\n"
207
 /* Four BLOCKs of 4 x 16 bytes each = 256 bytes per pass. */
208 BLOCK(0)
209 BLOCK(4)
210 BLOCK(8)
211 BLOCK(12)
212
213 " addq %[inc], %[p1] ;\n"
214 " addq %[inc], %[p2] ;\n"
215 " addq %[inc], %[p3] ;\n"
216 " addq %[inc], %[p4] ;\n"
217 " decl %[cnt] ; jnz 1b"
 /*
  * NOTE(review): 'lines' is pinned to %ecx ("+c") here, unlike
  * xor_sse_2/3 which use "+r" — presumably to ease register
  * pressure with the extra pointer operand; confirm before
  * changing.  decl %[cnt] works with either constraint.
  */
218 : [cnt] "+c" (lines),
219 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
220 : [inc] "r" (256UL)
221 : "memory" );
222
223 kernel_fpu_end();
224}
225
/*
 * xor_sse_5 - XOR five buffers: p1[] ^= p2[] ^ p3[] ^ p4[] ^ p5[],
 * storing the result into p1.
 *
 * Same scheme as xor_sse_2/3/4: 256 bytes per iteration (lines =
 * bytes >> 8, tail below 256 bytes ignored), 16-byte-aligned buffers
 * required by movaps/xorps, prefetchnta streaming of all five inputs,
 * kernel_fpu_begin()/kernel_fpu_end() around the XMM usage.
 */
226static void
227xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
228 unsigned long *p3, unsigned long *p4, unsigned long *p5)
229{
230 unsigned int lines = bytes >> 8;
231
232 kernel_fpu_begin();
233
234 asm volatile(
#undef BLOCK
#define BLOCK(i) \
 PF1(i) \
 PF1(i + 2) \
 LD(i, 0) \
 LD(i + 1, 1) \
 LD(i + 2, 2) \
 LD(i + 3, 3) \
 PF2(i) \
 PF2(i + 2) \
 XO1(i, 0) \
 XO1(i + 1, 1) \
 XO1(i + 2, 2) \
 XO1(i + 3, 3) \
 PF3(i) \
 PF3(i + 2) \
 XO2(i, 0) \
 XO2(i + 1, 1) \
 XO2(i + 2, 2) \
 XO2(i + 3, 3) \
 PF4(i) \
 PF4(i + 2) \
 PF0(i + 4) \
 PF0(i + 6) \
 XO3(i, 0) \
 XO3(i + 1, 1) \
 XO3(i + 2, 2) \
 XO3(i + 3, 3) \
 XO4(i, 0) \
 XO4(i + 1, 1) \
 XO4(i + 2, 2) \
 XO4(i + 3, 3) \
 ST(i, 0) \
 ST(i + 1, 1) \
 ST(i + 2, 2) \
 ST(i + 3, 3) \
270
271
 /* Prime the prefetch queue before entering the loop. */
272 PF0(0)
273 PF0(2)
274
275 " .align 32 ;\n"
276 " 1: ;\n"
277
 /* Four BLOCKs of 4 x 16 bytes each = 256 bytes per pass. */
278 BLOCK(0)
279 BLOCK(4)
280 BLOCK(8)
281 BLOCK(12)
282
283 " addq %[inc], %[p1] ;\n"
284 " addq %[inc], %[p2] ;\n"
285 " addq %[inc], %[p3] ;\n"
286 " addq %[inc], %[p4] ;\n"
287 " addq %[inc], %[p5] ;\n"
288 " decl %[cnt] ; jnz 1b"
 /* As in xor_sse_4, 'lines' is pinned to %ecx via "+c". */
289 : [cnt] "+c" (lines),
290 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
291 [p5] "+r" (p5)
292 : [inc] "r" (256UL)
293 : "memory");
294
295 kernel_fpu_end();
296}
298
299static struct xor_block_template xor_block_sse = { 4static struct xor_block_template xor_block_sse = {
300 .name = "generic_sse", 5 .name = "generic_sse",
301 .do_2 = xor_sse_2, 6 .do_2 = xor_sse_2,
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = {
308/* Also try the AVX routines */ 13/* Also try the AVX routines */
309#include <asm/xor_avx.h> 14#include <asm/xor_avx.h>
310 15
16/* We force the use of the SSE xor block because it can write around L2.
17 We may also be able to load into the L1 only depending on how the cpu
18 deals with a load to a line that is being prefetched. */
311#undef XOR_TRY_TEMPLATES 19#undef XOR_TRY_TEMPLATES
312#define XOR_TRY_TEMPLATES \ 20#define XOR_TRY_TEMPLATES \
313do { \ 21do { \
314 AVX_XOR_SPEED; \ 22 AVX_XOR_SPEED; \
23 xor_speed(&xor_block_sse_pf64); \
315 xor_speed(&xor_block_sse); \ 24 xor_speed(&xor_block_sse); \
316} while (0) 25} while (0)
317 26
318/* We force the use of the SSE xor block because it can write around L2.
319 We may also be able to load into the L1 only depending on how the cpu
320 deals with a load to a line that is being prefetched. */
321#define XOR_SELECT_TEMPLATE(FASTEST) \
322 AVX_SELECT(&xor_block_sse)
323
324#endif /* _ASM_X86_XOR_64_H */ 27#endif /* _ASM_X86_XOR_64_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 92862cd90201..c15ddaf90710 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -1,6 +1,31 @@
1#ifndef _ASM_X86_BOOTPARAM_H 1#ifndef _ASM_X86_BOOTPARAM_H
2#define _ASM_X86_BOOTPARAM_H 2#define _ASM_X86_BOOTPARAM_H
3 3
4/* setup_data types */
5#define SETUP_NONE 0
6#define SETUP_E820_EXT 1
7#define SETUP_DTB 2
8#define SETUP_PCI 3
9
10/* ram_size flags */
11#define RAMDISK_IMAGE_START_MASK 0x07FF
12#define RAMDISK_PROMPT_FLAG 0x8000
13#define RAMDISK_LOAD_FLAG 0x4000
14
15/* loadflags */
16#define LOADED_HIGH (1<<0)
17#define QUIET_FLAG (1<<5)
18#define KEEP_SEGMENTS (1<<6)
19#define CAN_USE_HEAP (1<<7)
20
21/* xloadflags */
22#define XLF_KERNEL_64 (1<<0)
23#define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1)
24#define XLF_EFI_HANDOVER_32 (1<<2)
25#define XLF_EFI_HANDOVER_64 (1<<3)
26
27#ifndef __ASSEMBLY__
28
4#include <linux/types.h> 29#include <linux/types.h>
5#include <linux/screen_info.h> 30#include <linux/screen_info.h>
6#include <linux/apm_bios.h> 31#include <linux/apm_bios.h>
@@ -9,12 +34,6 @@
9#include <asm/ist.h> 34#include <asm/ist.h>
10#include <video/edid.h> 35#include <video/edid.h>
11 36
12/* setup data types */
13#define SETUP_NONE 0
14#define SETUP_E820_EXT 1
15#define SETUP_DTB 2
16#define SETUP_PCI 3
17
18/* extensible setup data list node */ 37/* extensible setup data list node */
19struct setup_data { 38struct setup_data {
20 __u64 next; 39 __u64 next;
@@ -28,9 +47,6 @@ struct setup_header {
28 __u16 root_flags; 47 __u16 root_flags;
29 __u32 syssize; 48 __u32 syssize;
30 __u16 ram_size; 49 __u16 ram_size;
31#define RAMDISK_IMAGE_START_MASK 0x07FF
32#define RAMDISK_PROMPT_FLAG 0x8000
33#define RAMDISK_LOAD_FLAG 0x4000
34 __u16 vid_mode; 50 __u16 vid_mode;
35 __u16 root_dev; 51 __u16 root_dev;
36 __u16 boot_flag; 52 __u16 boot_flag;
@@ -42,10 +58,6 @@ struct setup_header {
42 __u16 kernel_version; 58 __u16 kernel_version;
43 __u8 type_of_loader; 59 __u8 type_of_loader;
44 __u8 loadflags; 60 __u8 loadflags;
45#define LOADED_HIGH (1<<0)
46#define QUIET_FLAG (1<<5)
47#define KEEP_SEGMENTS (1<<6)
48#define CAN_USE_HEAP (1<<7)
49 __u16 setup_move_size; 61 __u16 setup_move_size;
50 __u32 code32_start; 62 __u32 code32_start;
51 __u32 ramdisk_image; 63 __u32 ramdisk_image;
@@ -58,7 +70,8 @@ struct setup_header {
58 __u32 initrd_addr_max; 70 __u32 initrd_addr_max;
59 __u32 kernel_alignment; 71 __u32 kernel_alignment;
60 __u8 relocatable_kernel; 72 __u8 relocatable_kernel;
61 __u8 _pad2[3]; 73 __u8 min_alignment;
74 __u16 xloadflags;
62 __u32 cmdline_size; 75 __u32 cmdline_size;
63 __u32 hardware_subarch; 76 __u32 hardware_subarch;
64 __u64 hardware_subarch_data; 77 __u64 hardware_subarch_data;
@@ -106,7 +119,10 @@ struct boot_params {
106 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ 119 __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
107 struct sys_desc_table sys_desc_table; /* 0x0a0 */ 120 struct sys_desc_table sys_desc_table; /* 0x0a0 */
108 struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */ 121 struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */
109 __u8 _pad4[128]; /* 0x0c0 */ 122 __u32 ext_ramdisk_image; /* 0x0c0 */
123 __u32 ext_ramdisk_size; /* 0x0c4 */
124 __u32 ext_cmd_line_ptr; /* 0x0c8 */
125 __u8 _pad4[116]; /* 0x0cc */
110 struct edid_info edid_info; /* 0x140 */ 126 struct edid_info edid_info; /* 0x140 */
111 struct efi_info efi_info; /* 0x1c0 */ 127 struct efi_info efi_info; /* 0x1c0 */
112 __u32 alt_mem_k; /* 0x1e0 */ 128 __u32 alt_mem_k; /* 0x1e0 */
@@ -115,7 +131,20 @@ struct boot_params {
115 __u8 eddbuf_entries; /* 0x1e9 */ 131 __u8 eddbuf_entries; /* 0x1e9 */
116 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */ 132 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */
117 __u8 kbd_status; /* 0x1eb */ 133 __u8 kbd_status; /* 0x1eb */
118 __u8 _pad6[5]; /* 0x1ec */ 134 __u8 _pad5[3]; /* 0x1ec */
135 /*
136 * The sentinel is set to a nonzero value (0xff) in header.S.
137 *
138 * A bootloader is supposed to only take setup_header and put
139 * it into a clean boot_params buffer. If it turns out that
140 * it is clumsy or too generous with the buffer, it most
141 * probably will pick up the sentinel variable too. The fact
142 * that this variable then is still 0xff will let kernel
143 * know that some variables in boot_params are invalid and
144 * kernel should zero out certain portions of boot_params.
145 */
146 __u8 sentinel; /* 0x1ef */
147 __u8 _pad6[1]; /* 0x1f0 */
119 struct setup_header hdr; /* setup header */ /* 0x1f1 */ 148 struct setup_header hdr; /* setup header */ /* 0x1f1 */
120 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; 149 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
121 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ 150 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
@@ -134,6 +163,6 @@ enum {
134 X86_NR_SUBARCHS, 163 X86_NR_SUBARCHS,
135}; 164};
136 165
137 166#endif /* __ASSEMBLY__ */
138 167
139#endif /* _ASM_X86_BOOTPARAM_H */ 168#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 58c829871c31..a0eab85ce7b8 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -4,66 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/ioctls.h> 5#include <asm/ioctls.h>
6 6
7/*
8 * Machine Check support for x86
9 */
10
11/* MCG_CAP register defines */
12#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
13#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
15#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
16#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
17#define MCG_EXT_CNT_SHIFT 16
18#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
19#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
20
21/* MCG_STATUS register defines */
22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
25
26/* MCi_STATUS register defines */
27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
28#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
29#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
30#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
31#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
32#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
36#define MCACOD 0xffff /* MCA Error Code */
37
38/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
39#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
40#define MCACOD_SCRUBMSK 0xfff0
41#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
42#define MCACOD_DATA 0x0134 /* Data Load */
43#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
44
45/* MCi_MISC register defines */
46#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
47#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
48#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
49#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
50#define MCI_MISC_ADDR_PHYS 2 /* physical address */
51#define MCI_MISC_ADDR_MEM 3 /* memory address */
52#define MCI_MISC_ADDR_GENERIC 7 /* generic */
53
54/* CTL2 register defines */
55#define MCI_CTL2_CMCI_EN (1ULL << 30)
56#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
57
58#define MCJ_CTX_MASK 3
59#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
60#define MCJ_CTX_RANDOM 0 /* inject context: random */
61#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
62#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
63#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
64#define MCJ_EXCEPTION 0x8 /* raise as exception */
65#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
66
67/* Fields are zero when not available */ 7/* Fields are zero when not available */
68struct mce { 8struct mce {
69 __u64 status; 9 __u64 status;
@@ -87,35 +27,8 @@ struct mce {
87 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ 27 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
88}; 28};
89 29
90/*
91 * This structure contains all data related to the MCE log. Also
92 * carries a signature to make it easier to find from external
93 * debugging tools. Each entry is only valid when its finished flag
94 * is set.
95 */
96
97#define MCE_LOG_LEN 32
98
99struct mce_log {
100 char signature[12]; /* "MACHINECHECK" */
101 unsigned len; /* = MCE_LOG_LEN */
102 unsigned next;
103 unsigned flags;
104 unsigned recordlen; /* length of struct mce */
105 struct mce entry[MCE_LOG_LEN];
106};
107
108#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
109
110#define MCE_LOG_SIGNATURE "MACHINECHECK"
111
112#define MCE_GET_RECORD_LEN _IOR('M', 1, int) 30#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
113#define MCE_GET_LOG_LEN _IOR('M', 2, int) 31#define MCE_GET_LOG_LEN _IOR('M', 2, int)
114#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) 32#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
115 33
116/* Software defined banks */
117#define MCE_EXTENDED_BANK 128
118#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
119#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
120
121#endif /* _UAPI_ASM_X86_MCE_H */ 34#endif /* _UAPI_ASM_X86_MCE_H */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 433a59fb1a74..f26d2771846f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
103#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) 103#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
104#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) 104#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
105 105
106#define MSR_IA32_POWER_CTL 0x000001fc
107
106#define MSR_IA32_MC0_CTL 0x00000400 108#define MSR_IA32_MC0_CTL 0x00000400
107#define MSR_IA32_MC0_STATUS 0x00000401 109#define MSR_IA32_MC0_STATUS 0x00000401
108#define MSR_IA32_MC0_ADDR 0x00000402 110#define MSR_IA32_MC0_ADDR 0x00000402
@@ -194,6 +196,8 @@
194/* Fam 15h MSRs */ 196/* Fam 15h MSRs */
195#define MSR_F15H_PERF_CTL 0xc0010200 197#define MSR_F15H_PERF_CTL 0xc0010200
196#define MSR_F15H_PERF_CTR 0xc0010201 198#define MSR_F15H_PERF_CTR 0xc0010201
199#define MSR_F15H_NB_PERF_CTL 0xc0010240
200#define MSR_F15H_NB_PERF_CTR 0xc0010241
197 201
198/* Fam 10h MSRs */ 202/* Fam 10h MSRs */
199#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 203#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
@@ -272,6 +276,7 @@
272#define MSR_IA32_PLATFORM_ID 0x00000017 276#define MSR_IA32_PLATFORM_ID 0x00000017
273#define MSR_IA32_EBL_CR_POWERON 0x0000002a 277#define MSR_IA32_EBL_CR_POWERON 0x0000002a
274#define MSR_EBC_FREQUENCY_ID 0x0000002c 278#define MSR_EBC_FREQUENCY_ID 0x0000002c
279#define MSR_SMI_COUNT 0x00000034
275#define MSR_IA32_FEATURE_CONTROL 0x0000003a 280#define MSR_IA32_FEATURE_CONTROL 0x0000003a
276#define MSR_IA32_TSC_ADJUST 0x0000003b 281#define MSR_IA32_TSC_ADJUST 0x0000003b
277 282
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 34e923a53762..ac3b3d002833 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o
65obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 65obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
66obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 66obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
67obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 67obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
68obj-$(CONFIG_KPROBES) += kprobes.o 68obj-y += kprobes/
69obj-$(CONFIG_OPTPROBES) += kprobes-opt.o
70obj-$(CONFIG_MODULES) += module.o 69obj-$(CONFIG_MODULES) += module.o
71obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o 70obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
72obj-$(CONFIG_KGDB) += kgdb.o 71obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index afdc3f756dea..c9876efecafb 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -240,7 +240,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
240 dw_apb_clockevent_pause(adev->timer); 240 dw_apb_clockevent_pause(adev->timer);
241 if (system_state == SYSTEM_RUNNING) { 241 if (system_state == SYSTEM_RUNNING) {
242 pr_debug("skipping APBT CPU %lu offline\n", cpu); 242 pr_debug("skipping APBT CPU %lu offline\n", cpu);
243 } else if (adev) { 243 } else {
244 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 244 pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
245 dw_apb_clockevent_stop(adev->timer); 245 dw_apb_clockevent_stop(adev->timer);
246 } 246 }
@@ -311,7 +311,6 @@ void __init apbt_time_init(void)
311#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
312 int i; 312 int i;
313 struct sfi_timer_table_entry *p_mtmr; 313 struct sfi_timer_table_entry *p_mtmr;
314 unsigned int percpu_timer;
315 struct apbt_dev *adev; 314 struct apbt_dev *adev;
316#endif 315#endif
317 316
@@ -346,13 +345,10 @@ void __init apbt_time_init(void)
346 return; 345 return;
347 } 346 }
348 pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); 347 pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
349 if (num_possible_cpus() <= sfi_mtimer_num) { 348 if (num_possible_cpus() <= sfi_mtimer_num)
350 percpu_timer = 1;
351 apbt_num_timers_used = num_possible_cpus(); 349 apbt_num_timers_used = num_possible_cpus();
352 } else { 350 else
353 percpu_timer = 0;
354 apbt_num_timers_used = 1; 351 apbt_num_timers_used = 1;
355 }
356 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); 352 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
357 353
358 /* here we set up per CPU timer data structure */ 354 /* here we set up per CPU timer data structure */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b994cc84aa7e..a5b4dce1b7ac 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void)
1477 * Now that local APIC setup is completed for BP, configure the fault 1477 * Now that local APIC setup is completed for BP, configure the fault
1478 * handling for interrupt remapping. 1478 * handling for interrupt remapping.
1479 */ 1479 */
1480 if (irq_remapping_enabled) 1480 irq_remap_enable_fault_handling();
1481 irq_remap_enable_fault_handling();
1482 1481
1483} 1482}
1484 1483
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void)
2251 local_irq_save(flags); 2250 local_irq_save(flags);
2252 disable_local_APIC(); 2251 disable_local_APIC();
2253 2252
2254 if (irq_remapping_enabled) 2253 irq_remapping_disable();
2255 irq_remapping_disable();
2256 2254
2257 local_irq_restore(flags); 2255 local_irq_restore(flags);
2258 return 0; 2256 return 0;
@@ -2268,16 +2266,15 @@ static void lapic_resume(void)
2268 return; 2266 return;
2269 2267
2270 local_irq_save(flags); 2268 local_irq_save(flags);
2271 if (irq_remapping_enabled) { 2269
2272 /* 2270 /*
2273 * IO-APIC and PIC have their own resume routines. 2271 * IO-APIC and PIC have their own resume routines.
2274 * We just mask them here to make sure the interrupt 2272 * We just mask them here to make sure the interrupt
2275 * subsystem is completely quiet while we enable x2apic 2273 * subsystem is completely quiet while we enable x2apic
2276 * and interrupt-remapping. 2274 * and interrupt-remapping.
2277 */ 2275 */
2278 mask_ioapic_entries(); 2276 mask_ioapic_entries();
2279 legacy_pic->mask_all(); 2277 legacy_pic->mask_all();
2280 }
2281 2278
2282 if (x2apic_mode) 2279 if (x2apic_mode)
2283 enable_x2apic(); 2280 enable_x2apic();
@@ -2320,8 +2317,7 @@ static void lapic_resume(void)
2320 apic_write(APIC_ESR, 0); 2317 apic_write(APIC_ESR, 0);
2321 apic_read(APIC_ESR); 2318 apic_read(APIC_ESR);
2322 2319
2323 if (irq_remapping_enabled) 2320 irq_remapping_reenable(x2apic_mode);
2324 irq_remapping_reenable(x2apic_mode);
2325 2321
2326 local_irq_restore(flags); 2322 local_irq_restore(flags);
2327} 2323}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b739d398bb29..9ed796ccc32c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -68,22 +68,6 @@
68#define for_each_irq_pin(entry, head) \ 68#define for_each_irq_pin(entry, head) \
69 for (entry = head; entry; entry = entry->next) 69 for (entry = head; entry; entry = entry->next)
70 70
71#ifdef CONFIG_IRQ_REMAP
72static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
73static inline bool irq_remapped(struct irq_cfg *cfg)
74{
75 return cfg->irq_2_iommu.iommu != NULL;
76}
77#else
78static inline bool irq_remapped(struct irq_cfg *cfg)
79{
80 return false;
81}
82static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
83{
84}
85#endif
86
87/* 71/*
88 * Is the SiS APIC rmw bug present ? 72 * Is the SiS APIC rmw bug present ?
89 * -1 = don't know, 0 = no, 1 = yes 73 * -1 = don't know, 0 = no, 1 = yes
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
300 return cfg; 284 return cfg;
301} 285}
302 286
303static int alloc_irq_from(unsigned int from, int node) 287static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
304{ 288{
305 return irq_alloc_desc_from(from, node); 289 return irq_alloc_descs_from(from, count, node);
306} 290}
307 291
308static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 292static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
326 + (mpc_ioapic_addr(idx) & ~PAGE_MASK); 310 + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
327} 311}
328 312
329static inline void io_apic_eoi(unsigned int apic, unsigned int vector) 313void io_apic_eoi(unsigned int apic, unsigned int vector)
330{ 314{
331 struct io_apic __iomem *io_apic = io_apic_base(apic); 315 struct io_apic __iomem *io_apic = io_apic_base(apic);
332 writel(vector, &io_apic->eoi); 316 writel(vector, &io_apic->eoi);
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data)
573 * Otherwise, we simulate the EOI message manually by changing the trigger 557 * Otherwise, we simulate the EOI message manually by changing the trigger
574 * mode to edge and then back to level, with RTE being masked during this. 558 * mode to edge and then back to level, with RTE being masked during this.
575 */ 559 */
576static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) 560void native_eoi_ioapic_pin(int apic, int pin, int vector)
577{ 561{
578 if (mpc_ioapic_ver(apic) >= 0x20) { 562 if (mpc_ioapic_ver(apic) >= 0x20) {
579 /* 563 io_apic_eoi(apic, vector);
580 * Intr-remapping uses pin number as the virtual vector
581 * in the RTE. Actual vector is programmed in
582 * intr-remapping table entry. Hence for the io-apic
583 * EOI we use the pin number.
584 */
585 if (cfg && irq_remapped(cfg))
586 io_apic_eoi(apic, pin);
587 else
588 io_apic_eoi(apic, vector);
589 } else { 564 } else {
590 struct IO_APIC_route_entry entry, entry1; 565 struct IO_APIC_route_entry entry, entry1;
591 566
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
606 } 581 }
607} 582}
608 583
609static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 584void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
610{ 585{
611 struct irq_pin_list *entry; 586 struct irq_pin_list *entry;
612 unsigned long flags; 587 unsigned long flags;
613 588
614 raw_spin_lock_irqsave(&ioapic_lock, flags); 589 raw_spin_lock_irqsave(&ioapic_lock, flags);
615 for_each_irq_pin(entry, cfg->irq_2_pin) 590 for_each_irq_pin(entry, cfg->irq_2_pin)
616 __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); 591 x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
592 cfg->vector);
617 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 593 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
618} 594}
619 595
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
650 } 626 }
651 627
652 raw_spin_lock_irqsave(&ioapic_lock, flags); 628 raw_spin_lock_irqsave(&ioapic_lock, flags);
653 __eoi_ioapic_pin(apic, pin, entry.vector, NULL); 629 x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
654 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 630 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
655 } 631 }
656 632
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
1304 fasteoi = false; 1280 fasteoi = false;
1305 } 1281 }
1306 1282
1307 if (irq_remapped(cfg)) { 1283 if (setup_remapped_irq(irq, cfg, chip))
1308 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
1309 irq_remap_modify_chip_defaults(chip);
1310 fasteoi = trigger != 0; 1284 fasteoi = trigger != 0;
1311 }
1312 1285
1313 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; 1286 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
1314 irq_set_chip_and_handler_name(irq, chip, hdl, 1287 irq_set_chip_and_handler_name(irq, chip, hdl,
1315 fasteoi ? "fasteoi" : "edge"); 1288 fasteoi ? "fasteoi" : "edge");
1316} 1289}
1317 1290
1318static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, 1291int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
1319 unsigned int destination, int vector, 1292 unsigned int destination, int vector,
1320 struct io_apic_irq_attr *attr) 1293 struct io_apic_irq_attr *attr)
1321{ 1294{
1322 if (irq_remapping_enabled)
1323 return setup_ioapic_remapped_entry(irq, entry, destination,
1324 vector, attr);
1325
1326 memset(entry, 0, sizeof(*entry)); 1295 memset(entry, 0, sizeof(*entry));
1327 1296
1328 entry->delivery_mode = apic->irq_delivery_mode; 1297 entry->delivery_mode = apic->irq_delivery_mode;
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1370 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, 1339 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
1371 cfg->vector, irq, attr->trigger, attr->polarity, dest); 1340 cfg->vector, irq, attr->trigger, attr->polarity, dest);
1372 1341
1373 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { 1342 if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
1374 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1343 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1375 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1344 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
1376 __clear_irq_vector(irq, cfg); 1345 __clear_irq_vector(irq, cfg);
1377 1346
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1479 struct IO_APIC_route_entry entry; 1448 struct IO_APIC_route_entry entry;
1480 unsigned int dest; 1449 unsigned int dest;
1481 1450
1482 if (irq_remapping_enabled)
1483 return;
1484
1485 memset(&entry, 0, sizeof(entry)); 1451 memset(&entry, 0, sizeof(entry));
1486 1452
1487 /* 1453 /*
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1513 ioapic_write_entry(ioapic_idx, pin, entry); 1479 ioapic_write_entry(ioapic_idx, pin, entry);
1514} 1480}
1515 1481
1516__apicdebuginit(void) print_IO_APIC(int ioapic_idx) 1482void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
1517{ 1483{
1518 int i; 1484 int i;
1485
1486 pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");
1487
1488 for (i = 0; i <= nr_entries; i++) {
1489 struct IO_APIC_route_entry entry;
1490
1491 entry = ioapic_read_entry(apic, i);
1492
1493 pr_debug(" %02x %02X ", i, entry.dest);
1494 pr_cont("%1d %1d %1d %1d %1d "
1495 "%1d %1d %02X\n",
1496 entry.mask,
1497 entry.trigger,
1498 entry.irr,
1499 entry.polarity,
1500 entry.delivery_status,
1501 entry.dest_mode,
1502 entry.delivery_mode,
1503 entry.vector);
1504 }
1505}
1506
1507void intel_ir_io_apic_print_entries(unsigned int apic,
1508 unsigned int nr_entries)
1509{
1510 int i;
1511
1512 pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");
1513
1514 for (i = 0; i <= nr_entries; i++) {
1515 struct IR_IO_APIC_route_entry *ir_entry;
1516 struct IO_APIC_route_entry entry;
1517
1518 entry = ioapic_read_entry(apic, i);
1519
1520 ir_entry = (struct IR_IO_APIC_route_entry *)&entry;
1521
1522 pr_debug(" %02x %04X ", i, ir_entry->index);
1523 pr_cont("%1d %1d %1d %1d %1d "
1524 "%1d %1d %X %02X\n",
1525 ir_entry->format,
1526 ir_entry->mask,
1527 ir_entry->trigger,
1528 ir_entry->irr,
1529 ir_entry->polarity,
1530 ir_entry->delivery_status,
1531 ir_entry->index2,
1532 ir_entry->zero,
1533 ir_entry->vector);
1534 }
1535}
1536
1537__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1538{
1519 union IO_APIC_reg_00 reg_00; 1539 union IO_APIC_reg_00 reg_00;
1520 union IO_APIC_reg_01 reg_01; 1540 union IO_APIC_reg_01 reg_01;
1521 union IO_APIC_reg_02 reg_02; 1541 union IO_APIC_reg_02 reg_02;
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1568 1588
1569 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1589 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1570 1590
1571 if (irq_remapping_enabled) { 1591 x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
1572 printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
1573 " Pol Stat Indx2 Zero Vect:\n");
1574 } else {
1575 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1576 " Stat Dmod Deli Vect:\n");
1577 }
1578
1579 for (i = 0; i <= reg_01.bits.entries; i++) {
1580 if (irq_remapping_enabled) {
1581 struct IO_APIC_route_entry entry;
1582 struct IR_IO_APIC_route_entry *ir_entry;
1583
1584 entry = ioapic_read_entry(ioapic_idx, i);
1585 ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
1586 printk(KERN_DEBUG " %02x %04X ",
1587 i,
1588 ir_entry->index
1589 );
1590 pr_cont("%1d %1d %1d %1d %1d "
1591 "%1d %1d %X %02X\n",
1592 ir_entry->format,
1593 ir_entry->mask,
1594 ir_entry->trigger,
1595 ir_entry->irr,
1596 ir_entry->polarity,
1597 ir_entry->delivery_status,
1598 ir_entry->index2,
1599 ir_entry->zero,
1600 ir_entry->vector
1601 );
1602 } else {
1603 struct IO_APIC_route_entry entry;
1604
1605 entry = ioapic_read_entry(ioapic_idx, i);
1606 printk(KERN_DEBUG " %02x %02X ",
1607 i,
1608 entry.dest
1609 );
1610 pr_cont("%1d %1d %1d %1d %1d "
1611 "%1d %1d %02X\n",
1612 entry.mask,
1613 entry.trigger,
1614 entry.irr,
1615 entry.polarity,
1616 entry.delivery_status,
1617 entry.dest_mode,
1618 entry.delivery_mode,
1619 entry.vector
1620 );
1621 }
1622 }
1623} 1592}
1624 1593
1625__apicdebuginit(void) print_IO_APICs(void) 1594__apicdebuginit(void) print_IO_APICs(void)
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void)
1921 clear_IO_APIC(); 1890 clear_IO_APIC();
1922} 1891}
1923 1892
1924/* 1893void native_disable_io_apic(void)
1925 * Not an __init, needed by the reboot code
1926 */
1927void disable_IO_APIC(void)
1928{ 1894{
1929 /* 1895 /*
1930 * Clear the IO-APIC before rebooting:
1931 */
1932 clear_IO_APIC();
1933
1934 if (!legacy_pic->nr_legacy_irqs)
1935 return;
1936
1937 /*
1938 * If the i8259 is routed through an IOAPIC 1896 * If the i8259 is routed through an IOAPIC
1939 * Put that IOAPIC in virtual wire mode 1897 * Put that IOAPIC in virtual wire mode
1940 * so legacy interrupts can be delivered. 1898 * so legacy interrupts can be delivered.
1941 *
1942 * With interrupt-remapping, for now we will use virtual wire A mode,
1943 * as virtual wire B is little complex (need to configure both
1944 * IOAPIC RTE as well as interrupt-remapping table entry).
1945 * As this gets called during crash dump, keep this simple for now.
1946 */ 1899 */
1947 if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) { 1900 if (ioapic_i8259.pin != -1) {
1948 struct IO_APIC_route_entry entry; 1901 struct IO_APIC_route_entry entry;
1949 1902
1950 memset(&entry, 0, sizeof(entry)); 1903 memset(&entry, 0, sizeof(entry));
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void)
1964 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); 1917 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1965 } 1918 }
1966 1919
1920 if (cpu_has_apic || apic_from_smp_config())
1921 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1922
1923}
1924
1925/*
1926 * Not an __init, needed by the reboot code
1927 */
1928void disable_IO_APIC(void)
1929{
1967 /* 1930 /*
1968 * Use virtual wire A mode when interrupt remapping is enabled. 1931 * Clear the IO-APIC before rebooting:
1969 */ 1932 */
1970 if (cpu_has_apic || apic_from_smp_config()) 1933 clear_IO_APIC();
1971 disconnect_bsp_APIC(!irq_remapping_enabled && 1934
1972 ioapic_i8259.pin != -1); 1935 if (!legacy_pic->nr_legacy_irqs)
1936 return;
1937
1938 x86_io_apic_ops.disable();
1973} 1939}
1974 1940
1975#ifdef CONFIG_X86_32 1941#ifdef CONFIG_X86_32
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2322 2288
2323 apic = entry->apic; 2289 apic = entry->apic;
2324 pin = entry->pin; 2290 pin = entry->pin;
2325 /* 2291
2326 * With interrupt-remapping, destination information comes 2292 io_apic_write(apic, 0x11 + pin*2, dest);
2327 * from interrupt-remapping table entry.
2328 */
2329 if (!irq_remapped(cfg))
2330 io_apic_write(apic, 0x11 + pin*2, dest);
2331 reg = io_apic_read(apic, 0x10 + pin*2); 2293 reg = io_apic_read(apic, 0x10 + pin*2);
2332 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2294 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2333 reg |= vector; 2295 reg |= vector;
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2369 return 0; 2331 return 0;
2370} 2332}
2371 2333
2372static int 2334
2373ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2335int native_ioapic_set_affinity(struct irq_data *data,
2374 bool force) 2336 const struct cpumask *mask,
2337 bool force)
2375{ 2338{
2376 unsigned int dest, irq = data->irq; 2339 unsigned int dest, irq = data->irq;
2377 unsigned long flags; 2340 unsigned long flags;
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data)
2548 ioapic_irqd_unmask(data, cfg, masked); 2511 ioapic_irqd_unmask(data, cfg, masked);
2549} 2512}
2550 2513
2551#ifdef CONFIG_IRQ_REMAP
2552static void ir_ack_apic_edge(struct irq_data *data)
2553{
2554 ack_APIC_irq();
2555}
2556
2557static void ir_ack_apic_level(struct irq_data *data)
2558{
2559 ack_APIC_irq();
2560 eoi_ioapic_irq(data->irq, data->chip_data);
2561}
2562
2563static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
2564{
2565 seq_printf(p, " IR-%s", data->chip->name);
2566}
2567
2568static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
2569{
2570 chip->irq_print_chip = ir_print_prefix;
2571 chip->irq_ack = ir_ack_apic_edge;
2572 chip->irq_eoi = ir_ack_apic_level;
2573
2574 chip->irq_set_affinity = set_remapped_irq_affinity;
2575}
2576#endif /* CONFIG_IRQ_REMAP */
2577
2578static struct irq_chip ioapic_chip __read_mostly = { 2514static struct irq_chip ioapic_chip __read_mostly = {
2579 .name = "IO-APIC", 2515 .name = "IO-APIC",
2580 .irq_startup = startup_ioapic_irq, 2516 .irq_startup = startup_ioapic_irq,
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
2582 .irq_unmask = unmask_ioapic_irq, 2518 .irq_unmask = unmask_ioapic_irq,
2583 .irq_ack = ack_apic_edge, 2519 .irq_ack = ack_apic_edge,
2584 .irq_eoi = ack_apic_level, 2520 .irq_eoi = ack_apic_level,
2585 .irq_set_affinity = ioapic_set_affinity, 2521 .irq_set_affinity = native_ioapic_set_affinity,
2586 .irq_retrigger = ioapic_retrigger_irq, 2522 .irq_retrigger = ioapic_retrigger_irq,
2587}; 2523};
2588 2524
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void)
2781 * 8259A. 2717 * 8259A.
2782 */ 2718 */
2783 if (pin1 == -1) { 2719 if (pin1 == -1) {
2784 if (irq_remapping_enabled) 2720 panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
2785 panic("BIOS bug: timer not connected to IO-APIC");
2786 pin1 = pin2; 2721 pin1 = pin2;
2787 apic1 = apic2; 2722 apic1 = apic2;
2788 no_pin1 = 1; 2723 no_pin1 = 1;
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void)
2814 clear_IO_APIC_pin(0, pin1); 2749 clear_IO_APIC_pin(0, pin1);
2815 goto out; 2750 goto out;
2816 } 2751 }
2817 if (irq_remapping_enabled) 2752 panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
2818 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2819 local_irq_disable(); 2753 local_irq_disable();
2820 clear_IO_APIC_pin(apic1, pin1); 2754 clear_IO_APIC_pin(apic1, pin1);
2821 if (!no_pin1) 2755 if (!no_pin1)
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops);
2982/* 2916/*
2983 * Dynamic irq allocate and deallocation 2917 * Dynamic irq allocate and deallocation
2984 */ 2918 */
2985unsigned int create_irq_nr(unsigned int from, int node) 2919unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
2986{ 2920{
2987 struct irq_cfg *cfg; 2921 struct irq_cfg **cfg;
2988 unsigned long flags; 2922 unsigned long flags;
2989 unsigned int ret = 0; 2923 int irq, i;
2990 int irq;
2991 2924
2992 if (from < nr_irqs_gsi) 2925 if (from < nr_irqs_gsi)
2993 from = nr_irqs_gsi; 2926 from = nr_irqs_gsi;
2994 2927
2995 irq = alloc_irq_from(from, node); 2928 cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
2996 if (irq < 0) 2929 if (!cfg)
2997 return 0;
2998 cfg = alloc_irq_cfg(irq, node);
2999 if (!cfg) {
3000 free_irq_at(irq, NULL);
3001 return 0; 2930 return 0;
2931
2932 irq = alloc_irqs_from(from, count, node);
2933 if (irq < 0)
2934 goto out_cfgs;
2935
2936 for (i = 0; i < count; i++) {
2937 cfg[i] = alloc_irq_cfg(irq + i, node);
2938 if (!cfg[i])
2939 goto out_irqs;
3002 } 2940 }
3003 2941
3004 raw_spin_lock_irqsave(&vector_lock, flags); 2942 raw_spin_lock_irqsave(&vector_lock, flags);
3005 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 2943 for (i = 0; i < count; i++)
3006 ret = irq; 2944 if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
2945 goto out_vecs;
3007 raw_spin_unlock_irqrestore(&vector_lock, flags); 2946 raw_spin_unlock_irqrestore(&vector_lock, flags);
3008 2947
3009 if (ret) { 2948 for (i = 0; i < count; i++) {
3010 irq_set_chip_data(irq, cfg); 2949 irq_set_chip_data(irq + i, cfg[i]);
3011 irq_clear_status_flags(irq, IRQ_NOREQUEST); 2950 irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
3012 } else {
3013 free_irq_at(irq, cfg);
3014 } 2951 }
3015 return ret; 2952
2953 kfree(cfg);
2954 return irq;
2955
2956out_vecs:
2957 for (i--; i >= 0; i--)
2958 __clear_irq_vector(irq + i, cfg[i]);
2959 raw_spin_unlock_irqrestore(&vector_lock, flags);
2960out_irqs:
2961 for (i = 0; i < count; i++)
2962 free_irq_at(irq + i, cfg[i]);
2963out_cfgs:
2964 kfree(cfg);
2965 return 0;
2966}
2967
2968unsigned int create_irq_nr(unsigned int from, int node)
2969{
2970 return __create_irqs(from, 1, node);
3016} 2971}
3017 2972
3018int create_irq(void) 2973int create_irq(void)
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq)
3037 2992
3038 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 2993 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3039 2994
3040 if (irq_remapped(cfg)) 2995 free_remapped_irq(irq);
3041 free_remapped_irq(irq); 2996
3042 raw_spin_lock_irqsave(&vector_lock, flags); 2997 raw_spin_lock_irqsave(&vector_lock, flags);
3043 __clear_irq_vector(irq, cfg); 2998 __clear_irq_vector(irq, cfg);
3044 raw_spin_unlock_irqrestore(&vector_lock, flags); 2999 raw_spin_unlock_irqrestore(&vector_lock, flags);
3045 free_irq_at(irq, cfg); 3000 free_irq_at(irq, cfg);
3046} 3001}
3047 3002
3003void destroy_irqs(unsigned int irq, unsigned int count)
3004{
3005 unsigned int i;
3006
3007 for (i = 0; i < count; i++)
3008 destroy_irq(irq + i);
3009}
3010
3048/* 3011/*
3049 * MSI message composition 3012 * MSI message composition
3050 */ 3013 */
3051#ifdef CONFIG_PCI_MSI 3014void native_compose_msi_msg(struct pci_dev *pdev,
3052static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3015 unsigned int irq, unsigned int dest,
3053 struct msi_msg *msg, u8 hpet_id) 3016 struct msi_msg *msg, u8 hpet_id)
3054{ 3017{
3055 struct irq_cfg *cfg; 3018 struct irq_cfg *cfg = irq_cfg(irq);
3056 int err;
3057 unsigned dest;
3058
3059 if (disable_apic)
3060 return -ENXIO;
3061
3062 cfg = irq_cfg(irq);
3063 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3064 if (err)
3065 return err;
3066 3019
3067 err = apic->cpu_mask_to_apicid_and(cfg->domain, 3020 msg->address_hi = MSI_ADDR_BASE_HI;
3068 apic->target_cpus(), &dest);
3069 if (err)
3070 return err;
3071
3072 if (irq_remapped(cfg)) {
3073 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
3074 return err;
3075 }
3076 3021
3077 if (x2apic_enabled()) 3022 if (x2apic_enabled())
3078 msg->address_hi = MSI_ADDR_BASE_HI | 3023 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
3079 MSI_ADDR_EXT_DEST_ID(dest);
3080 else
3081 msg->address_hi = MSI_ADDR_BASE_HI;
3082 3024
3083 msg->address_lo = 3025 msg->address_lo =
3084 MSI_ADDR_BASE_LO | 3026 MSI_ADDR_BASE_LO |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3097 MSI_DATA_DELIVERY_FIXED: 3039 MSI_DATA_DELIVERY_FIXED:
3098 MSI_DATA_DELIVERY_LOWPRI) | 3040 MSI_DATA_DELIVERY_LOWPRI) |
3099 MSI_DATA_VECTOR(cfg->vector); 3041 MSI_DATA_VECTOR(cfg->vector);
3042}
3100 3043
3101 return err; 3044#ifdef CONFIG_PCI_MSI
3045static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3046 struct msi_msg *msg, u8 hpet_id)
3047{
3048 struct irq_cfg *cfg;
3049 int err;
3050 unsigned dest;
3051
3052 if (disable_apic)
3053 return -ENXIO;
3054
3055 cfg = irq_cfg(irq);
3056 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3057 if (err)
3058 return err;
3059
3060 err = apic->cpu_mask_to_apicid_and(cfg->domain,
3061 apic->target_cpus(), &dest);
3062 if (err)
3063 return err;
3064
3065 x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
3066
3067 return 0;
3102} 3068}
3103 3069
3104static int 3070static int
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = {
3136 .irq_retrigger = ioapic_retrigger_irq, 3102 .irq_retrigger = ioapic_retrigger_irq,
3137}; 3103};
3138 3104
3139static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3105int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
3106 unsigned int irq_base, unsigned int irq_offset)
3140{ 3107{
3141 struct irq_chip *chip = &msi_chip; 3108 struct irq_chip *chip = &msi_chip;
3142 struct msi_msg msg; 3109 struct msi_msg msg;
3110 unsigned int irq = irq_base + irq_offset;
3143 int ret; 3111 int ret;
3144 3112
3145 ret = msi_compose_msg(dev, irq, &msg, -1); 3113 ret = msi_compose_msg(dev, irq, &msg, -1);
3146 if (ret < 0) 3114 if (ret < 0)
3147 return ret; 3115 return ret;
3148 3116
3149 irq_set_msi_desc(irq, msidesc); 3117 irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
3150 write_msi_msg(irq, &msg);
3151 3118
3152 if (irq_remapped(irq_get_chip_data(irq))) { 3119 /*
3153 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3120 * MSI-X message is written per-IRQ, the offset is always 0.
3154 irq_remap_modify_chip_defaults(chip); 3121 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
3155 } 3122 */
3123 if (!irq_offset)
3124 write_msi_msg(irq, &msg);
3125
3126 setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
3156 3127
3157 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3128 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3158 3129
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3163 3134
3164int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3135int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3165{ 3136{
3166 int node, ret, sub_handle, index = 0;
3167 unsigned int irq, irq_want; 3137 unsigned int irq, irq_want;
3168 struct msi_desc *msidesc; 3138 struct msi_desc *msidesc;
3139 int node, ret;
3169 3140
3170 /* x86 doesn't support multiple MSI yet */ 3141 /* Multiple MSI vectors only supported with interrupt remapping */
3171 if (type == PCI_CAP_ID_MSI && nvec > 1) 3142 if (type == PCI_CAP_ID_MSI && nvec > 1)
3172 return 1; 3143 return 1;
3173 3144
3174 node = dev_to_node(&dev->dev); 3145 node = dev_to_node(&dev->dev);
3175 irq_want = nr_irqs_gsi; 3146 irq_want = nr_irqs_gsi;
3176 sub_handle = 0;
3177 list_for_each_entry(msidesc, &dev->msi_list, list) { 3147 list_for_each_entry(msidesc, &dev->msi_list, list) {
3178 irq = create_irq_nr(irq_want, node); 3148 irq = create_irq_nr(irq_want, node);
3179 if (irq == 0) 3149 if (irq == 0)
3180 return -1; 3150 return -ENOSPC;
3151
3181 irq_want = irq + 1; 3152 irq_want = irq + 1;
3182 if (!irq_remapping_enabled)
3183 goto no_ir;
3184 3153
3185 if (!sub_handle) { 3154 ret = setup_msi_irq(dev, msidesc, irq, 0);
3186 /*
3187 * allocate the consecutive block of IRTE's
3188 * for 'nvec'
3189 */
3190 index = msi_alloc_remapped_irq(dev, irq, nvec);
3191 if (index < 0) {
3192 ret = index;
3193 goto error;
3194 }
3195 } else {
3196 ret = msi_setup_remapped_irq(dev, irq, index,
3197 sub_handle);
3198 if (ret < 0)
3199 goto error;
3200 }
3201no_ir:
3202 ret = setup_msi_irq(dev, msidesc, irq);
3203 if (ret < 0) 3155 if (ret < 0)
3204 goto error; 3156 goto error;
3205 sub_handle++;
3206 } 3157 }
3207 return 0; 3158 return 0;
3208 3159
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = {
3298 .irq_retrigger = ioapic_retrigger_irq, 3249 .irq_retrigger = ioapic_retrigger_irq,
3299}; 3250};
3300 3251
3301int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3252int default_setup_hpet_msi(unsigned int irq, unsigned int id)
3302{ 3253{
3303 struct irq_chip *chip = &hpet_msi_type; 3254 struct irq_chip *chip = &hpet_msi_type;
3304 struct msi_msg msg; 3255 struct msi_msg msg;
3305 int ret; 3256 int ret;
3306 3257
3307 if (irq_remapping_enabled) {
3308 ret = setup_hpet_msi_remapped(irq, id);
3309 if (ret)
3310 return ret;
3311 }
3312
3313 ret = msi_compose_msg(NULL, irq, &msg, id); 3258 ret = msi_compose_msg(NULL, irq, &msg, id);
3314 if (ret < 0) 3259 if (ret < 0)
3315 return ret; 3260 return ret;
3316 3261
3317 hpet_msi_write(irq_get_handler_data(irq), &msg); 3262 hpet_msi_write(irq_get_handler_data(irq), &msg);
3318 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3263 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3319 if (irq_remapped(irq_get_chip_data(irq))) 3264 setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
3320 irq_remap_modify_chip_defaults(chip);
3321 3265
3322 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3266 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3323 return 0; 3267 return 0;
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void)
3683 else 3627 else
3684 mask = apic->target_cpus(); 3628 mask = apic->target_cpus();
3685 3629
3686 if (irq_remapping_enabled) 3630 x86_io_apic_ops.set_affinity(idata, mask, false);
3687 set_remapped_irq_affinity(idata, mask, false);
3688 else
3689 ioapic_set_affinity(idata, mask, false);
3690 } 3631 }
3691 3632
3692} 3633}
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index cce91bf26676..7434d8556d09 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
106 unsigned long mask = cpumask_bits(cpumask)[0]; 106 unsigned long mask = cpumask_bits(cpumask)[0];
107 unsigned long flags; 107 unsigned long flags;
108 108
109 if (WARN_ONCE(!mask, "empty IPI mask")) 109 if (!mask)
110 return; 110 return;
111 111
112 local_irq_save(flags); 112 local_irq_save(flags);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index e03a1e180e81..562a76d433c8 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
20} 20}
21early_param("x2apic_phys", set_x2apic_phys_mode); 21early_param("x2apic_phys", set_x2apic_phys_mode);
22 22
23static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 23static bool x2apic_fadt_phys(void)
24{ 24{
25 if (x2apic_phys) 25 if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
26 return x2apic_enabled(); 26 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
27 else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
28 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
29 x2apic_enabled()) {
30 printk(KERN_DEBUG "System requires x2apic physical mode\n"); 27 printk(KERN_DEBUG "System requires x2apic physical mode\n");
31 return 1; 28 return true;
32 } 29 }
33 else 30 return false;
34 return 0; 31}
32
33static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
34{
35 return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
35} 36}
36 37
37static void 38static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
82 83
83static int x2apic_phys_probe(void) 84static int x2apic_phys_probe(void)
84{ 85{
85 if (x2apic_mode && x2apic_phys) 86 if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
86 return 1; 87 return 1;
87 88
88 return apic == &apic_x2apic_phys; 89 return apic == &apic_x2apic_phys;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8cfade9510a4..794f6eb54cd3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV APIC functions (note: not an Intel compatible APIC) 6 * SGI UV APIC functions (note: not an Intel compatible APIC)
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
@@ -91,10 +91,16 @@ static int __init early_get_pnodeid(void)
91 m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR); 91 m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
92 uv_min_hub_revision_id = node_id.s.revision; 92 uv_min_hub_revision_id = node_id.s.revision;
93 93
94 if (node_id.s.part_number == UV2_HUB_PART_NUMBER) 94 switch (node_id.s.part_number) {
95 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; 95 case UV2_HUB_PART_NUMBER:
96 if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X) 96 case UV2_HUB_PART_NUMBER_X:
97 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; 97 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
98 break;
99 case UV3_HUB_PART_NUMBER:
100 case UV3_HUB_PART_NUMBER_X:
101 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
102 break;
103 }
98 104
99 uv_hub_info->hub_revision = uv_min_hub_revision_id; 105 uv_hub_info->hub_revision = uv_min_hub_revision_id;
100 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); 106 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
@@ -130,13 +136,16 @@ static void __init uv_set_apicid_hibit(void)
130 136
131static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 137static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
132{ 138{
133 int pnodeid, is_uv1, is_uv2; 139 int pnodeid, is_uv1, is_uv2, is_uv3;
134 140
135 is_uv1 = !strcmp(oem_id, "SGI"); 141 is_uv1 = !strcmp(oem_id, "SGI");
136 is_uv2 = !strcmp(oem_id, "SGI2"); 142 is_uv2 = !strcmp(oem_id, "SGI2");
137 if (is_uv1 || is_uv2) { 143 is_uv3 = !strncmp(oem_id, "SGI3", 4); /* there are varieties of UV3 */
144 if (is_uv1 || is_uv2 || is_uv3) {
138 uv_hub_info->hub_revision = 145 uv_hub_info->hub_revision =
139 is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE; 146 (is_uv1 ? UV1_HUB_REVISION_BASE :
147 (is_uv2 ? UV2_HUB_REVISION_BASE :
148 UV3_HUB_REVISION_BASE));
140 pnodeid = early_get_pnodeid(); 149 pnodeid = early_get_pnodeid();
141 early_get_apic_pnode_shift(); 150 early_get_apic_pnode_shift();
142 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; 151 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -450,14 +459,17 @@ static __init void map_high(char *id, unsigned long base, int pshift,
450 459
451 paddr = base << pshift; 460 paddr = base << pshift;
452 bytes = (1UL << bshift) * (max_pnode + 1); 461 bytes = (1UL << bshift) * (max_pnode + 1);
453 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 462 if (!paddr) {
454 paddr + bytes); 463 pr_info("UV: Map %s_HI base address NULL\n", id);
464 return;
465 }
466 pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
455 if (map_type == map_uc) 467 if (map_type == map_uc)
456 init_extra_mapping_uc(paddr, bytes); 468 init_extra_mapping_uc(paddr, bytes);
457 else 469 else
458 init_extra_mapping_wb(paddr, bytes); 470 init_extra_mapping_wb(paddr, bytes);
459
460} 471}
472
461static __init void map_gru_high(int max_pnode) 473static __init void map_gru_high(int max_pnode)
462{ 474{
463 union uvh_rh_gam_gru_overlay_config_mmr_u gru; 475 union uvh_rh_gam_gru_overlay_config_mmr_u gru;
@@ -468,7 +480,8 @@ static __init void map_gru_high(int max_pnode)
468 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); 480 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
469 gru_start_paddr = ((u64)gru.s.base << shift); 481 gru_start_paddr = ((u64)gru.s.base << shift);
470 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); 482 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
471 483 } else {
484 pr_info("UV: GRU disabled\n");
472 } 485 }
473} 486}
474 487
@@ -480,23 +493,146 @@ static __init void map_mmr_high(int max_pnode)
480 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); 493 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
481 if (mmr.s.enable) 494 if (mmr.s.enable)
482 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); 495 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
496 else
497 pr_info("UV: MMR disabled\n");
498}
499
500/*
501 * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
502 * and REDIRECT MMR regs are exactly the same on UV3.
503 */
504struct mmioh_config {
505 unsigned long overlay;
506 unsigned long redirect;
507 char *id;
508};
509
510static __initdata struct mmioh_config mmiohs[] = {
511 {
512 UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
513 UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
514 "MMIOH0"
515 },
516 {
517 UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
518 UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
519 "MMIOH1"
520 },
521};
522
523static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
524{
525 union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
526 unsigned long mmr;
527 unsigned long base;
528 int i, n, shift, m_io, max_io;
529 int nasid, lnasid, fi, li;
530 char *id;
531
532 id = mmiohs[index].id;
533 overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
534 pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
535 id, overlay.v, overlay.s3.base, overlay.s3.m_io);
536 if (!overlay.s3.enable) {
537 pr_info("UV: %s disabled\n", id);
538 return;
539 }
540
541 shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
542 base = (unsigned long)overlay.s3.base;
543 m_io = overlay.s3.m_io;
544 mmr = mmiohs[index].redirect;
545 n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
546 min_pnode *= 2; /* convert to NASID */
547 max_pnode *= 2;
548 max_io = lnasid = fi = li = -1;
549
550 for (i = 0; i < n; i++) {
551 union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
552
553 redirect.v = uv_read_local_mmr(mmr + i * 8);
554 nasid = redirect.s3.nasid;
555 if (nasid < min_pnode || max_pnode < nasid)
556 nasid = -1; /* invalid NASID */
557
558 if (nasid == lnasid) {
559 li = i;
560 if (i != n-1) /* last entry check */
561 continue;
562 }
563
564 /* check if we have a cached (or last) redirect to print */
565 if (lnasid != -1 || (i == n-1 && nasid != -1)) {
566 unsigned long addr1, addr2;
567 int f, l;
568
569 if (lnasid == -1) {
570 f = l = i;
571 lnasid = nasid;
572 } else {
573 f = fi;
574 l = li;
575 }
576 addr1 = (base << shift) +
577 f * (unsigned long)(1 << m_io);
578 addr2 = (base << shift) +
579 (l + 1) * (unsigned long)(1 << m_io);
580 pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
581 id, fi, li, lnasid, addr1, addr2);
582 if (max_io < l)
583 max_io = l;
584 }
585 fi = li = i;
586 lnasid = nasid;
587 }
588
589 pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
590 id, base, shift, m_io, max_io);
591
592 if (max_io >= 0)
593 map_high(id, base, shift, m_io, max_io, map_uc);
483} 594}
484 595
485static __init void map_mmioh_high(int max_pnode) 596static __init void map_mmioh_high(int min_pnode, int max_pnode)
486{ 597{
487 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; 598 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
488 int shift; 599 unsigned long mmr, base;
600 int shift, enable, m_io, n_io;
489 601
490 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 602 if (is_uv3_hub()) {
491 if (is_uv1_hub() && mmioh.s1.enable) { 603 /* Map both MMIOH Regions */
492 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 604 map_mmioh_high_uv3(0, min_pnode, max_pnode);
493 map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io, 605 map_mmioh_high_uv3(1, min_pnode, max_pnode);
494 max_pnode, map_uc); 606 return;
495 } 607 }
496 if (is_uv2_hub() && mmioh.s2.enable) { 608
609 if (is_uv1_hub()) {
610 mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
611 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
612 mmioh.v = uv_read_local_mmr(mmr);
613 enable = !!mmioh.s1.enable;
614 base = mmioh.s1.base;
615 m_io = mmioh.s1.m_io;
616 n_io = mmioh.s1.n_io;
617 } else if (is_uv2_hub()) {
618 mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
497 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 619 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
498 map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io, 620 mmioh.v = uv_read_local_mmr(mmr);
499 max_pnode, map_uc); 621 enable = !!mmioh.s2.enable;
622 base = mmioh.s2.base;
623 m_io = mmioh.s2.m_io;
624 n_io = mmioh.s2.n_io;
625 } else
626 return;
627
628 if (enable) {
629 max_pnode &= (1 << n_io) - 1;
630 pr_info(
631 "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
632 base, shift, m_io, n_io, max_pnode);
633 map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
634 } else {
635 pr_info("UV: MMIOH disabled\n");
500 } 636 }
501} 637}
502 638
@@ -724,42 +860,41 @@ void uv_nmi_init(void)
724void __init uv_system_init(void) 860void __init uv_system_init(void)
725{ 861{
726 union uvh_rh_gam_config_mmr_u m_n_config; 862 union uvh_rh_gam_config_mmr_u m_n_config;
727 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
728 union uvh_node_id_u node_id; 863 union uvh_node_id_u node_id;
729 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 864 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
730 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io; 865 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
731 int gnode_extra, max_pnode = 0; 866 int gnode_extra, min_pnode = 999999, max_pnode = -1;
732 unsigned long mmr_base, present, paddr; 867 unsigned long mmr_base, present, paddr;
733 unsigned short pnode_mask, pnode_io_mask; 868 unsigned short pnode_mask;
869 char *hub = (is_uv1_hub() ? "UV1" :
870 (is_uv2_hub() ? "UV2" :
871 "UV3"));
734 872
735 printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2"); 873 pr_info("UV: Found %s hub\n", hub);
736 map_low_mmrs(); 874 map_low_mmrs();
737 875
738 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); 876 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
739 m_val = m_n_config.s.m_skt; 877 m_val = m_n_config.s.m_skt;
740 n_val = m_n_config.s.n_skt; 878 n_val = m_n_config.s.n_skt;
741 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 879 pnode_mask = (1 << n_val) - 1;
742 n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
743 mmr_base = 880 mmr_base =
744 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & 881 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
745 ~UV_MMR_ENABLE; 882 ~UV_MMR_ENABLE;
746 pnode_mask = (1 << n_val) - 1;
747 pnode_io_mask = (1 << n_io) - 1;
748 883
749 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 884 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
750 gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1; 885 gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
751 gnode_upper = ((unsigned long)gnode_extra << m_val); 886 gnode_upper = ((unsigned long)gnode_extra << m_val);
752 printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n", 887 pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n",
753 n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask); 888 n_val, m_val, pnode_mask, gnode_upper, gnode_extra);
754 889
755 printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); 890 pr_info("UV: global MMR base 0x%lx\n", mmr_base);
756 891
757 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) 892 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
758 uv_possible_blades += 893 uv_possible_blades +=
759 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); 894 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
760 895
761 /* uv_num_possible_blades() is really the hub count */ 896 /* uv_num_possible_blades() is really the hub count */
762 printk(KERN_INFO "UV: Found %d blades, %d hubs\n", 897 pr_info("UV: Found %d blades, %d hubs\n",
763 is_uv1_hub() ? uv_num_possible_blades() : 898 is_uv1_hub() ? uv_num_possible_blades() :
764 (uv_num_possible_blades() + 1) / 2, 899 (uv_num_possible_blades() + 1) / 2,
765 uv_num_possible_blades()); 900 uv_num_possible_blades());
@@ -794,6 +929,7 @@ void __init uv_system_init(void)
794 uv_blade_info[blade].nr_possible_cpus = 0; 929 uv_blade_info[blade].nr_possible_cpus = 0;
795 uv_blade_info[blade].nr_online_cpus = 0; 930 uv_blade_info[blade].nr_online_cpus = 0;
796 spin_lock_init(&uv_blade_info[blade].nmi_lock); 931 spin_lock_init(&uv_blade_info[blade].nmi_lock);
932 min_pnode = min(pnode, min_pnode);
797 max_pnode = max(pnode, max_pnode); 933 max_pnode = max(pnode, max_pnode);
798 blade++; 934 blade++;
799 } 935 }
@@ -856,7 +992,7 @@ void __init uv_system_init(void)
856 992
857 map_gru_high(max_pnode); 993 map_gru_high(max_pnode);
858 map_mmr_high(max_pnode); 994 map_mmr_high(max_pnode);
859 map_mmioh_high(max_pnode & pnode_io_mask); 995 map_mmioh_high(min_pnode, max_pnode);
860 996
861 uv_cpu_init(); 997 uv_cpu_init();
862 uv_scir_register_cpu_notifier(); 998 uv_scir_register_cpu_notifier();
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..66b5faffe14a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
232#include <linux/acpi.h> 232#include <linux/acpi.h>
233#include <linux/syscore_ops.h> 233#include <linux/syscore_ops.h>
234#include <linux/i8253.h> 234#include <linux/i8253.h>
235#include <linux/cpuidle.h>
235 236
236#include <asm/uaccess.h> 237#include <asm/uaccess.h>
237#include <asm/desc.h> 238#include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
360 * idle percentage above which bios idle calls are done 361 * idle percentage above which bios idle calls are done
361 */ 362 */
362#ifdef CONFIG_APM_CPU_IDLE 363#ifdef CONFIG_APM_CPU_IDLE
363#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
364#define DEFAULT_IDLE_THRESHOLD 95 364#define DEFAULT_IDLE_THRESHOLD 95
365#else 365#else
366#define DEFAULT_IDLE_THRESHOLD 100 366#define DEFAULT_IDLE_THRESHOLD 100
367#endif 367#endif
368#define DEFAULT_IDLE_PERIOD (100 / 3) 368#define DEFAULT_IDLE_PERIOD (100 / 3)
369 369
370static int apm_cpu_idle(struct cpuidle_device *dev,
371 struct cpuidle_driver *drv, int index);
372
373static struct cpuidle_driver apm_idle_driver = {
374 .name = "apm_idle",
375 .owner = THIS_MODULE,
376 .en_core_tk_irqen = 1,
377 .states = {
378 { /* entry 0 is for polling */ },
379 { /* entry 1 is for APM idle */
380 .name = "APM",
381 .desc = "APM idle",
382 .flags = CPUIDLE_FLAG_TIME_VALID,
383 .exit_latency = 250, /* WAG */
384 .target_residency = 500, /* WAG */
385 .enter = &apm_cpu_idle
386 },
387 },
388 .state_count = 2,
389};
390
391static struct cpuidle_device apm_cpuidle_device;
392
370/* 393/*
371 * Local variables 394 * Local variables
372 */ 395 */
@@ -377,7 +400,6 @@ static struct {
377static int clock_slowed; 400static int clock_slowed;
378static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; 401static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
379static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; 402static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
380static int set_pm_idle;
381static int suspends_pending; 403static int suspends_pending;
382static int standbys_pending; 404static int standbys_pending;
383static int ignore_sys_suspend; 405static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
884#define IDLE_CALC_LIMIT (HZ * 100) 906#define IDLE_CALC_LIMIT (HZ * 100)
885#define IDLE_LEAKY_MAX 16 907#define IDLE_LEAKY_MAX 16
886 908
887static void (*original_pm_idle)(void) __read_mostly;
888
889/** 909/**
890 * apm_cpu_idle - cpu idling for APM capable Linux 910 * apm_cpu_idle - cpu idling for APM capable Linux
891 * 911 *
@@ -894,35 +914,36 @@ static void (*original_pm_idle)(void) __read_mostly;
894 * Furthermore it calls the system default idle routine. 914 * Furthermore it calls the system default idle routine.
895 */ 915 */
896 916
897static void apm_cpu_idle(void) 917static int apm_cpu_idle(struct cpuidle_device *dev,
918 struct cpuidle_driver *drv, int index)
898{ 919{
899 static int use_apm_idle; /* = 0 */ 920 static int use_apm_idle; /* = 0 */
900 static unsigned int last_jiffies; /* = 0 */ 921 static unsigned int last_jiffies; /* = 0 */
901 static unsigned int last_stime; /* = 0 */ 922 static unsigned int last_stime; /* = 0 */
923 cputime_t stime;
902 924
903 int apm_idle_done = 0; 925 int apm_idle_done = 0;
904 unsigned int jiffies_since_last_check = jiffies - last_jiffies; 926 unsigned int jiffies_since_last_check = jiffies - last_jiffies;
905 unsigned int bucket; 927 unsigned int bucket;
906 928
907 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
908recalc: 929recalc:
930 task_cputime(current, NULL, &stime);
909 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 931 if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
910 use_apm_idle = 0; 932 use_apm_idle = 0;
911 last_jiffies = jiffies;
912 last_stime = current->stime;
913 } else if (jiffies_since_last_check > idle_period) { 933 } else if (jiffies_since_last_check > idle_period) {
914 unsigned int idle_percentage; 934 unsigned int idle_percentage;
915 935
916 idle_percentage = current->stime - last_stime; 936 idle_percentage = stime - last_stime;
917 idle_percentage *= 100; 937 idle_percentage *= 100;
918 idle_percentage /= jiffies_since_last_check; 938 idle_percentage /= jiffies_since_last_check;
919 use_apm_idle = (idle_percentage > idle_threshold); 939 use_apm_idle = (idle_percentage > idle_threshold);
920 if (apm_info.forbid_idle) 940 if (apm_info.forbid_idle)
921 use_apm_idle = 0; 941 use_apm_idle = 0;
922 last_jiffies = jiffies;
923 last_stime = current->stime;
924 } 942 }
925 943
944 last_jiffies = jiffies;
945 last_stime = stime;
946
926 bucket = IDLE_LEAKY_MAX; 947 bucket = IDLE_LEAKY_MAX;
927 948
928 while (!need_resched()) { 949 while (!need_resched()) {
@@ -950,10 +971,7 @@ recalc:
950 break; 971 break;
951 } 972 }
952 } 973 }
953 if (original_pm_idle) 974 default_idle();
954 original_pm_idle();
955 else
956 default_idle();
957 local_irq_disable(); 975 local_irq_disable();
958 jiffies_since_last_check = jiffies - last_jiffies; 976 jiffies_since_last_check = jiffies - last_jiffies;
959 if (jiffies_since_last_check > idle_period) 977 if (jiffies_since_last_check > idle_period)
@@ -963,7 +981,7 @@ recalc:
963 if (apm_idle_done) 981 if (apm_idle_done)
964 apm_do_busy(); 982 apm_do_busy();
965 983
966 local_irq_enable(); 984 return index;
967} 985}
968 986
969/** 987/**
@@ -2381,9 +2399,9 @@ static int __init apm_init(void)
2381 if (HZ != 100) 2399 if (HZ != 100)
2382 idle_period = (idle_period * HZ) / 100; 2400 idle_period = (idle_period * HZ) / 100;
2383 if (idle_threshold < 100) { 2401 if (idle_threshold < 100) {
2384 original_pm_idle = pm_idle; 2402 if (!cpuidle_register_driver(&apm_idle_driver))
2385 pm_idle = apm_cpu_idle; 2403 if (cpuidle_register_device(&apm_cpuidle_device))
2386 set_pm_idle = 1; 2404 cpuidle_unregister_driver(&apm_idle_driver);
2387 } 2405 }
2388 2406
2389 return 0; 2407 return 0;
@@ -2393,15 +2411,9 @@ static void __exit apm_exit(void)
2393{ 2411{
2394 int error; 2412 int error;
2395 2413
2396 if (set_pm_idle) { 2414 cpuidle_unregister_device(&apm_cpuidle_device);
2397 pm_idle = original_pm_idle; 2415 cpuidle_unregister_driver(&apm_idle_driver);
2398 /* 2416
2399 * We are about to unload the current idle thread pm callback
2400 * (pm_idle), Wait for all processors to update cached/local
2401 * copies of pm_idle before proceeding.
2402 */
2403 kick_all_cpus_sync();
2404 }
2405 if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) 2417 if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
2406 && (apm_info.connection_version > 0x0100)) { 2418 && (apm_info.connection_version > 0x0100)) {
2407 error = apm_engage_power_management(APM_DEVICE_ALL, 0); 2419 error = apm_engage_power_management(APM_DEVICE_ALL, 0);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15239fffd6fe..782c456eaa01 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -364,9 +364,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
364#endif 364#endif
365} 365}
366 366
367int amd_get_nb_id(int cpu) 367u16 amd_get_nb_id(int cpu)
368{ 368{
369 int id = 0; 369 u16 id = 0;
370#ifdef CONFIG_SMP 370#ifdef CONFIG_SMP
371 id = per_cpu(cpu_llc_id, cpu); 371 id = per_cpu(cpu_llc_id, cpu);
372#endif 372#endif
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
17#include <asm/paravirt.h> 17#include <asm/paravirt.h>
18#include <asm/alternative.h> 18#include <asm/alternative.h>
19 19
20static int __init no_halt(char *s)
21{
22 WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
23 boot_cpu_data.hlt_works_ok = 0;
24 return 1;
25}
26
27__setup("no-hlt", no_halt);
28
29static int __init no_387(char *s) 20static int __init no_387(char *s)
30{ 21{
31 boot_cpu_data.hard_math = 0; 22 boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
89 pr_warn("Hmm, FPU with FDIV bug\n"); 80 pr_warn("Hmm, FPU with FDIV bug\n");
90} 81}
91 82
92static void __init check_hlt(void)
93{
94 if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
95 return;
96
97 pr_info("Checking 'hlt' instruction... ");
98 if (!boot_cpu_data.hlt_works_ok) {
99 pr_cont("disabled\n");
100 return;
101 }
102 halt();
103 halt();
104 halt();
105 halt();
106 pr_cont("OK\n");
107}
108
109/* 83/*
110 * Check whether we are able to run this kernel safely on SMP. 84 * Check whether we are able to run this kernel safely on SMP.
111 * 85 *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
129 print_cpu_info(&boot_cpu_data); 103 print_cpu_info(&boot_cpu_data);
130#endif 104#endif
131 check_config(); 105 check_config();
132 check_hlt();
133 init_utsname()->machine[1] = 106 init_utsname()->machine[1] =
134 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 107 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
135 alternative_instructions(); 108 alternative_instructions();
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index a8f8fa9769d6..1e7e84a02eba 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void)
79 if (x86_hyper->init_platform) 79 if (x86_hyper->init_platform)
80 x86_hyper->init_platform(); 80 x86_hyper->init_platform();
81} 81}
82
83bool __init hypervisor_x2apic_available(void)
84{
85 return x86_hyper &&
86 x86_hyper->x2apic_available &&
87 x86_hyper->x2apic_available();
88}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fe9edec6698a..7c6f7d548c0f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -298,8 +298,7 @@ struct _cache_attr {
298 unsigned int); 298 unsigned int);
299}; 299};
300 300
301#ifdef CONFIG_AMD_NB 301#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
302
303/* 302/*
304 * L3 cache descriptors 303 * L3 cache descriptors
305 */ 304 */
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
524static struct _cache_attr subcaches = 523static struct _cache_attr subcaches =
525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches); 524 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
526 525
527#else /* CONFIG_AMD_NB */ 526#else
528#define amd_init_l3_cache(x, y) 527#define amd_init_l3_cache(x, y)
529#endif /* CONFIG_AMD_NB */ 528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
530 529
531static int 530static int
532__cpuinit cpuid4_cache_lookup_regs(int index, 531__cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1227,7 +1226,7 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1227 .notifier_call = cacheinfo_cpu_callback, 1226 .notifier_call = cacheinfo_cpu_callback,
1228}; 1227};
1229 1228
1230static int __cpuinit cache_sysfs_init(void) 1229static int __init cache_sysfs_init(void)
1231{ 1230{
1232 int i; 1231 int i;
1233 1232
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 80dbda84f1c3..fc7608a89d93 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c)
512 512
513static void mce_schedule_work(void) 513static void mce_schedule_work(void)
514{ 514{
515 if (!mce_ring_empty()) { 515 if (!mce_ring_empty())
516 struct work_struct *work = &__get_cpu_var(mce_work); 516 schedule_work(&__get_cpu_var(mce_work));
517 if (!work_pending(work))
518 schedule_work(work);
519 }
520} 517}
521 518
522DEFINE_PER_CPU(struct irq_work, mce_irq_work); 519DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -1351,12 +1348,7 @@ int mce_notify_irq(void)
1351 /* wake processes polling /dev/mcelog */ 1348 /* wake processes polling /dev/mcelog */
1352 wake_up_interruptible(&mce_chrdev_wait); 1349 wake_up_interruptible(&mce_chrdev_wait);
1353 1350
1354 /* 1351 if (mce_helper[0])
1355 * There is no risk of missing notifications because
1356 * work_pending is always cleared before the function is
1357 * executed.
1358 */
1359 if (mce_helper[0] && !work_pending(&mce_trigger_work))
1360 schedule_work(&mce_trigger_work); 1352 schedule_work(&mce_trigger_work);
1361 1353
1362 if (__ratelimit(&ratelimit)) 1354 if (__ratelimit(&ratelimit))
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 0a630dd4b620..a7d26d83fb70 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -14,10 +14,15 @@
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/clocksource.h> 15#include <linux/clocksource.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/hardirq.h>
18#include <linux/interrupt.h>
17#include <asm/processor.h> 19#include <asm/processor.h>
18#include <asm/hypervisor.h> 20#include <asm/hypervisor.h>
19#include <asm/hyperv.h> 21#include <asm/hyperv.h>
20#include <asm/mshyperv.h> 22#include <asm/mshyperv.h>
23#include <asm/desc.h>
24#include <asm/idle.h>
25#include <asm/irq_regs.h>
21 26
22struct ms_hyperv_info ms_hyperv; 27struct ms_hyperv_info ms_hyperv;
23EXPORT_SYMBOL_GPL(ms_hyperv); 28EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -30,6 +35,13 @@ static bool __init ms_hyperv_platform(void)
30 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
31 return false; 36 return false;
32 37
38 /*
39 * Xen emulates Hyper-V to support enlightened Windows.
40 * Check to see first if we are on a Xen Hypervisor.
41 */
42 if (xen_cpuid_base())
43 return false;
44
33 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 45 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
34 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); 46 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
35 47
@@ -68,7 +80,14 @@ static void __init ms_hyperv_init_platform(void)
68 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", 80 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
69 ms_hyperv.features, ms_hyperv.hints); 81 ms_hyperv.features, ms_hyperv.hints);
70 82
71 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); 83 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
84 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
85#if IS_ENABLED(CONFIG_HYPERV)
86 /*
87 * Setup the IDT for hypervisor callback.
88 */
89 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
90#endif
72} 91}
73 92
74const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 93const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -77,3 +96,36 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
77 .init_platform = ms_hyperv_init_platform, 96 .init_platform = ms_hyperv_init_platform,
78}; 97};
79EXPORT_SYMBOL(x86_hyper_ms_hyperv); 98EXPORT_SYMBOL(x86_hyper_ms_hyperv);
99
100#if IS_ENABLED(CONFIG_HYPERV)
101static int vmbus_irq = -1;
102static irq_handler_t vmbus_isr;
103
104void hv_register_vmbus_handler(int irq, irq_handler_t handler)
105{
106 vmbus_irq = irq;
107 vmbus_isr = handler;
108}
109
110void hyperv_vector_handler(struct pt_regs *regs)
111{
112 struct pt_regs *old_regs = set_irq_regs(regs);
113 struct irq_desc *desc;
114
115 irq_enter();
116 exit_idle();
117
118 desc = irq_to_desc(vmbus_irq);
119
120 if (desc)
121 generic_handle_irq_desc(vmbus_irq, desc);
122
123 irq_exit();
124 set_irq_regs(old_regs);
125}
126#else
127void hv_register_vmbus_handler(int irq, irq_handler_t handler)
128{
129}
130#endif
131EXPORT_SYMBOL_GPL(hv_register_vmbus_handler);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4428fd178bce..bf0f01aea994 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event)
340 /* BTS is currently only allowed for user-mode. */ 340 /* BTS is currently only allowed for user-mode. */
341 if (!attr->exclude_kernel) 341 if (!attr->exclude_kernel)
342 return -EOPNOTSUPP; 342 return -EOPNOTSUPP;
343
344 if (!attr->exclude_guest)
345 return -EOPNOTSUPP;
346 } 343 }
347 344
348 hwc->config |= config; 345 hwc->config |= config;
@@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event)
385 if (event->attr.precise_ip) { 382 if (event->attr.precise_ip) {
386 int precise = 0; 383 int precise = 0;
387 384
388 if (!event->attr.exclude_guest)
389 return -EOPNOTSUPP;
390
391 /* Support for constant skid */ 385 /* Support for constant skid */
392 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { 386 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
393 precise++; 387 precise++;
@@ -835,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
835 } else { 829 } else {
836 hwc->config_base = x86_pmu_config_addr(hwc->idx); 830 hwc->config_base = x86_pmu_config_addr(hwc->idx);
837 hwc->event_base = x86_pmu_event_addr(hwc->idx); 831 hwc->event_base = x86_pmu_event_addr(hwc->idx);
838 hwc->event_base_rdpmc = hwc->idx; 832 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
839 } 833 }
840} 834}
841 835
@@ -1316,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
1316 .attrs = NULL, 1310 .attrs = NULL,
1317}; 1311};
1318 1312
1319struct perf_pmu_events_attr {
1320 struct device_attribute attr;
1321 u64 id;
1322};
1323
1324/* 1313/*
1325 * Remove all undefined events (x86_pmu.event_map(id) == 0) 1314 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1326 * out of events_attr attributes. 1315 * out of events_attr attributes.
@@ -1354,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
1354#define EVENT_VAR(_id) event_attr_##_id 1343#define EVENT_VAR(_id) event_attr_##_id
1355#define EVENT_PTR(_id) &event_attr_##_id.attr.attr 1344#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
1356 1345
1357#define EVENT_ATTR(_name, _id) \ 1346#define EVENT_ATTR(_name, _id) \
1358static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ 1347 PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \
1359 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ 1348 events_sysfs_show)
1360 .id = PERF_COUNT_HW_##_id, \
1361};
1362 1349
1363EVENT_ATTR(cpu-cycles, CPU_CYCLES ); 1350EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1364EVENT_ATTR(instructions, INSTRUCTIONS ); 1351EVENT_ATTR(instructions, INSTRUCTIONS );
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 115c1ea97746..7f5c75c2afdd 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -325,6 +325,8 @@ struct x86_pmu {
325 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); 325 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
326 unsigned eventsel; 326 unsigned eventsel;
327 unsigned perfctr; 327 unsigned perfctr;
328 int (*addr_offset)(int index, bool eventsel);
329 int (*rdpmc_index)(int index);
328 u64 (*event_map)(int); 330 u64 (*event_map)(int);
329 int max_events; 331 int max_events;
330 int num_counters; 332 int num_counters;
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs
446 448
447u64 x86_perf_event_update(struct perf_event *event); 449u64 x86_perf_event_update(struct perf_event *event);
448 450
449static inline int x86_pmu_addr_offset(int index) 451static inline unsigned int x86_pmu_config_addr(int index)
450{ 452{
451 int offset; 453 return x86_pmu.eventsel + (x86_pmu.addr_offset ?
452 454 x86_pmu.addr_offset(index, true) : index);
453 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
454 alternative_io(ASM_NOP2,
455 "shll $1, %%eax",
456 X86_FEATURE_PERFCTR_CORE,
457 "=a" (offset),
458 "a" (index));
459
460 return offset;
461} 455}
462 456
463static inline unsigned int x86_pmu_config_addr(int index) 457static inline unsigned int x86_pmu_event_addr(int index)
464{ 458{
465 return x86_pmu.eventsel + x86_pmu_addr_offset(index); 459 return x86_pmu.perfctr + (x86_pmu.addr_offset ?
460 x86_pmu.addr_offset(index, false) : index);
466} 461}
467 462
468static inline unsigned int x86_pmu_event_addr(int index) 463static inline int x86_pmu_rdpmc_index(int index)
469{ 464{
470 return x86_pmu.perfctr + x86_pmu_addr_offset(index); 465 return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
471} 466}
472 467
473int x86_setup_perfctr(struct perf_event *event); 468int x86_setup_perfctr(struct perf_event *event);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c93bc4e813a0..dfdab42aed27 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
132 return amd_perfmon_event_map[hw_event]; 132 return amd_perfmon_event_map[hw_event];
133} 133}
134 134
135static int amd_pmu_hw_config(struct perf_event *event) 135static struct event_constraint *amd_nb_event_constraint;
136
137/*
138 * Previously calculated offsets
139 */
140static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
141static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
142static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
143
144/*
145 * Legacy CPUs:
146 * 4 counters starting at 0xc0010000 each offset by 1
147 *
148 * CPUs with core performance counter extensions:
149 * 6 counters starting at 0xc0010200 each offset by 2
150 *
151 * CPUs with north bridge performance counter extensions:
152 * 4 additional counters starting at 0xc0010240 each offset by 2
153 * (indexed right above either one of the above core counters)
154 */
155static inline int amd_pmu_addr_offset(int index, bool eventsel)
136{ 156{
137 int ret; 157 int offset, first, base;
138 158
139 /* pass precise event sampling to ibs: */ 159 if (!index)
140 if (event->attr.precise_ip && get_ibs_caps()) 160 return index;
141 return -ENOENT; 161
162 if (eventsel)
163 offset = event_offsets[index];
164 else
165 offset = count_offsets[index];
166
167 if (offset)
168 return offset;
169
170 if (amd_nb_event_constraint &&
171 test_bit(index, amd_nb_event_constraint->idxmsk)) {
172 /*
173 * calculate the offset of NB counters with respect to
174 * base eventsel or perfctr
175 */
176
177 first = find_first_bit(amd_nb_event_constraint->idxmsk,
178 X86_PMC_IDX_MAX);
179
180 if (eventsel)
181 base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
182 else
183 base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
184
185 offset = base + ((index - first) << 1);
186 } else if (!cpu_has_perfctr_core)
187 offset = index;
188 else
189 offset = index << 1;
190
191 if (eventsel)
192 event_offsets[index] = offset;
193 else
194 count_offsets[index] = offset;
195
196 return offset;
197}
198
199static inline int amd_pmu_rdpmc_index(int index)
200{
201 int ret, first;
202
203 if (!index)
204 return index;
205
206 ret = rdpmc_indexes[index];
142 207
143 ret = x86_pmu_hw_config(event);
144 if (ret) 208 if (ret)
145 return ret; 209 return ret;
146 210
147 if (has_branch_stack(event)) 211 if (amd_nb_event_constraint &&
148 return -EOPNOTSUPP; 212 test_bit(index, amd_nb_event_constraint->idxmsk)) {
213 /*
 214 * according to the manual, ECX value of the NB counters is
215 * the index of the NB counter (0, 1, 2 or 3) plus 6
216 */
217
218 first = find_first_bit(amd_nb_event_constraint->idxmsk,
219 X86_PMC_IDX_MAX);
220 ret = index - first + 6;
221 } else
222 ret = index;
223
224 rdpmc_indexes[index] = ret;
225
226 return ret;
227}
149 228
229static int amd_core_hw_config(struct perf_event *event)
230{
150 if (event->attr.exclude_host && event->attr.exclude_guest) 231 if (event->attr.exclude_host && event->attr.exclude_guest)
151 /* 232 /*
152 * When HO == GO == 1 the hardware treats that as GO == HO == 0 233 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
156 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | 237 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
157 ARCH_PERFMON_EVENTSEL_OS); 238 ARCH_PERFMON_EVENTSEL_OS);
158 else if (event->attr.exclude_host) 239 else if (event->attr.exclude_host)
159 event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; 240 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
160 else if (event->attr.exclude_guest) 241 else if (event->attr.exclude_guest)
161 event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; 242 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
243
244 return 0;
245}
246
247/*
248 * NB counters do not support the following event select bits:
249 * Host/Guest only
250 * Counter mask
251 * Invert counter mask
252 * Edge detect
253 * OS/User mode
254 */
255static int amd_nb_hw_config(struct perf_event *event)
256{
257 /* for NB, we only allow system wide counting mode */
258 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
259 return -EINVAL;
260
261 if (event->attr.exclude_user || event->attr.exclude_kernel ||
262 event->attr.exclude_host || event->attr.exclude_guest)
263 return -EINVAL;
162 264
163 if (event->attr.type != PERF_TYPE_RAW) 265 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
164 return 0; 266 ARCH_PERFMON_EVENTSEL_OS);
165 267
166 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; 268 if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
269 ARCH_PERFMON_EVENTSEL_INT))
270 return -EINVAL;
167 271
168 return 0; 272 return 0;
169} 273}
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
181 return (hwc->config & 0xe0) == 0xe0; 285 return (hwc->config & 0xe0) == 0xe0;
182} 286}
183 287
288static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
289{
290 return amd_nb_event_constraint && amd_is_nb_event(hwc);
291}
292
184static inline int amd_has_nb(struct cpu_hw_events *cpuc) 293static inline int amd_has_nb(struct cpu_hw_events *cpuc)
185{ 294{
186 struct amd_nb *nb = cpuc->amd_nb; 295 struct amd_nb *nb = cpuc->amd_nb;
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
188 return nb && nb->nb_id != -1; 297 return nb && nb->nb_id != -1;
189} 298}
190 299
191static void amd_put_event_constraints(struct cpu_hw_events *cpuc, 300static int amd_pmu_hw_config(struct perf_event *event)
192 struct perf_event *event) 301{
302 int ret;
303
304 /* pass precise event sampling to ibs: */
305 if (event->attr.precise_ip && get_ibs_caps())
306 return -ENOENT;
307
308 if (has_branch_stack(event))
309 return -EOPNOTSUPP;
310
311 ret = x86_pmu_hw_config(event);
312 if (ret)
313 return ret;
314
315 if (event->attr.type == PERF_TYPE_RAW)
316 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
317
318 if (amd_is_perfctr_nb_event(&event->hw))
319 return amd_nb_hw_config(event);
320
321 return amd_core_hw_config(event);
322}
323
324static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
325 struct perf_event *event)
193{ 326{
194 struct hw_perf_event *hwc = &event->hw;
195 struct amd_nb *nb = cpuc->amd_nb; 327 struct amd_nb *nb = cpuc->amd_nb;
196 int i; 328 int i;
197 329
198 /* 330 /*
199 * only care about NB events
200 */
201 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
202 return;
203
204 /*
205 * need to scan whole list because event may not have 331 * need to scan whole list because event may not have
206 * been assigned during scheduling 332 * been assigned during scheduling
207 * 333 *
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
215 } 341 }
216} 342}
217 343
344static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
345{
346 int core_id = cpu_data(smp_processor_id()).cpu_core_id;
347
348 /* deliver interrupts only to this core */
349 if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
350 hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
351 hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
352 hwc->config |= (u64)(core_id) <<
353 AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
354 }
355}
356
218 /* 357 /*
219 * AMD64 NorthBridge events need special treatment because 358 * AMD64 NorthBridge events need special treatment because
220 * counter access needs to be synchronized across all cores 359 * counter access needs to be synchronized across all cores
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
247 * 386 *
248 * Given that resources are allocated (cmpxchg), they must be 387 * Given that resources are allocated (cmpxchg), they must be
249 * eventually freed for others to use. This is accomplished by 388 * eventually freed for others to use. This is accomplished by
250 * calling amd_put_event_constraints(). 389 * calling __amd_put_nb_event_constraints()
251 * 390 *
252 * Non NB events are not impacted by this restriction. 391 * Non NB events are not impacted by this restriction.
253 */ 392 */
254static struct event_constraint * 393static struct event_constraint *
255amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) 394__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
395 struct event_constraint *c)
256{ 396{
257 struct hw_perf_event *hwc = &event->hw; 397 struct hw_perf_event *hwc = &event->hw;
258 struct amd_nb *nb = cpuc->amd_nb; 398 struct amd_nb *nb = cpuc->amd_nb;
259 struct perf_event *old = NULL; 399 struct perf_event *old;
260 int max = x86_pmu.num_counters; 400 int idx, new = -1;
261 int i, j, k = -1;
262 401
263 /* 402 if (!c)
264 * if not NB event or no NB, then no constraints 403 c = &unconstrained;
265 */ 404
266 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) 405 if (cpuc->is_fake)
267 return &unconstrained; 406 return c;
268 407
269 /* 408 /*
270 * detect if already present, if so reuse 409 * detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
276 * because of successive calls to x86_schedule_events() from 415 * because of successive calls to x86_schedule_events() from
277 * hw_perf_group_sched_in() without hw_perf_enable() 416 * hw_perf_group_sched_in() without hw_perf_enable()
278 */ 417 */
279 for (i = 0; i < max; i++) { 418 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
280 /* 419 if (new == -1 || hwc->idx == idx)
281 * keep track of first free slot 420 /* assign free slot, prefer hwc->idx */
282 */ 421 old = cmpxchg(nb->owners + idx, NULL, event);
283 if (k == -1 && !nb->owners[i]) 422 else if (nb->owners[idx] == event)
284 k = i; 423 /* event already present */
424 old = event;
425 else
426 continue;
427
428 if (old && old != event)
429 continue;
430
431 /* reassign to this slot */
432 if (new != -1)
433 cmpxchg(nb->owners + new, event, NULL);
434 new = idx;
285 435
286 /* already present, reuse */ 436 /* already present, reuse */
287 if (nb->owners[i] == event) 437 if (old == event)
288 goto done;
289 }
290 /*
291 * not present, so grab a new slot
292 * starting either at:
293 */
294 if (hwc->idx != -1) {
295 /* previous assignment */
296 i = hwc->idx;
297 } else if (k != -1) {
298 /* start from free slot found */
299 i = k;
300 } else {
301 /*
302 * event not found, no slot found in
303 * first pass, try again from the
304 * beginning
305 */
306 i = 0;
307 }
308 j = i;
309 do {
310 old = cmpxchg(nb->owners+i, NULL, event);
311 if (!old)
312 break; 438 break;
313 if (++i == max) 439 }
314 i = 0; 440
315 } while (i != j); 441 if (new == -1)
316done: 442 return &emptyconstraint;
317 if (!old) 443
318 return &nb->event_constraints[i]; 444 if (amd_is_perfctr_nb_event(hwc))
319 445 amd_nb_interrupt_hw_config(hwc);
320 return &emptyconstraint; 446
447 return &nb->event_constraints[new];
321} 448}
322 449
323static struct amd_nb *amd_alloc_nb(int cpu) 450static struct amd_nb *amd_alloc_nb(int cpu)
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
364 struct amd_nb *nb; 491 struct amd_nb *nb;
365 int i, nb_id; 492 int i, nb_id;
366 493
367 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; 494 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
368 495
369 if (boot_cpu_data.x86_max_cores < 2) 496 if (boot_cpu_data.x86_max_cores < 2)
370 return; 497 return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
407 } 534 }
408} 535}
409 536
537static struct event_constraint *
538amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
539{
540 /*
541 * if not NB event or no NB, then no constraints
542 */
543 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
544 return &unconstrained;
545
546 return __amd_get_nb_event_constraints(cpuc, event,
547 amd_nb_event_constraint);
548}
549
550static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
551 struct perf_event *event)
552{
553 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
554 __amd_put_nb_event_constraints(cpuc, event);
555}
556
410PMU_FORMAT_ATTR(event, "config:0-7,32-35"); 557PMU_FORMAT_ATTR(event, "config:0-7,32-35");
411PMU_FORMAT_ATTR(umask, "config:8-15" ); 558PMU_FORMAT_ATTR(umask, "config:8-15" );
412PMU_FORMAT_ATTR(edge, "config:18" ); 559PMU_FORMAT_ATTR(edge, "config:18" );
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
496static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); 643static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
497static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); 644static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
498 645
646static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
647static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
648
499static struct event_constraint * 649static struct event_constraint *
500amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 650amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
501{ 651{
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
561 return &amd_f15_PMC20; 711 return &amd_f15_PMC20;
562 } 712 }
563 case AMD_EVENT_NB: 713 case AMD_EVENT_NB:
564 /* not yet implemented */ 714 return __amd_get_nb_event_constraints(cpuc, event,
565 return &emptyconstraint; 715 amd_nb_event_constraint);
566 default: 716 default:
567 return &emptyconstraint; 717 return &emptyconstraint;
568 } 718 }
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
587 .schedule_events = x86_schedule_events, 737 .schedule_events = x86_schedule_events,
588 .eventsel = MSR_K7_EVNTSEL0, 738 .eventsel = MSR_K7_EVNTSEL0,
589 .perfctr = MSR_K7_PERFCTR0, 739 .perfctr = MSR_K7_PERFCTR0,
740 .addr_offset = amd_pmu_addr_offset,
741 .rdpmc_index = amd_pmu_rdpmc_index,
590 .event_map = amd_pmu_event_map, 742 .event_map = amd_pmu_event_map,
591 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 743 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
592 .num_counters = AMD64_NUM_COUNTERS, 744 .num_counters = AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
608 760
609static int setup_event_constraints(void) 761static int setup_event_constraints(void)
610{ 762{
611 if (boot_cpu_data.x86 >= 0x15) 763 if (boot_cpu_data.x86 == 0x15)
612 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; 764 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
613 return 0; 765 return 0;
614} 766}
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
638 return 0; 790 return 0;
639} 791}
640 792
793static int setup_perfctr_nb(void)
794{
795 if (!cpu_has_perfctr_nb)
796 return -ENODEV;
797
798 x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
799
800 if (cpu_has_perfctr_core)
801 amd_nb_event_constraint = &amd_NBPMC96;
802 else
803 amd_nb_event_constraint = &amd_NBPMC74;
804
805 printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
806
807 return 0;
808}
809
641__init int amd_pmu_init(void) 810__init int amd_pmu_init(void)
642{ 811{
643 /* Performance-monitoring supported from K7 and later: */ 812 /* Performance-monitoring supported from K7 and later: */
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
648 817
649 setup_event_constraints(); 818 setup_event_constraints();
650 setup_perfctr_core(); 819 setup_perfctr_core();
820 setup_perfctr_nb();
651 821
652 /* Events are common for all AMDs */ 822 /* Events are common for all AMDs */
653 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 823 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
678 * SVM is disabled the Guest-only bits still gets set and the counter 848 * SVM is disabled the Guest-only bits still gets set and the counter
679 * will not count anything. 849 * will not count anything.
680 */ 850 */
681 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; 851 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
682 852
683 /* Reload all events */ 853 /* Reload all events */
684 x86_pmu_disable_all(); 854 x86_pmu_disable_all();
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 93b9e1181f83..4914e94ad6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
2019 break; 2019 break;
2020 2020
2021 case 28: /* Atom */ 2021 case 28: /* Atom */
2022 case 54: /* Cedariew */ 2022 case 38: /* Lincroft */
2023 case 39: /* Penwell */
2024 case 53: /* Cloverview */
2025 case 54: /* Cedarview */
2023 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 2026 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2024 sizeof(hw_cache_event_ids)); 2027 sizeof(hw_cache_event_ids));
2025 2028
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
2084 pr_cont("SandyBridge events, "); 2087 pr_cont("SandyBridge events, ");
2085 break; 2088 break;
2086 case 58: /* IvyBridge */ 2089 case 58: /* IvyBridge */
2090 case 62: /* IvyBridge EP */
2087 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2091 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2088 sizeof(hw_cache_event_ids)); 2092 sizeof(hw_cache_event_ids));
2089 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2093 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index f2af39f5dc3d..4820c232a0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
19 19
20}; 20};
21 21
22static __initconst u64 p6_hw_cache_event_ids 22static u64 p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX] 23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX] 24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 seq_printf(m, 29 seq_printf(m,
30 "fdiv_bug\t: %s\n" 30 "fdiv_bug\t: %s\n"
31 "hlt_bug\t\t: %s\n"
32 "f00f_bug\t: %s\n" 31 "f00f_bug\t: %s\n"
33 "coma_bug\t: %s\n" 32 "coma_bug\t: %s\n"
34 "fpu\t\t: %s\n" 33 "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
36 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
37 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
38 c->fdiv_bug ? "yes" : "no", 37 c->fdiv_bug ? "yes" : "no",
39 c->hlt_works_ok ? "no" : "yes",
40 c->f00f_bug ? "yes" : "no", 38 c->f00f_bug ? "yes" : "no",
41 c->coma_bug ? "yes" : "no", 39 c->coma_bug ? "yes" : "no",
42 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d22d0c4edcfd..03a36321ec54 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -33,6 +33,9 @@
33 33
34#define VMWARE_PORT_CMD_GETVERSION 10 34#define VMWARE_PORT_CMD_GETVERSION 10
35#define VMWARE_PORT_CMD_GETHZ 45 35#define VMWARE_PORT_CMD_GETHZ 45
36#define VMWARE_PORT_CMD_GETVCPU_INFO 68
37#define VMWARE_PORT_CMD_LEGACY_X2APIC 3
38#define VMWARE_PORT_CMD_VCPU_RESERVED 31
36 39
37#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ 40#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
38 __asm__("inl (%%dx)" : \ 41 __asm__("inl (%%dx)" : \
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
125 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); 128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
126} 129}
127 130
131/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
132static bool __init vmware_legacy_x2apic_available(void)
133{
134 uint32_t eax, ebx, ecx, edx;
135 VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
136 return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
137 (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
138}
139
128const __refconst struct hypervisor_x86 x86_hyper_vmware = { 140const __refconst struct hypervisor_x86 x86_hyper_vmware = {
129 .name = "VMware", 141 .name = "VMware",
130 .detect = vmware_platform, 142 .detect = vmware_platform,
131 .set_cpu_features = vmware_set_cpu_features, 143 .set_cpu_features = vmware_set_cpu_features,
132 .init_platform = vmware_platform_setup, 144 .init_platform = vmware_platform_setup,
145 .x2apic_available = vmware_legacy_x2apic_available,
133}; 146};
134EXPORT_SYMBOL(x86_hyper_vmware); 147EXPORT_SYMBOL(x86_hyper_vmware);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index ff84d5469d77..8831176aa5ef 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback)
1065 lea 16(%esp),%esp 1065 lea 16(%esp),%esp
1066 CFI_ADJUST_CFA_OFFSET -16 1066 CFI_ADJUST_CFA_OFFSET -16
1067 jz 5f 1067 jz 5f
1068 addl $16,%esp
1069 jmp iret_exc 1068 jmp iret_exc
10705: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ 10695: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1071 SAVE_ALL 1070 SAVE_ALL
@@ -1092,11 +1091,18 @@ ENTRY(xen_failsafe_callback)
1092 _ASM_EXTABLE(4b,9b) 1091 _ASM_EXTABLE(4b,9b)
1093ENDPROC(xen_failsafe_callback) 1092ENDPROC(xen_failsafe_callback)
1094 1093
1095BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, 1094BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1096 xen_evtchn_do_upcall) 1095 xen_evtchn_do_upcall)
1097 1096
1098#endif /* CONFIG_XEN */ 1097#endif /* CONFIG_XEN */
1099 1098
1099#if IS_ENABLED(CONFIG_HYPERV)
1100
1101BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1102 hyperv_vector_handler)
1103
1104#endif /* CONFIG_HYPERV */
1105
1100#ifdef CONFIG_FUNCTION_TRACER 1106#ifdef CONFIG_FUNCTION_TRACER
1101#ifdef CONFIG_DYNAMIC_FTRACE 1107#ifdef CONFIG_DYNAMIC_FTRACE
1102 1108
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 07a7a04529bc..048f2240f8e6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1454,11 +1454,16 @@ ENTRY(xen_failsafe_callback)
1454 CFI_ENDPROC 1454 CFI_ENDPROC
1455END(xen_failsafe_callback) 1455END(xen_failsafe_callback)
1456 1456
1457apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ 1457apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
1458 xen_hvm_callback_vector xen_evtchn_do_upcall 1458 xen_hvm_callback_vector xen_evtchn_do_upcall
1459 1459
1460#endif /* CONFIG_XEN */ 1460#endif /* CONFIG_XEN */
1461 1461
1462#if IS_ENABLED(CONFIG_HYPERV)
1463apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
1464 hyperv_callback_vector hyperv_vector_handler
1465#endif /* CONFIG_HYPERV */
1466
1462/* 1467/*
1463 * Some functions should be protected against kprobes 1468 * Some functions should be protected against kprobes
1464 */ 1469 */
@@ -1781,6 +1786,7 @@ first_nmi:
1781 * Leave room for the "copied" frame 1786 * Leave room for the "copied" frame
1782 */ 1787 */
1783 subq $(5*8), %rsp 1788 subq $(5*8), %rsp
1789 CFI_ADJUST_CFA_OFFSET 5*8
1784 1790
1785 /* Copy the stack frame to the Saved frame */ 1791 /* Copy the stack frame to the Saved frame */
1786 .rept 5 1792 .rept 5
@@ -1863,10 +1869,8 @@ end_repeat_nmi:
1863nmi_swapgs: 1869nmi_swapgs:
1864 SWAPGS_UNSAFE_STACK 1870 SWAPGS_UNSAFE_STACK
1865nmi_restore: 1871nmi_restore:
1866 RESTORE_ALL 8 1872 /* Pop the extra iret frame at once */
1867 1873 RESTORE_ALL 6*8
1868 /* Pop the extra iret frame */
1869 addq $(5*8), %rsp
1870 1874
1871 /* Clear the NMI executing stack variable */ 1875 /* Clear the NMI executing stack variable */
1872 movq $0, 5*8(%rsp) 1876 movq $0, 5*8(%rsp)
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c18f59d10101..6773c918b8cc 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -18,6 +18,7 @@
18#include <asm/io_apic.h> 18#include <asm/io_apic.h>
19#include <asm/bios_ebda.h> 19#include <asm/bios_ebda.h>
20#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
21#include <asm/bootparam_utils.h>
21 22
22static void __init i386_default_early_setup(void) 23static void __init i386_default_early_setup(void)
23{ 24{
@@ -30,6 +31,8 @@ static void __init i386_default_early_setup(void)
30 31
31void __init i386_start_kernel(void) 32void __init i386_start_kernel(void)
32{ 33{
34 sanitize_boot_params(&boot_params);
35
33 memblock_reserve(__pa_symbol(&_text), 36 memblock_reserve(__pa_symbol(&_text),
34 __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); 37 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
35 38
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 037df57a99ac..849fc9e63c2f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -25,6 +25,7 @@
25#include <asm/kdebug.h> 25#include <asm/kdebug.h>
26#include <asm/e820.h> 26#include <asm/e820.h>
27#include <asm/bios_ebda.h> 27#include <asm/bios_ebda.h>
28#include <asm/bootparam_utils.h>
28 29
29static void __init zap_identity_mappings(void) 30static void __init zap_identity_mappings(void)
30{ 31{
@@ -46,6 +47,7 @@ static void __init copy_bootdata(char *real_mode_data)
46 char * command_line; 47 char * command_line;
47 48
48 memcpy(&boot_params, real_mode_data, sizeof boot_params); 49 memcpy(&boot_params, real_mode_data, sizeof boot_params);
50 sanitize_boot_params(&boot_params);
49 if (boot_params.hdr.cmd_line_ptr) { 51 if (boot_params.hdr.cmd_line_ptr) {
50 command_line = __va(boot_params.hdr.cmd_line_ptr); 52 command_line = __va(boot_params.hdr.cmd_line_ptr);
51 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); 53 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 8e7f6556028f..3c3f58a0808f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -300,37 +300,52 @@ ENTRY(startup_32_smp)
300 leal -__PAGE_OFFSET(%ecx),%esp 300 leal -__PAGE_OFFSET(%ecx),%esp
301 301
302default_entry: 302default_entry:
303#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
304 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
305 X86_CR0_PG)
306 movl $(CR0_STATE & ~X86_CR0_PG),%eax
307 movl %eax,%cr0
308
309/*
310 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
311 * bits like NT set. This would confuse the debugger if this code is traced. So
312 * initialize them properly now before switching to protected mode. That means
313 * DF in particular (even though we have cleared it earlier after copying the
314 * command line) because GCC expects it.
315 */
316 pushl $0
317 popfl
318
303/* 319/*
304 * New page tables may be in 4Mbyte page mode and may 320 * New page tables may be in 4Mbyte page mode and may be using the global pages.
305 * be using the global pages.
306 * 321 *
307 * NOTE! If we are on a 486 we may have no cr4 at all! 322 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
308 * Specifically, cr4 exists if and only if CPUID exists 323 * if and only if CPUID exists and has flags other than the FPU flag set.
309 * and has flags other than the FPU flag set.
310 */ 324 */
325 movl $-1,pa(X86_CPUID) # preset CPUID level
311 movl $X86_EFLAGS_ID,%ecx 326 movl $X86_EFLAGS_ID,%ecx
312 pushl %ecx 327 pushl %ecx
313 popfl 328 popfl # set EFLAGS=ID
314 pushfl 329 pushfl
315 popl %eax 330 popl %eax # get EFLAGS
316 pushl $0 331 testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remained set?
317 popfl 332 jz enable_paging # hw disallowed setting of ID bit
318 pushfl 333 # which means no CPUID and no CR4
319 popl %edx 334
320 xorl %edx,%eax 335 xorl %eax,%eax
321 testl %ecx,%eax 336 cpuid
322 jz 6f # No ID flag = no CPUID = no CR4 337 movl %eax,pa(X86_CPUID) # save largest std CPUID function
323 338
324 movl $1,%eax 339 movl $1,%eax
325 cpuid 340 cpuid
326 andl $~1,%edx # Ignore CPUID.FPU 341 andl $~1,%edx # Ignore CPUID.FPU
327 jz 6f # No flags or only CPUID.FPU = no CR4 342 jz enable_paging # No flags or only CPUID.FPU = no CR4
328 343
329 movl pa(mmu_cr4_features),%eax 344 movl pa(mmu_cr4_features),%eax
330 movl %eax,%cr4 345 movl %eax,%cr4
331 346
332 testb $X86_CR4_PAE, %al # check if PAE is enabled 347 testb $X86_CR4_PAE, %al # check if PAE is enabled
333 jz 6f 348 jz enable_paging
334 349
335 /* Check if extended functions are implemented */ 350 /* Check if extended functions are implemented */
336 movl $0x80000000, %eax 351 movl $0x80000000, %eax
@@ -338,7 +353,7 @@ default_entry:
338 /* Value must be in the range 0x80000001 to 0x8000ffff */ 353 /* Value must be in the range 0x80000001 to 0x8000ffff */
339 subl $0x80000001, %eax 354 subl $0x80000001, %eax
340 cmpl $(0x8000ffff-0x80000001), %eax 355 cmpl $(0x8000ffff-0x80000001), %eax
341 ja 6f 356 ja enable_paging
342 357
343 /* Clear bogus XD_DISABLE bits */ 358 /* Clear bogus XD_DISABLE bits */
344 call verify_cpu 359 call verify_cpu
@@ -347,7 +362,7 @@ default_entry:
347 cpuid 362 cpuid
348 /* Execute Disable bit supported? */ 363 /* Execute Disable bit supported? */
349 btl $(X86_FEATURE_NX & 31), %edx 364 btl $(X86_FEATURE_NX & 31), %edx
350 jnc 6f 365 jnc enable_paging
351 366
352 /* Setup EFER (Extended Feature Enable Register) */ 367 /* Setup EFER (Extended Feature Enable Register) */
353 movl $MSR_EFER, %ecx 368 movl $MSR_EFER, %ecx
@@ -357,15 +372,14 @@ default_entry:
357 /* Make changes effective */ 372 /* Make changes effective */
358 wrmsr 373 wrmsr
359 374
3606: 375enable_paging:
361 376
362/* 377/*
363 * Enable paging 378 * Enable paging
364 */ 379 */
365 movl $pa(initial_page_table), %eax 380 movl $pa(initial_page_table), %eax
366 movl %eax,%cr3 /* set the page table pointer.. */ 381 movl %eax,%cr3 /* set the page table pointer.. */
367 movl %cr0,%eax 382 movl $CR0_STATE,%eax
368 orl $X86_CR0_PG,%eax
369 movl %eax,%cr0 /* ..and set paging (PG) bit */ 383 movl %eax,%cr0 /* ..and set paging (PG) bit */
370 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 384 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
3711: 3851:
@@ -373,14 +387,6 @@ default_entry:
373 addl $__PAGE_OFFSET, %esp 387 addl $__PAGE_OFFSET, %esp
374 388
375/* 389/*
376 * Initialize eflags. Some BIOS's leave bits like NT set. This would
377 * confuse the debugger if this code is traced.
378 * XXX - best to initialize before switching to protected mode.
379 */
380 pushl $0
381 popfl
382
383/*
384 * start system 32-bit setup. We need to re-do some of the things done 390 * start system 32-bit setup. We need to re-do some of the things done
385 * in 16-bit mode for the "real" operations. 391 * in 16-bit mode for the "real" operations.
386 */ 392 */
@@ -389,31 +395,11 @@ default_entry:
389 jz 1f # Did we do this already? 395 jz 1f # Did we do this already?
390 call *%eax 396 call *%eax
3911: 3971:
392 398
393/* check if it is 486 or 386. */
394/* 399/*
395 * XXX - this does a lot of unnecessary setup. Alignment checks don't 400 * Check if it is 486
396 * apply at our cpl of 0 and the stack ought to be aligned already, and
397 * we don't need to preserve eflags.
398 */ 401 */
399 movl $-1,X86_CPUID # -1 for no CPUID initially 402 cmpl $-1,X86_CPUID
400 movb $3,X86 # at least 386
401 pushfl # push EFLAGS
402 popl %eax # get EFLAGS
403 movl %eax,%ecx # save original EFLAGS
404 xorl $0x240000,%eax # flip AC and ID bits in EFLAGS
405 pushl %eax # copy to EFLAGS
406 popfl # set EFLAGS
407 pushfl # get new EFLAGS
408 popl %eax # put it in eax
409 xorl %ecx,%eax # change in flags
410 pushl %ecx # restore original EFLAGS
411 popfl
412 testl $0x40000,%eax # check if AC bit changed
413 je is386
414
415 movb $4,X86 # at least 486
416 testl $0x200000,%eax # check if ID bit changed
417 je is486 403 je is486
418 404
419 /* get vendor info */ 405 /* get vendor info */
@@ -439,11 +425,10 @@ default_entry:
439 movb %cl,X86_MASK 425 movb %cl,X86_MASK
440 movl %edx,X86_CAPABILITY 426 movl %edx,X86_CAPABILITY
441 427
442is486: movl $0x50022,%ecx # set AM, WP, NE and MP 428is486:
443 jmp 2f 429 movb $4,X86
444 430 movl $0x50022,%ecx # set AM, WP, NE and MP
445is386: movl $2,%ecx # set MP 431 movl %cr0,%eax
4462: movl %cr0,%eax
447 andl $0x80000011,%eax # Save PG,PE,ET 432 andl $0x80000011,%eax # Save PG,PE,ET
448 orl %ecx,%eax 433 orl %ecx,%eax
449 movl %eax,%cr0 434 movl %eax,%cr0
@@ -468,7 +453,6 @@ is386: movl $2,%ecx # set MP
468 xorl %eax,%eax # Clear LDT 453 xorl %eax,%eax # Clear LDT
469 lldt %ax 454 lldt %ax
470 455
471 cld # gcc2 wants the direction flag cleared at all times
472 pushl $0 # fake return address for unwinder 456 pushl $0 # fake return address for unwinder
473 jmp *(initial_code) 457 jmp *(initial_code)
474 458
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index e28670f9a589..da85a8e830a1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta,
478 478
479static int hpet_setup_msi_irq(unsigned int irq) 479static int hpet_setup_msi_irq(unsigned int irq)
480{ 480{
481 if (arch_setup_hpet_msi(irq, hpet_blockid)) { 481 if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
482 destroy_irq(irq); 482 destroy_irq(irq);
483 return -EINVAL; 483 return -EINVAL;
484 } 484 }
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile
new file mode 100644
index 000000000000..0d33169cc1a2
--- /dev/null
+++ b/arch/x86/kernel/kprobes/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for kernel probes
3#
4
5obj-$(CONFIG_KPROBES) += core.o
6obj-$(CONFIG_OPTPROBES) += opt.o
7obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes/common.h
index 3230b68ef29a..2e9d4b5af036 100644
--- a/arch/x86/kernel/kprobes-common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
99 return addr; 99 return addr;
100} 100}
101#endif 101#endif
102
103#ifdef CONFIG_KPROBES_ON_FTRACE
104extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
105 struct kprobe_ctlblk *kcb);
106#else
107static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
108 struct kprobe_ctlblk *kcb)
109{
110 return 0;
111}
112#endif
102#endif 113#endif
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes/core.c
index 57916c0d3cf6..e124554598ee 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -58,7 +58,7 @@
58#include <asm/insn.h> 58#include <asm/insn.h>
59#include <asm/debugreg.h> 59#include <asm/debugreg.h>
60 60
61#include "kprobes-common.h" 61#include "common.h"
62 62
63void jprobe_return_end(void); 63void jprobe_return_end(void);
64 64
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
78 * Groups, and some special opcodes can not boost. 78 * Groups, and some special opcodes can not boost.
79 * This is non-const and volatile to keep gcc from statically 79 * This is non-const and volatile to keep gcc from statically
80 * optimizing it out, as variable_test_bit makes gcc think only 80 * optimizing it out, as variable_test_bit makes gcc think only
81 * *(unsigned long*) is used. 81 * *(unsigned long*) is used.
82 */ 82 */
83static volatile u32 twobyte_is_boostable[256 / 32] = { 83static volatile u32 twobyte_is_boostable[256 / 32] = {
84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
117 struct __arch_relative_insn { 117 struct __arch_relative_insn {
118 u8 op; 118 u8 op;
119 s32 raddr; 119 s32 raddr;
120 } __attribute__((packed)) *insn; 120 } __packed *insn;
121 121
122 insn = (struct __arch_relative_insn *)from; 122 insn = (struct __arch_relative_insn *)from;
123 insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); 123 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
541 return 1; 541 return 1;
542} 542}
543 543
544#ifdef KPROBES_CAN_USE_FTRACE
545static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
546 struct kprobe_ctlblk *kcb)
547{
548 /*
549 * Emulate singlestep (and also recover regs->ip)
550 * as if there is a 5byte nop
551 */
552 regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
553 if (unlikely(p->post_handler)) {
554 kcb->kprobe_status = KPROBE_HIT_SSDONE;
555 p->post_handler(p, regs, 0);
556 }
557 __this_cpu_write(current_kprobe, NULL);
558}
559#endif
560
561/* 544/*
562 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 545 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
563 * remain disabled throughout this function. 546 * remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
616 } else if (kprobe_running()) { 599 } else if (kprobe_running()) {
617 p = __this_cpu_read(current_kprobe); 600 p = __this_cpu_read(current_kprobe);
618 if (p->break_handler && p->break_handler(p, regs)) { 601 if (p->break_handler && p->break_handler(p, regs)) {
619#ifdef KPROBES_CAN_USE_FTRACE 602 if (!skip_singlestep(p, regs, kcb))
620 if (kprobe_ftrace(p)) { 603 setup_singlestep(p, regs, kcb, 0);
621 skip_singlestep(p, regs, kcb);
622 return 1;
623 }
624#endif
625 setup_singlestep(p, regs, kcb, 0);
626 return 1; 604 return 1;
627 } 605 }
628 } /* else: not a kprobe fault; let the kernel handle it */ 606 } /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1075 return 0; 1053 return 0;
1076} 1054}
1077 1055
1078#ifdef KPROBES_CAN_USE_FTRACE
1079/* Ftrace callback handler for kprobes */
1080void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
1081 struct ftrace_ops *ops, struct pt_regs *regs)
1082{
1083 struct kprobe *p;
1084 struct kprobe_ctlblk *kcb;
1085 unsigned long flags;
1086
1087 /* Disable irq for emulating a breakpoint and avoiding preempt */
1088 local_irq_save(flags);
1089
1090 p = get_kprobe((kprobe_opcode_t *)ip);
1091 if (unlikely(!p) || kprobe_disabled(p))
1092 goto end;
1093
1094 kcb = get_kprobe_ctlblk();
1095 if (kprobe_running()) {
1096 kprobes_inc_nmissed_count(p);
1097 } else {
1098 /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
1099 regs->ip = ip + sizeof(kprobe_opcode_t);
1100
1101 __this_cpu_write(current_kprobe, p);
1102 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1103 if (!p->pre_handler || !p->pre_handler(p, regs))
1104 skip_singlestep(p, regs, kcb);
1105 /*
1106 * If pre_handler returns !0, it sets regs->ip and
1107 * resets current kprobe.
1108 */
1109 }
1110end:
1111 local_irq_restore(flags);
1112}
1113
1114int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
1115{
1116 p->ainsn.insn = NULL;
1117 p->ainsn.boostable = -1;
1118 return 0;
1119}
1120#endif
1121
1122int __init arch_init_kprobes(void) 1056int __init arch_init_kprobes(void)
1123{ 1057{
1124 return arch_init_optprobes(); 1058 return arch_init_optprobes();
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
new file mode 100644
index 000000000000..23ef5c556f06
--- /dev/null
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -0,0 +1,93 @@
1/*
2 * Dynamic Ftrace based Kprobes Optimization
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) Hitachi Ltd., 2012
19 */
20#include <linux/kprobes.h>
21#include <linux/ptrace.h>
22#include <linux/hardirq.h>
23#include <linux/preempt.h>
24#include <linux/ftrace.h>
25
26#include "common.h"
27
28static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
29 struct kprobe_ctlblk *kcb)
30{
31 /*
32 * Emulate singlestep (and also recover regs->ip)
33 * as if there is a 5byte nop
34 */
35 regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
36 if (unlikely(p->post_handler)) {
37 kcb->kprobe_status = KPROBE_HIT_SSDONE;
38 p->post_handler(p, regs, 0);
39 }
40 __this_cpu_write(current_kprobe, NULL);
41 return 1;
42}
43
44int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
45 struct kprobe_ctlblk *kcb)
46{
47 if (kprobe_ftrace(p))
48 return __skip_singlestep(p, regs, kcb);
49 else
50 return 0;
51}
52
53/* Ftrace callback handler for kprobes */
54void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
55 struct ftrace_ops *ops, struct pt_regs *regs)
56{
57 struct kprobe *p;
58 struct kprobe_ctlblk *kcb;
59 unsigned long flags;
60
61 /* Disable irq for emulating a breakpoint and avoiding preempt */
62 local_irq_save(flags);
63
64 p = get_kprobe((kprobe_opcode_t *)ip);
65 if (unlikely(!p) || kprobe_disabled(p))
66 goto end;
67
68 kcb = get_kprobe_ctlblk();
69 if (kprobe_running()) {
70 kprobes_inc_nmissed_count(p);
71 } else {
72 /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
73 regs->ip = ip + sizeof(kprobe_opcode_t);
74
75 __this_cpu_write(current_kprobe, p);
76 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
77 if (!p->pre_handler || !p->pre_handler(p, regs))
78 __skip_singlestep(p, regs, kcb);
79 /*
80 * If pre_handler returns !0, it sets regs->ip and
81 * resets current kprobe.
82 */
83 }
84end:
85 local_irq_restore(flags);
86}
87
88int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
89{
90 p->ainsn.insn = NULL;
91 p->ainsn.boostable = -1;
92 return 0;
93}
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes/opt.c
index c5e410eed403..76dc6f095724 100644
--- a/arch/x86/kernel/kprobes-opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -37,7 +37,7 @@
37#include <asm/insn.h> 37#include <asm/insn.h>
38#include <asm/debugreg.h> 38#include <asm/debugreg.h>
39 39
40#include "kprobes-common.h" 40#include "common.h"
41 41
42unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) 42unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
43{ 43{
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9c2bd8bd4b4c..2b44ea5f269d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -505,6 +505,7 @@ static bool __init kvm_detect(void)
505const struct hypervisor_x86 x86_hyper_kvm __refconst = { 505const struct hypervisor_x86 x86_hyper_kvm __refconst = {
506 .name = "KVM", 506 .name = "KVM",
507 .detect = kvm_detect, 507 .detect = kvm_detect,
508 .x2apic_available = kvm_para_available,
508}; 509};
509EXPORT_SYMBOL_GPL(x86_hyper_kvm); 510EXPORT_SYMBOL_GPL(x86_hyper_kvm);
510 511
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index a7c5661f8496..4929502c1372 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)
174 unsigned int cpu; 174 unsigned int cpu;
175 struct cpuinfo_x86 *c; 175 struct cpuinfo_x86 *c;
176 176
177 if (!capable(CAP_SYS_RAWIO))
178 return -EPERM;
179
177 cpu = iminor(file->f_path.dentry->d_inode); 180 cpu = iminor(file->f_path.dentry->d_inode);
178 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) 181 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
179 return -ENXIO; /* No such CPU */ 182 return -ENXIO; /* No such CPU */
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0f5dec5c80e0..872079a67e4d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = {
56EXPORT_SYMBOL(x86_dma_fallback_dev); 56EXPORT_SYMBOL(x86_dma_fallback_dev);
57 57
58/* Number of entries preallocated for DMA-API debugging */ 58/* Number of entries preallocated for DMA-API debugging */
59#define PREALLOC_DMA_DEBUG_ENTRIES 32768 59#define PREALLOC_DMA_DEBUG_ENTRIES 65536
60 60
61int dma_set_mask(struct device *dev, u64 mask) 61int dma_set_mask(struct device *dev, u64 mask)
62{ 62{
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
268unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; 268unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
269EXPORT_SYMBOL(boot_option_idle_override); 269EXPORT_SYMBOL(boot_option_idle_override);
270 270
271/* 271static void (*x86_idle)(void);
272 * Powermanagement idle function, if any..
273 */
274void (*pm_idle)(void);
275#ifdef CONFIG_APM_MODULE
276EXPORT_SYMBOL(pm_idle);
277#endif
278 272
279#ifndef CONFIG_SMP 273#ifndef CONFIG_SMP
280static inline void play_dead(void) 274static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
351 rcu_idle_enter(); 345 rcu_idle_enter();
352 346
353 if (cpuidle_idle_call()) 347 if (cpuidle_idle_call())
354 pm_idle(); 348 x86_idle();
355 349
356 rcu_idle_exit(); 350 rcu_idle_exit();
357 start_critical_timings(); 351 start_critical_timings();
@@ -375,7 +369,6 @@ void cpu_idle(void)
375 */ 369 */
376void default_idle(void) 370void default_idle(void)
377{ 371{
378 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
379 trace_cpu_idle_rcuidle(1, smp_processor_id()); 372 trace_cpu_idle_rcuidle(1, smp_processor_id());
380 current_thread_info()->status &= ~TS_POLLING; 373 current_thread_info()->status &= ~TS_POLLING;
381 /* 374 /*
@@ -389,21 +382,22 @@ void default_idle(void)
389 else 382 else
390 local_irq_enable(); 383 local_irq_enable();
391 current_thread_info()->status |= TS_POLLING; 384 current_thread_info()->status |= TS_POLLING;
392 trace_power_end_rcuidle(smp_processor_id());
393 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 385 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
394} 386}
395#ifdef CONFIG_APM_MODULE 387#ifdef CONFIG_APM_MODULE
396EXPORT_SYMBOL(default_idle); 388EXPORT_SYMBOL(default_idle);
397#endif 389#endif
398 390
399bool set_pm_idle_to_default(void) 391#ifdef CONFIG_XEN
392bool xen_set_default_idle(void)
400{ 393{
401 bool ret = !!pm_idle; 394 bool ret = !!x86_idle;
402 395
403 pm_idle = default_idle; 396 x86_idle = default_idle;
404 397
405 return ret; 398 return ret;
406} 399}
400#endif
407void stop_this_cpu(void *dummy) 401void stop_this_cpu(void *dummy)
408{ 402{
409 local_irq_disable(); 403 local_irq_disable();
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy)
413 set_cpu_online(smp_processor_id(), false); 407 set_cpu_online(smp_processor_id(), false);
414 disable_local_APIC(); 408 disable_local_APIC();
415 409
416 for (;;) { 410 for (;;)
417 if (hlt_works(smp_processor_id())) 411 halt();
418 halt();
419 }
420}
421
422/* Default MONITOR/MWAIT with no hints, used for default C1 state */
423static void mwait_idle(void)
424{
425 if (!need_resched()) {
426 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
427 trace_cpu_idle_rcuidle(1, smp_processor_id());
428 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
429 clflush((void *)&current_thread_info()->flags);
430
431 __monitor((void *)&current_thread_info()->flags, 0, 0);
432 smp_mb();
433 if (!need_resched())
434 __sti_mwait(0, 0);
435 else
436 local_irq_enable();
437 trace_power_end_rcuidle(smp_processor_id());
438 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
439 } else
440 local_irq_enable();
441} 412}
442 413
443/* 414/*
@@ -447,62 +418,13 @@ static void mwait_idle(void)
447 */ 418 */
448static void poll_idle(void) 419static void poll_idle(void)
449{ 420{
450 trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
451 trace_cpu_idle_rcuidle(0, smp_processor_id()); 421 trace_cpu_idle_rcuidle(0, smp_processor_id());
452 local_irq_enable(); 422 local_irq_enable();
453 while (!need_resched()) 423 while (!need_resched())
454 cpu_relax(); 424 cpu_relax();
455 trace_power_end_rcuidle(smp_processor_id());
456 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 425 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
457} 426}
458 427
459/*
460 * mwait selection logic:
461 *
462 * It depends on the CPU. For AMD CPUs that support MWAIT this is
463 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
464 * then depend on a clock divisor and current Pstate of the core. If
465 * all cores of a processor are in halt state (C1) the processor can
466 * enter the C1E (C1 enhanced) state. If mwait is used this will never
467 * happen.
468 *
469 * idle=mwait overrides this decision and forces the usage of mwait.
470 */
471
472#define MWAIT_INFO 0x05
473#define MWAIT_ECX_EXTENDED_INFO 0x01
474#define MWAIT_EDX_C1 0xf0
475
476int mwait_usable(const struct cpuinfo_x86 *c)
477{
478 u32 eax, ebx, ecx, edx;
479
480 /* Use mwait if idle=mwait boot option is given */
481 if (boot_option_idle_override == IDLE_FORCE_MWAIT)
482 return 1;
483
484 /*
485 * Any idle= boot option other than idle=mwait means that we must not
486 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
487 */
488 if (boot_option_idle_override != IDLE_NO_OVERRIDE)
489 return 0;
490
491 if (c->cpuid_level < MWAIT_INFO)
492 return 0;
493
494 cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
495 /* Check, whether EDX has extended info about MWAIT */
496 if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
497 return 1;
498
499 /*
500 * edx enumeratios MONITOR/MWAIT extensions. Check, whether
501 * C1 supports MWAIT
502 */
503 return (edx & MWAIT_EDX_C1);
504}
505
506bool amd_e400_c1e_detected; 428bool amd_e400_c1e_detected;
507EXPORT_SYMBOL(amd_e400_c1e_detected); 429EXPORT_SYMBOL(amd_e400_c1e_detected);
508 430
@@ -567,31 +489,24 @@ static void amd_e400_idle(void)
567void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 489void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
568{ 490{
569#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
570 if (pm_idle == poll_idle && smp_num_siblings > 1) { 492 if (x86_idle == poll_idle && smp_num_siblings > 1)
571 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); 493 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
572 }
573#endif 494#endif
574 if (pm_idle) 495 if (x86_idle)
575 return; 496 return;
576 497
577 if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { 498 if (cpu_has_amd_erratum(amd_erratum_400)) {
578 /*
579 * One CPU supports mwait => All CPUs supports mwait
580 */
581 pr_info("using mwait in idle threads\n");
582 pm_idle = mwait_idle;
583 } else if (cpu_has_amd_erratum(amd_erratum_400)) {
584 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 499 /* E400: APIC timer interrupt does not wake up CPU from C1e */
585 pr_info("using AMD E400 aware idle routine\n"); 500 pr_info("using AMD E400 aware idle routine\n");
586 pm_idle = amd_e400_idle; 501 x86_idle = amd_e400_idle;
587 } else 502 } else
588 pm_idle = default_idle; 503 x86_idle = default_idle;
589} 504}
590 505
591void __init init_amd_e400_c1e_mask(void) 506void __init init_amd_e400_c1e_mask(void)
592{ 507{
593 /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */ 508 /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
594 if (pm_idle == amd_e400_idle) 509 if (x86_idle == amd_e400_idle)
595 zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL); 510 zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
596} 511}
597 512
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str)
602 517
603 if (!strcmp(str, "poll")) { 518 if (!strcmp(str, "poll")) {
604 pr_info("using polling idle threads\n"); 519 pr_info("using polling idle threads\n");
605 pm_idle = poll_idle; 520 x86_idle = poll_idle;
606 boot_option_idle_override = IDLE_POLL; 521 boot_option_idle_override = IDLE_POLL;
607 } else if (!strcmp(str, "mwait")) {
608 boot_option_idle_override = IDLE_FORCE_MWAIT;
609 WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
610 } else if (!strcmp(str, "halt")) { 522 } else if (!strcmp(str, "halt")) {
611 /* 523 /*
612 * When the boot option of idle=halt is added, halt is 524 * When the boot option of idle=halt is added, halt is
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str)
615 * To continue to load the CPU idle driver, don't touch 527 * To continue to load the CPU idle driver, don't touch
616 * the boot_option_idle_override. 528 * the boot_option_idle_override.
617 */ 529 */
618 pm_idle = default_idle; 530 x86_idle = default_idle;
619 boot_option_idle_override = IDLE_HALT; 531 boot_option_idle_override = IDLE_HALT;
620 } else if (!strcmp(str, "nomwait")) { 532 } else if (!strcmp(str, "nomwait")) {
621 /* 533 /*
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b629bbe0d9bd..29a8120e6fe8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,7 +22,7 @@
22#include <linux/perf_event.h> 22#include <linux/perf_event.h>
23#include <linux/hw_breakpoint.h> 23#include <linux/hw_breakpoint.h>
24#include <linux/rcupdate.h> 24#include <linux/rcupdate.h>
25#include <linux/module.h> 25#include <linux/export.h>
26#include <linux/context_tracking.h> 26#include <linux/context_tracking.h>
27 27
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4e8ba39eaf0f..76fa1e9a2b39 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void)
584 break; 584 break;
585 585
586 case BOOT_EFI: 586 case BOOT_EFI:
587 if (efi_enabled) 587 if (efi_enabled(EFI_RUNTIME_SERVICES))
588 efi.reset_system(reboot_mode ? 588 efi.reset_system(reboot_mode ?
589 EFI_RESET_WARM : 589 EFI_RESET_WARM :
590 EFI_RESET_COLD, 590 EFI_RESET_COLD,
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 801602b5d745..2e8f3d3b5641 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void)
149 if (century) { 149 if (century) {
150 century = bcd2bin(century); 150 century = bcd2bin(century);
151 year += century * 100; 151 year += century * 100;
152 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
153 } else 152 } else
154 year += CMOS_YEARS_OFFS; 153 year += CMOS_YEARS_OFFS;
155 154
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 00f6c1472b85..8b24289cc10c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -807,15 +807,15 @@ void __init setup_arch(char **cmdline_p)
807#ifdef CONFIG_EFI 807#ifdef CONFIG_EFI
808 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 808 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
809 "EL32", 4)) { 809 "EL32", 4)) {
810 efi_enabled = 1; 810 set_bit(EFI_BOOT, &x86_efi_facility);
811 efi_64bit = false;
812 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 811 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
813 "EL64", 4)) { 812 "EL64", 4)) {
814 efi_enabled = 1; 813 set_bit(EFI_BOOT, &x86_efi_facility);
815 efi_64bit = true; 814 set_bit(EFI_64BIT, &x86_efi_facility);
816 } 815 }
817 if (efi_enabled && efi_memblock_x86_reserve_range()) 816
818 efi_enabled = 0; 817 if (efi_enabled(EFI_BOOT))
818 efi_memblock_x86_reserve_range();
819#endif 819#endif
820 820
821 x86_init.oem.arch_setup(); 821 x86_init.oem.arch_setup();
@@ -888,7 +888,7 @@ void __init setup_arch(char **cmdline_p)
888 888
889 finish_e820_parsing(); 889 finish_e820_parsing();
890 890
891 if (efi_enabled) 891 if (efi_enabled(EFI_BOOT))
892 efi_init(); 892 efi_init();
893 893
894 dmi_scan_machine(); 894 dmi_scan_machine();
@@ -971,7 +971,7 @@ void __init setup_arch(char **cmdline_p)
971 * The EFI specification says that boot service code won't be called 971 * The EFI specification says that boot service code won't be called
972 * after ExitBootServices(). This is, in fact, a lie. 972 * after ExitBootServices(). This is, in fact, a lie.
973 */ 973 */
974 if (efi_enabled) 974 if (efi_enabled(EFI_MEMMAP))
975 efi_reserve_boot_services(); 975 efi_reserve_boot_services();
976 976
977 /* preallocate 4k for mptable mpc */ 977 /* preallocate 4k for mptable mpc */
@@ -1114,7 +1114,7 @@ void __init setup_arch(char **cmdline_p)
1114 1114
1115#ifdef CONFIG_VT 1115#ifdef CONFIG_VT
1116#if defined(CONFIG_VGA_CONSOLE) 1116#if defined(CONFIG_VGA_CONSOLE)
1117 if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) 1117 if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1118 conswitchp = &vga_con; 1118 conswitchp = &vga_con;
1119#elif defined(CONFIG_DUMMY_CONSOLE) 1119#elif defined(CONFIG_DUMMY_CONSOLE)
1120 conswitchp = &dummy_con; 1120 conswitchp = &dummy_con;
@@ -1131,14 +1131,14 @@ void __init setup_arch(char **cmdline_p)
1131 register_refined_jiffies(CLOCK_TICK_RATE); 1131 register_refined_jiffies(CLOCK_TICK_RATE);
1132 1132
1133#ifdef CONFIG_EFI 1133#ifdef CONFIG_EFI
1134 /* Once setup is done above, disable efi_enabled on mismatched 1134 /* Once setup is done above, unmap the EFI memory map on
1135 * firmware/kernel archtectures since there is no support for 1135 * mismatched firmware/kernel archtectures since there is no
1136 * runtime services. 1136 * support for runtime services.
1137 */ 1137 */
1138 if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) { 1138 if (efi_enabled(EFI_BOOT) &&
1139 IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
1139 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); 1140 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
1140 efi_unmap_memmap(); 1141 efi_unmap_memmap();
1141 efi_enabled = 0;
1142 } 1142 }
1143#endif 1143#endif
1144} 1144}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
1369 void *mwait_ptr; 1369 void *mwait_ptr;
1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); 1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
1371 1371
1372 if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))) 1372 if (!this_cpu_has(X86_FEATURE_MWAIT))
1373 return; 1373 return;
1374 if (!this_cpu_has(X86_FEATURE_CLFLSH)) 1374 if (!this_cpu_has(X86_FEATURE_CLFLSH))
1375 return; 1375 return;
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index cd3b2438a980..9b4d51d0c0d0 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on)
165 * Ensure irq/preemption can't change debugctl in between. 165 * Ensure irq/preemption can't change debugctl in between.
166 * Note also that both TIF_BLOCKSTEP and debugctl should 166 * Note also that both TIF_BLOCKSTEP and debugctl should
167 * be changed atomically wrt preemption. 167 * be changed atomically wrt preemption.
168 * FIXME: this means that set/clear TIF_BLOCKSTEP is simply 168 *
169 * wrong if task != current, SIGKILL can wakeup the stopped 169 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
170 * tracee and set/clear can play with the running task, this 170 * task is current or it can't be running, otherwise we can race
171 * can confuse the next __switch_to_xtra(). 171 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
172 * PTRACE_KILL is not safe.
172 */ 173 */
173 local_irq_disable(); 174 local_irq_disable();
174 debugctl = get_debugctlmsr(); 175 debugctl = get_debugctlmsr();
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 97ef74b88e0f..dbded5aedb81 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
157 if (flags & MAP_FIXED) 157 if (flags & MAP_FIXED)
158 return addr; 158 return addr;
159 159
160 /* for MAP_32BIT mappings we force the legact mmap base */ 160 /* for MAP_32BIT mappings we force the legacy mmap base */
161 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) 161 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
162 goto bottomup; 162 goto bottomup;
163 163
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 06ccb5073a3f..4b9ea101fe3b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
623 ns_now = __cycles_2_ns(tsc_now); 623 ns_now = __cycles_2_ns(tsc_now);
624 624
625 if (cpu_khz) { 625 if (cpu_khz) {
626 *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; 626 *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
627 cpu_khz / 2) / cpu_khz;
627 *offset = ns_now - mult_frac(tsc_now, *scale, 628 *offset = ns_now - mult_frac(tsc_now, *scale,
628 (1UL << CYC2NS_SCALE_FACTOR)); 629 (1UL << CYC2NS_SCALE_FACTOR));
629 } 630 }
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index c71025b67462..0ba4cfb4f412 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
680 if (auprobe->insn[i] == 0x66) 680 if (auprobe->insn[i] == 0x66)
681 continue; 681 continue;
682 682
683 if (auprobe->insn[i] == 0x90) 683 if (auprobe->insn[i] == 0x90) {
684 regs->ip += i + 1;
684 return true; 685 return true;
686 }
685 687
686 break; 688 break;
687 } 689 }
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 7a3d075a814a..d065d67c2672 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -19,6 +19,7 @@
19#include <asm/time.h> 19#include <asm/time.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
21#include <asm/io_apic.h> 21#include <asm/io_apic.h>
22#include <asm/hpet.h>
22#include <asm/pat.h> 23#include <asm/pat.h>
23#include <asm/tsc.h> 24#include <asm/tsc.h>
24#include <asm/iommu.h> 25#include <asm/iommu.h>
@@ -111,15 +112,22 @@ struct x86_platform_ops x86_platform = {
111 112
112EXPORT_SYMBOL_GPL(x86_platform); 113EXPORT_SYMBOL_GPL(x86_platform);
113struct x86_msi_ops x86_msi = { 114struct x86_msi_ops x86_msi = {
114 .setup_msi_irqs = native_setup_msi_irqs, 115 .setup_msi_irqs = native_setup_msi_irqs,
115 .teardown_msi_irq = native_teardown_msi_irq, 116 .compose_msi_msg = native_compose_msi_msg,
116 .teardown_msi_irqs = default_teardown_msi_irqs, 117 .teardown_msi_irq = native_teardown_msi_irq,
117 .restore_msi_irqs = default_restore_msi_irqs, 118 .teardown_msi_irqs = default_teardown_msi_irqs,
119 .restore_msi_irqs = default_restore_msi_irqs,
120 .setup_hpet_msi = default_setup_hpet_msi,
118}; 121};
119 122
120struct x86_io_apic_ops x86_io_apic_ops = { 123struct x86_io_apic_ops x86_io_apic_ops = {
121 .init = native_io_apic_init_mappings, 124 .init = native_io_apic_init_mappings,
122 .read = native_io_apic_read, 125 .read = native_io_apic_read,
123 .write = native_io_apic_write, 126 .write = native_io_apic_write,
124 .modify = native_io_apic_modify, 127 .modify = native_io_apic_modify,
128 .disable = native_disable_io_apic,
129 .print_entries = native_io_apic_print_entries,
130 .set_affinity = native_ioapic_set_affinity,
131 .setup_entry = native_setup_ioapic_entry,
132 .eoi_ioapic_pin = native_eoi_ioapic_pin,
125}; 133};
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 027088f2f7dd..fb674fd3fc22 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
748 return; 748 return;
749 } 749 }
750#endif 750#endif
751 /* Kernel addresses are always protection faults: */
752 if (address >= TASK_SIZE)
753 error_code |= PF_PROT;
751 754
752 if (unlikely(show_unhandled_signals)) 755 if (likely(show_unhandled_signals))
753 show_signal_msg(regs, error_code, address, tsk); 756 show_signal_msg(regs, error_code, address, tsk);
754 757
755 /* Kernel addresses are always protection faults: */
756 tsk->thread.cr2 = address; 758 tsk->thread.cr2 = address;
757 tsk->thread.error_code = error_code | (address >= TASK_SIZE); 759 tsk->thread.error_code = error_code;
758 tsk->thread.trap_nr = X86_TRAP_PF; 760 tsk->thread.trap_nr = X86_TRAP_PF;
759 761
760 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); 762 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2ead3c8a4c84..d6eeead43758 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -605,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
605 } 605 }
606 606
607 if (pgd_changed) 607 if (pgd_changed)
608 sync_global_pgds(addr, end); 608 sync_global_pgds(addr, end - 1);
609 609
610 __flush_tlb_all(); 610 __flush_tlb_all();
611 611
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
831 if (pud_none(*pud)) 831 if (pud_none(*pud))
832 return 0; 832 return 0;
833 833
834 if (pud_large(*pud))
835 return pfn_valid(pud_pfn(*pud));
836
834 pmd = pmd_offset(pud, addr); 837 pmd = pmd_offset(pud, addr);
835 if (pmd_none(*pmd)) 838 if (pmd_none(*pmd))
836 return 0; 839 return 0;
@@ -981,7 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
981 } 984 }
982 985
983 } 986 }
984 sync_global_pgds((unsigned long)start_page, end); 987 sync_global_pgds((unsigned long)start_page, end - 1);
985 return 0; 988 return 0;
986} 989}
987 990
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index c80b9fb95734..8dabbed409ee 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,6 +9,7 @@
9#include <linux/memblock.h> 9#include <linux/memblock.h>
10 10
11static u64 patterns[] __initdata = { 11static u64 patterns[] __initdata = {
12 /* The first entry has to be 0 to leave memtest with zeroed memory */
12 0, 13 0,
13 0xffffffffffffffffULL, 14 0xffffffffffffffffULL,
14 0x5555555555555555ULL, 15 0x5555555555555555ULL,
@@ -110,15 +111,8 @@ void __init early_memtest(unsigned long start, unsigned long end)
110 return; 111 return;
111 112
112 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); 113 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
113 for (i = 0; i < memtest_pattern; i++) { 114 for (i = memtest_pattern-1; i < UINT_MAX; --i) {
114 idx = i % ARRAY_SIZE(patterns); 115 idx = i % ARRAY_SIZE(patterns);
115 do_one_pass(patterns[idx], start, end); 116 do_one_pass(patterns[idx], start, end);
116 } 117 }
117
118 if (idx > 0) {
119 printk(KERN_INFO "early_memtest: wipe out "
120 "test pattern from memory\n");
121 /* additional test with pattern 0 will do this */
122 do_one_pass(0, start, end);
123 }
124} 118}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4ddf497ca65b..cdd0da9dd530 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -149,39 +149,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
149 int node, pxm; 149 int node, pxm;
150 150
151 if (srat_disabled()) 151 if (srat_disabled())
152 return -1; 152 goto out_err;
153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { 153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
154 bad_srat(); 154 goto out_err_bad_srat;
155 return -1;
156 }
157 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) 155 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
158 return -1; 156 goto out_err;
159
160 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) 157 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
161 return -1; 158 goto out_err;
159
162 start = ma->base_address; 160 start = ma->base_address;
163 end = start + ma->length; 161 end = start + ma->length;
164 pxm = ma->proximity_domain; 162 pxm = ma->proximity_domain;
165 if (acpi_srat_revision <= 1) 163 if (acpi_srat_revision <= 1)
166 pxm &= 0xff; 164 pxm &= 0xff;
165
167 node = setup_node(pxm); 166 node = setup_node(pxm);
168 if (node < 0) { 167 if (node < 0) {
169 printk(KERN_ERR "SRAT: Too many proximity domains.\n"); 168 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
170 bad_srat(); 169 goto out_err_bad_srat;
171 return -1;
172 } 170 }
173 171
174 if (numa_add_memblk(node, start, end) < 0) { 172 if (numa_add_memblk(node, start, end) < 0)
175 bad_srat(); 173 goto out_err_bad_srat;
176 return -1;
177 }
178 174
179 node_set(node, numa_nodes_parsed); 175 node_set(node, numa_nodes_parsed);
180 176
181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 177 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
182 node, pxm, 178 node, pxm,
183 (unsigned long long) start, (unsigned long long) end - 1); 179 (unsigned long long) start, (unsigned long long) end - 1);
180
184 return 0; 181 return 0;
182out_err_bad_srat:
183 bad_srat();
184out_err:
185 return -1;
185} 186}
186 187
187void __init acpi_numa_arch_fixup(void) {} 188void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 13a6b29e2e5d..282375f13c7e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -335,7 +335,7 @@ static const struct file_operations fops_tlbflush = {
335 .llseek = default_llseek, 335 .llseek = default_llseek,
336}; 336};
337 337
338static int __cpuinit create_tlb_flushall_shift(void) 338static int __init create_tlb_flushall_shift(void)
339{ 339{
340 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, 340 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
341 arch_debugfs_dir, NULL, &fops_tlbflush); 341 arch_debugfs_dir, NULL, &fops_tlbflush);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index fb29968a7cd5..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -548,8 +548,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
548 if (cfg->address < 0xFFFFFFFF) 548 if (cfg->address < 0xFFFFFFFF)
549 return 0; 549 return 0;
550 550
551 if (!strcmp(mcfg->header.oem_id, "SGI") || 551 if (!strncmp(mcfg->header.oem_id, "SGI", 3))
552 !strcmp(mcfg->header.oem_id, "SGI2"))
553 return 0; 552 return 0;
554 553
555 if (mcfg->header.revision >= 1) { 554 if (mcfg->header.revision >= 1) {
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 8d874396cb29..01e0231a113e 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -2,10 +2,12 @@
2obj-y += ce4100/ 2obj-y += ce4100/
3obj-y += efi/ 3obj-y += efi/
4obj-y += geode/ 4obj-y += geode/
5obj-y += goldfish/
5obj-y += iris/ 6obj-y += iris/
6obj-y += mrst/ 7obj-y += mrst/
7obj-y += olpc/ 8obj-y += olpc/
8obj-y += scx200/ 9obj-y += scx200/
9obj-y += sfi/ 10obj-y += sfi/
11obj-y += ts5500/
10obj-y += visws/ 12obj-y += visws/
11obj-y += uv/ 13obj-y += uv/
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d9c1b95af17c..7145ec63c520 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -11,20 +11,21 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h>
14#include <linux/acpi.h> 15#include <linux/acpi.h>
15#include <linux/efi.h> 16#include <linux/efi.h>
16#include <linux/efi-bgrt.h> 17#include <linux/efi-bgrt.h>
17 18
18struct acpi_table_bgrt *bgrt_tab; 19struct acpi_table_bgrt *bgrt_tab;
19void *bgrt_image; 20void *__initdata bgrt_image;
20size_t bgrt_image_size; 21size_t __initdata bgrt_image_size;
21 22
22struct bmp_header { 23struct bmp_header {
23 u16 id; 24 u16 id;
24 u32 size; 25 u32 size;
25} __packed; 26} __packed;
26 27
27void efi_bgrt_init(void) 28void __init efi_bgrt_init(void)
28{ 29{
29 acpi_status status; 30 acpi_status status;
30 void __iomem *image; 31 void __iomem *image;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index ad4439145f85..928bf837040a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -51,9 +51,6 @@
51 51
52#define EFI_DEBUG 1 52#define EFI_DEBUG 1
53 53
54int efi_enabled;
55EXPORT_SYMBOL(efi_enabled);
56
57struct efi __read_mostly efi = { 54struct efi __read_mostly efi = {
58 .mps = EFI_INVALID_TABLE_ADDR, 55 .mps = EFI_INVALID_TABLE_ADDR,
59 .acpi = EFI_INVALID_TABLE_ADDR, 56 .acpi = EFI_INVALID_TABLE_ADDR,
@@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi);
69 66
70struct efi_memory_map memmap; 67struct efi_memory_map memmap;
71 68
72bool efi_64bit;
73
74static struct efi efi_phys __initdata; 69static struct efi efi_phys __initdata;
75static efi_system_table_t efi_systab __initdata; 70static efi_system_table_t efi_systab __initdata;
76 71
77static inline bool efi_is_native(void) 72static inline bool efi_is_native(void)
78{ 73{
79 return IS_ENABLED(CONFIG_X86_64) == efi_64bit; 74 return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
75}
76
77unsigned long x86_efi_facility;
78
79/*
80 * Returns 1 if 'facility' is enabled, 0 otherwise.
81 */
82int efi_enabled(int facility)
83{
84 return test_bit(facility, &x86_efi_facility) != 0;
80} 85}
86EXPORT_SYMBOL(efi_enabled);
81 87
82static int __init setup_noefi(char *arg) 88static int __init setup_noefi(char *arg)
83{ 89{
84 efi_enabled = 0; 90 clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
85 return 0; 91 return 0;
86} 92}
87early_param("noefi", setup_noefi); 93early_param("noefi", setup_noefi);
@@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void)
426 432
427void __init efi_unmap_memmap(void) 433void __init efi_unmap_memmap(void)
428{ 434{
435 clear_bit(EFI_MEMMAP, &x86_efi_facility);
429 if (memmap.map) { 436 if (memmap.map) {
430 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 437 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
431 memmap.map = NULL; 438 memmap.map = NULL;
@@ -460,7 +467,7 @@ void __init efi_free_boot_services(void)
460 467
461static int __init efi_systab_init(void *phys) 468static int __init efi_systab_init(void *phys)
462{ 469{
463 if (efi_64bit) { 470 if (efi_enabled(EFI_64BIT)) {
464 efi_system_table_64_t *systab64; 471 efi_system_table_64_t *systab64;
465 u64 tmp = 0; 472 u64 tmp = 0;
466 473
@@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
552 void *config_tables, *tablep; 559 void *config_tables, *tablep;
553 int i, sz; 560 int i, sz;
554 561
555 if (efi_64bit) 562 if (efi_enabled(EFI_64BIT))
556 sz = sizeof(efi_config_table_64_t); 563 sz = sizeof(efi_config_table_64_t);
557 else 564 else
558 sz = sizeof(efi_config_table_32_t); 565 sz = sizeof(efi_config_table_32_t);
@@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
572 efi_guid_t guid; 579 efi_guid_t guid;
573 unsigned long table; 580 unsigned long table;
574 581
575 if (efi_64bit) { 582 if (efi_enabled(EFI_64BIT)) {
576 u64 table64; 583 u64 table64;
577 guid = ((efi_config_table_64_t *)tablep)->guid; 584 guid = ((efi_config_table_64_t *)tablep)->guid;
578 table64 = ((efi_config_table_64_t *)tablep)->table; 585 table64 = ((efi_config_table_64_t *)tablep)->table;
@@ -684,7 +691,6 @@ void __init efi_init(void)
684 if (boot_params.efi_info.efi_systab_hi || 691 if (boot_params.efi_info.efi_systab_hi ||
685 boot_params.efi_info.efi_memmap_hi) { 692 boot_params.efi_info.efi_memmap_hi) {
686 pr_info("Table located above 4GB, disabling EFI.\n"); 693 pr_info("Table located above 4GB, disabling EFI.\n");
687 efi_enabled = 0;
688 return; 694 return;
689 } 695 }
690 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; 696 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
@@ -694,10 +700,10 @@ void __init efi_init(void)
694 ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 700 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
695#endif 701#endif
696 702
697 if (efi_systab_init(efi_phys.systab)) { 703 if (efi_systab_init(efi_phys.systab))
698 efi_enabled = 0;
699 return; 704 return;
700 } 705
706 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
701 707
702 /* 708 /*
703 * Show what we know for posterity 709 * Show what we know for posterity
@@ -715,10 +721,10 @@ void __init efi_init(void)
715 efi.systab->hdr.revision >> 16, 721 efi.systab->hdr.revision >> 16,
716 efi.systab->hdr.revision & 0xffff, vendor); 722 efi.systab->hdr.revision & 0xffff, vendor);
717 723
718 if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { 724 if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
719 efi_enabled = 0;
720 return; 725 return;
721 } 726
727 set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
722 728
723 /* 729 /*
724 * Note: We currently don't support runtime services on an EFI 730 * Note: We currently don't support runtime services on an EFI
@@ -727,15 +733,17 @@ void __init efi_init(void)
727 733
728 if (!efi_is_native()) 734 if (!efi_is_native())
729 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 735 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
730 else if (efi_runtime_init()) { 736 else {
731 efi_enabled = 0; 737 if (efi_runtime_init())
732 return; 738 return;
739 set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
733 } 740 }
734 741
735 if (efi_memmap_init()) { 742 if (efi_memmap_init())
736 efi_enabled = 0;
737 return; 743 return;
738 } 744
745 set_bit(EFI_MEMMAP, &x86_efi_facility);
746
739#ifdef CONFIG_X86_32 747#ifdef CONFIG_X86_32
740 if (efi_is_native()) { 748 if (efi_is_native()) {
741 x86_platform.get_wallclock = efi_get_time; 749 x86_platform.get_wallclock = efi_get_time;
@@ -941,7 +949,7 @@ void __init efi_enter_virtual_mode(void)
941 * 949 *
942 * Call EFI services through wrapper functions. 950 * Call EFI services through wrapper functions.
943 */ 951 */
944 efi.runtime_version = efi_systab.fw_revision; 952 efi.runtime_version = efi_systab.hdr.revision;
945 efi.get_time = virt_efi_get_time; 953 efi.get_time = virt_efi_get_time;
946 efi.set_time = virt_efi_set_time; 954 efi.set_time = virt_efi_set_time;
947 efi.get_wakeup_time = virt_efi_get_wakeup_time; 955 efi.get_wakeup_time = virt_efi_get_wakeup_time;
@@ -969,6 +977,9 @@ u32 efi_mem_type(unsigned long phys_addr)
969 efi_memory_desc_t *md; 977 efi_memory_desc_t *md;
970 void *p; 978 void *p;
971 979
980 if (!efi_enabled(EFI_MEMMAP))
981 return 0;
982
972 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 983 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
973 md = p; 984 md = p;
974 if ((md->phys_addr <= phys_addr) && 985 if ((md->phys_addr <= phys_addr) &&
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 95fd505dfeb6..2b2003860615 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -38,7 +38,7 @@
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#include <asm/fixmap.h> 39#include <asm/fixmap.h>
40 40
41static pgd_t save_pgd __initdata; 41static pgd_t *save_pgd __initdata;
42static unsigned long efi_flags __initdata; 42static unsigned long efi_flags __initdata;
43 43
44static void __init early_code_mapping_set_exec(int executable) 44static void __init early_code_mapping_set_exec(int executable)
@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
61void __init efi_call_phys_prelog(void) 61void __init efi_call_phys_prelog(void)
62{ 62{
63 unsigned long vaddress; 63 unsigned long vaddress;
64 int pgd;
65 int n_pgds;
64 66
65 early_code_mapping_set_exec(1); 67 early_code_mapping_set_exec(1);
66 local_irq_save(efi_flags); 68 local_irq_save(efi_flags);
67 vaddress = (unsigned long)__va(0x0UL); 69
68 save_pgd = *pgd_offset_k(0x0UL); 70 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
69 set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); 71 save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
72
73 for (pgd = 0; pgd < n_pgds; pgd++) {
74 save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
75 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
76 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
77 }
70 __flush_tlb_all(); 78 __flush_tlb_all();
71} 79}
72 80
@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
75 /* 83 /*
76 * After the lock is released, the original page table is restored. 84 * After the lock is released, the original page table is restored.
77 */ 85 */
78 set_pgd(pgd_offset_k(0x0UL), save_pgd); 86 int pgd;
87 int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
88 for (pgd = 0; pgd < n_pgds; pgd++)
89 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
90 kfree(save_pgd);
79 __flush_tlb_all(); 91 __flush_tlb_all();
80 local_irq_restore(efi_flags); 92 local_irq_restore(efi_flags);
81 early_code_mapping_set_exec(0); 93 early_code_mapping_set_exec(0);
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
new file mode 100644
index 000000000000..f030b532fdf3
--- /dev/null
+++ b/arch/x86/platform/goldfish/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_GOLDFISH) += goldfish.o
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
new file mode 100644
index 000000000000..1693107a518e
--- /dev/null
+++ b/arch/x86/platform/goldfish/goldfish.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (C) 2011 Intel, Inc.
4 * Copyright (C) 2013 Intel, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/kernel.h>
18#include <linux/irq.h>
19#include <linux/platform_device.h>
20
21/*
22 * Where in virtual device memory the IO devices (timers, system controllers
23 * and so on)
24 */
25
26#define GOLDFISH_PDEV_BUS_BASE (0xff001000)
27#define GOLDFISH_PDEV_BUS_END (0xff7fffff)
28#define GOLDFISH_PDEV_BUS_IRQ (4)
29
30#define GOLDFISH_TTY_BASE (0x2000)
31
32static struct resource goldfish_pdev_bus_resources[] = {
33 {
34 .start = GOLDFISH_PDEV_BUS_BASE,
35 .end = GOLDFISH_PDEV_BUS_END,
36 .flags = IORESOURCE_MEM,
37 },
38 {
39 .start = GOLDFISH_PDEV_BUS_IRQ,
40 .end = GOLDFISH_PDEV_BUS_IRQ,
41 .flags = IORESOURCE_IRQ,
42 }
43};
44
45static int __init goldfish_init(void)
46{
47 platform_device_register_simple("goldfish_pdev_bus", -1,
48 goldfish_pdev_bus_resources, 2);
49 return 0;
50}
51device_initcall(goldfish_init);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 2fdca25905ae..fef7d0ba7e3a 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -195,7 +195,7 @@ err_sysfs:
195 return r; 195 return r;
196} 196}
197 197
198static int xo15_sci_remove(struct acpi_device *device, int type) 198static int xo15_sci_remove(struct acpi_device *device)
199{ 199{
200 acpi_disable_gpe(NULL, xo15_sci_gpe); 200 acpi_disable_gpe(NULL, xo15_sci_gpe);
201 acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler); 201 acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index 7785b72ecc3a..bcd1a703e3e6 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -35,7 +35,7 @@
35static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; 35static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
36 36
37/* All CPUs enumerated by SFI must be present and enabled */ 37/* All CPUs enumerated by SFI must be present and enabled */
38static void __cpuinit mp_sfi_register_lapic(u8 id) 38static void __init mp_sfi_register_lapic(u8 id)
39{ 39{
40 if (MAX_LOCAL_APIC - id <= 0) { 40 if (MAX_LOCAL_APIC - id <= 0) {
41 pr_warning("Processor #%d invalid (max %d)\n", 41 pr_warning("Processor #%d invalid (max %d)\n",
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile
new file mode 100644
index 000000000000..c54e348c96a7
--- /dev/null
+++ b/arch/x86/platform/ts5500/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_TS5500) += ts5500.o
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
new file mode 100644
index 000000000000..39febb214e8c
--- /dev/null
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -0,0 +1,339 @@
1/*
2 * Technologic Systems TS-5500 Single Board Computer support
3 *
4 * Copyright (C) 2013 Savoir-faire Linux Inc.
5 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it under
8 * the terms of the GNU General Public License as published by the Free Software
9 * Foundation; either version 2 of the License, or (at your option) any later
10 * version.
11 *
12 *
13 * This driver registers the Technologic Systems TS-5500 Single Board Computer
14 * (SBC) and its devices, and exposes information to userspace such as jumpers'
15 * state or available options. For further information about sysfs entries, see
16 * Documentation/ABI/testing/sysfs-platform-ts5500.
17 *
18 * This code actually supports the TS-5500 platform, but it may be extended to
19 * support similar Technologic Systems x86-based platforms, such as the TS-5600.
20 */
21
22#include <linux/delay.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/leds.h>
26#include <linux/module.h>
27#include <linux/platform_data/gpio-ts5500.h>
28#include <linux/platform_data/max197.h>
29#include <linux/platform_device.h>
30#include <linux/slab.h>
31
32/* Product code register */
33#define TS5500_PRODUCT_CODE_ADDR 0x74
34#define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */
35
36/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
37#define TS5500_SRAM_RS485_ADC_ADDR 0x75
38#define TS5500_SRAM BIT(0) /* SRAM option */
39#define TS5500_RS485 BIT(1) /* RS-485 option */
40#define TS5500_ADC BIT(2) /* A/D converter option */
41#define TS5500_RS485_RTS BIT(6) /* RTS for RS-485 */
42#define TS5500_RS485_AUTO BIT(7) /* Automatic RS-485 */
43
44/* External Reset/Industrial Temperature Range options register */
45#define TS5500_ERESET_ITR_ADDR 0x76
46#define TS5500_ERESET BIT(0) /* External Reset option */
47#define TS5500_ITR BIT(1) /* Indust. Temp. Range option */
48
49/* LED/Jumpers register */
50#define TS5500_LED_JP_ADDR 0x77
51#define TS5500_LED BIT(0) /* LED flag */
52#define TS5500_JP1 BIT(1) /* Automatic CMOS */
53#define TS5500_JP2 BIT(2) /* Enable Serial Console */
54#define TS5500_JP3 BIT(3) /* Write Enable Drive A */
55#define TS5500_JP4 BIT(4) /* Fast Console (115K baud) */
56#define TS5500_JP5 BIT(5) /* User Jumper */
57#define TS5500_JP6 BIT(6) /* Console on COM1 (req. JP2) */
58#define TS5500_JP7 BIT(7) /* Undocumented (Unused) */
59
60/* A/D Converter registers */
61#define TS5500_ADC_CONV_BUSY_ADDR 0x195 /* Conversion state register */
62#define TS5500_ADC_CONV_BUSY BIT(0)
63#define TS5500_ADC_CONV_INIT_LSB_ADDR 0x196 /* Start conv. / LSB register */
64#define TS5500_ADC_CONV_MSB_ADDR 0x197 /* MSB register */
65#define TS5500_ADC_CONV_DELAY 12 /* usec */
66
67/**
68 * struct ts5500_sbc - TS-5500 board description
69 * @id: Board product ID.
70 * @sram: Flag for SRAM option.
71 * @rs485: Flag for RS-485 option.
72 * @adc: Flag for Analog/Digital converter option.
73 * @ereset: Flag for External Reset option.
74 * @itr: Flag for Industrial Temperature Range option.
75 * @jumpers: Bitfield for jumpers' state.
76 */
77struct ts5500_sbc {
78 int id;
79 bool sram;
80 bool rs485;
81 bool adc;
82 bool ereset;
83 bool itr;
84 u8 jumpers;
85};
86
87/* Board signatures in BIOS shadow RAM */
88static const struct {
89 const char * const string;
90 const ssize_t offset;
91} ts5500_signatures[] __initdata = {
92 { "TS-5x00 AMD Elan", 0xb14 },
93};
94
95static int __init ts5500_check_signature(void)
96{
97 void __iomem *bios;
98 int i, ret = -ENODEV;
99
100 bios = ioremap(0xf0000, 0x10000);
101 if (!bios)
102 return -ENOMEM;
103
104 for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
105 if (check_signature(bios + ts5500_signatures[i].offset,
106 ts5500_signatures[i].string,
107 strlen(ts5500_signatures[i].string))) {
108 ret = 0;
109 break;
110 }
111 }
112
113 iounmap(bios);
114 return ret;
115}
116
117static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
118{
119 u8 tmp;
120 int ret = 0;
121
122 if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
123 return -EBUSY;
124
125 tmp = inb(TS5500_PRODUCT_CODE_ADDR);
126 if (tmp != TS5500_PRODUCT_CODE) {
127 pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp);
128 ret = -ENODEV;
129 goto cleanup;
130 }
131 sbc->id = tmp;
132
133 tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
134 sbc->sram = tmp & TS5500_SRAM;
135 sbc->rs485 = tmp & TS5500_RS485;
136 sbc->adc = tmp & TS5500_ADC;
137
138 tmp = inb(TS5500_ERESET_ITR_ADDR);
139 sbc->ereset = tmp & TS5500_ERESET;
140 sbc->itr = tmp & TS5500_ITR;
141
142 tmp = inb(TS5500_LED_JP_ADDR);
143 sbc->jumpers = tmp & ~TS5500_LED;
144
145cleanup:
146 release_region(TS5500_PRODUCT_CODE_ADDR, 4);
147 return ret;
148}
149
150static ssize_t ts5500_show_id(struct device *dev,
151 struct device_attribute *attr, char *buf)
152{
153 struct ts5500_sbc *sbc = dev_get_drvdata(dev);
154
155 return sprintf(buf, "0x%.2x\n", sbc->id);
156}
157
158static ssize_t ts5500_show_jumpers(struct device *dev,
159 struct device_attribute *attr,
160 char *buf)
161{
162 struct ts5500_sbc *sbc = dev_get_drvdata(dev);
163
164 return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
165}
166
167#define TS5500_SHOW(field) \
168 static ssize_t ts5500_show_##field(struct device *dev, \
169 struct device_attribute *attr, \
170 char *buf) \
171 { \
172 struct ts5500_sbc *sbc = dev_get_drvdata(dev); \
173 return sprintf(buf, "%d\n", sbc->field); \
174 }
175
176TS5500_SHOW(sram)
177TS5500_SHOW(rs485)
178TS5500_SHOW(adc)
179TS5500_SHOW(ereset)
180TS5500_SHOW(itr)
181
182static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL);
183static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL);
184static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL);
185static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL);
186static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL);
187static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL);
188static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL);
189
190static struct attribute *ts5500_attributes[] = {
191 &dev_attr_id.attr,
192 &dev_attr_jumpers.attr,
193 &dev_attr_sram.attr,
194 &dev_attr_rs485.attr,
195 &dev_attr_adc.attr,
196 &dev_attr_ereset.attr,
197 &dev_attr_itr.attr,
198 NULL
199};
200
201static const struct attribute_group ts5500_attr_group = {
202 .attrs = ts5500_attributes,
203};
204
205static struct resource ts5500_dio1_resource[] = {
206 DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
207};
208
209static struct platform_device ts5500_dio1_pdev = {
210 .name = "ts5500-dio1",
211 .id = -1,
212 .resource = ts5500_dio1_resource,
213 .num_resources = 1,
214};
215
216static struct resource ts5500_dio2_resource[] = {
217 DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
218};
219
220static struct platform_device ts5500_dio2_pdev = {
221 .name = "ts5500-dio2",
222 .id = -1,
223 .resource = ts5500_dio2_resource,
224 .num_resources = 1,
225};
226
227static void ts5500_led_set(struct led_classdev *led_cdev,
228 enum led_brightness brightness)
229{
230 outb(!!brightness, TS5500_LED_JP_ADDR);
231}
232
233static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
234{
235 return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
236}
237
238static struct led_classdev ts5500_led_cdev = {
239 .name = "ts5500:green:",
240 .brightness_set = ts5500_led_set,
241 .brightness_get = ts5500_led_get,
242};
243
244static int ts5500_adc_convert(u8 ctrl)
245{
246 u8 lsb, msb;
247
248 /* Start conversion (ensure the 3 MSB are set to 0) */
249 outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
250
251 /*
252 * The platform has CPLD logic driving the A/D converter.
253 * The conversion must complete within 11 microseconds,
254 * otherwise we have to re-initiate a conversion.
255 */
256 udelay(TS5500_ADC_CONV_DELAY);
257 if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
258 return -EBUSY;
259
260 /* Read the raw data */
261 lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
262 msb = inb(TS5500_ADC_CONV_MSB_ADDR);
263
264 return (msb << 8) | lsb;
265}
266
267static struct max197_platform_data ts5500_adc_pdata = {
268 .convert = ts5500_adc_convert,
269};
270
271static struct platform_device ts5500_adc_pdev = {
272 .name = "max197",
273 .id = -1,
274 .dev = {
275 .platform_data = &ts5500_adc_pdata,
276 },
277};
278
279static int __init ts5500_init(void)
280{
281 struct platform_device *pdev;
282 struct ts5500_sbc *sbc;
283 int err;
284
285 /*
286 * There is no DMI available or PCI bridge subvendor info,
287 * only the BIOS provides a 16-bit identification call.
288 * It is safer to find a signature in the BIOS shadow RAM.
289 */
290 err = ts5500_check_signature();
291 if (err)
292 return err;
293
294 pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
295 if (IS_ERR(pdev))
296 return PTR_ERR(pdev);
297
298 sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
299 if (!sbc) {
300 err = -ENOMEM;
301 goto error;
302 }
303
304 err = ts5500_detect_config(sbc);
305 if (err)
306 goto error;
307
308 platform_set_drvdata(pdev, sbc);
309
310 err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
311 if (err)
312 goto error;
313
314 ts5500_dio1_pdev.dev.parent = &pdev->dev;
315 if (platform_device_register(&ts5500_dio1_pdev))
316 dev_warn(&pdev->dev, "DIO1 block registration failed\n");
317 ts5500_dio2_pdev.dev.parent = &pdev->dev;
318 if (platform_device_register(&ts5500_dio2_pdev))
319 dev_warn(&pdev->dev, "DIO2 block registration failed\n");
320
321 if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
322 dev_warn(&pdev->dev, "LED registration failed\n");
323
324 if (sbc->adc) {
325 ts5500_adc_pdev.dev.parent = &pdev->dev;
326 if (platform_device_register(&ts5500_adc_pdev))
327 dev_warn(&pdev->dev, "ADC registration failed\n");
328 }
329
330 return 0;
331error:
332 platform_device_unregister(pdev);
333 return err;
334}
335device_initcall(ts5500_init);
336
337MODULE_LICENSE("GPL");
338MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
339MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index b8b3a37c80cd..0f92173a12b6 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
1034 * globally purge translation cache of a virtual address or all TLB's 1034 * globally purge translation cache of a virtual address or all TLB's
1035 * @cpumask: mask of all cpu's in which the address is to be removed 1035 * @cpumask: mask of all cpu's in which the address is to be removed
1036 * @mm: mm_struct containing virtual address range 1036 * @mm: mm_struct containing virtual address range
1037 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) 1037 * @start: start virtual address to be removed from TLB
1038 * @end: end virtual address to be remove from TLB
1038 * @cpu: the current cpu 1039 * @cpu: the current cpu
1039 * 1040 *
1040 * This is the entry point for initiating any UV global TLB shootdown. 1041 * This is the entry point for initiating any UV global TLB shootdown.
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
1056 */ 1057 */
1057const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 1058const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1058 struct mm_struct *mm, unsigned long start, 1059 struct mm_struct *mm, unsigned long start,
1059 unsigned end, unsigned int cpu) 1060 unsigned long end, unsigned int cpu)
1060{ 1061{
1061 int locals = 0; 1062 int locals = 0;
1062 int remotes = 0; 1063 int remotes = 0;
@@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1113 1114
1114 record_send_statistics(stat, locals, hubs, remotes, bau_desc); 1115 record_send_statistics(stat, locals, hubs, remotes, bau_desc);
1115 1116
1116 bau_desc->payload.address = start; 1117 if (!end || (end - start) <= PAGE_SIZE)
1118 bau_desc->payload.address = start;
1119 else
1120 bau_desc->payload.address = TLB_FLUSH_ALL;
1117 bau_desc->payload.sending_cpu = cpu; 1121 bau_desc->payload.sending_cpu = cpu;
1118 /* 1122 /*
1119 * uv_flush_send_and_wait returns 0 if all cpu's were messaged, 1123 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
@@ -1463,7 +1467,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1463 } 1467 }
1464 1468
1465 if (input_arg == 0) { 1469 if (input_arg == 0) {
1466 elements = sizeof(stat_description)/sizeof(*stat_description); 1470 elements = ARRAY_SIZE(stat_description);
1467 printk(KERN_DEBUG "# cpu: cpu number\n"); 1471 printk(KERN_DEBUG "# cpu: cpu number\n");
1468 printk(KERN_DEBUG "Sender statistics:\n"); 1472 printk(KERN_DEBUG "Sender statistics:\n");
1469 for (i = 0; i < elements; i++) 1473 for (i = 0; i < elements; i++)
@@ -1504,7 +1508,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
1504 char *q; 1508 char *q;
1505 int cnt = 0; 1509 int cnt = 0;
1506 int val; 1510 int val;
1507 int e = sizeof(tunables) / sizeof(*tunables); 1511 int e = ARRAY_SIZE(tunables);
1508 1512
1509 p = instr + strspn(instr, WHITESPACE); 1513 p = instr + strspn(instr, WHITESPACE);
1510 q = p; 1514 q = p;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5032e0d19b86..98718f604eb6 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -15,7 +15,7 @@
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * 17 *
18 * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved. 18 * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
19 * Copyright (c) Dimitri Sivanich 19 * Copyright (c) Dimitri Sivanich
20 */ 20 */
21#include <linux/clockchips.h> 21#include <linux/clockchips.h>
@@ -102,9 +102,10 @@ static int uv_intr_pending(int pnode)
102 if (is_uv1_hub()) 102 if (is_uv1_hub())
103 return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) & 103 return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
104 UV1H_EVENT_OCCURRED0_RTC1_MASK; 104 UV1H_EVENT_OCCURRED0_RTC1_MASK;
105 else 105 else if (is_uvx_hub())
106 return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) & 106 return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
107 UV2H_EVENT_OCCURRED2_RTC_1_MASK; 107 UVXH_EVENT_OCCURRED2_RTC_1_MASK;
108 return 0;
108} 109}
109 110
110/* Setup interrupt and return non-zero if early expiration occurred. */ 111/* Setup interrupt and return non-zero if early expiration occurred. */
@@ -122,8 +123,8 @@ static int uv_setup_intr(int cpu, u64 expires)
122 uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, 123 uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
123 UV1H_EVENT_OCCURRED0_RTC1_MASK); 124 UV1H_EVENT_OCCURRED0_RTC1_MASK);
124 else 125 else
125 uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS, 126 uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
126 UV2H_EVENT_OCCURRED2_RTC_1_MASK); 127 UVXH_EVENT_OCCURRED2_RTC_1_MASK);
127 128
128 val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | 129 val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
129 ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); 130 ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index cc2f8c131286..872eb60e7806 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
55static void usage(const char *err) 55static void usage(const char *err)
56{ 56{
57 if (err) 57 if (err)
58 fprintf(stderr, "Error: %s\n\n", err); 58 fprintf(stderr, "%s: Error: %s\n\n", prog, err);
59 fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); 59 fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
60 fprintf(stderr, "\t-y 64bit mode\n"); 60 fprintf(stderr, "\t-y 64bit mode\n");
61 fprintf(stderr, "\t-n 32bit mode\n"); 61 fprintf(stderr, "\t-n 32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
269 insns++; 269 insns++;
270 } 270 }
271 271
272 fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); 272 fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
273 prog,
274 (errors) ? "Failure" : "Success",
275 insns,
276 (input_file) ? "given" : "random",
277 errors,
278 seed);
273 279
274 return errors ? 1 : 0; 280 return errors ? 1 : 0;
275} 281}
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 5a1847d61930..79d67bd507fa 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -814,12 +814,14 @@ int main(int argc, char **argv)
814 read_relocs(fp); 814 read_relocs(fp);
815 if (show_absolute_syms) { 815 if (show_absolute_syms) {
816 print_absolute_symbols(); 816 print_absolute_symbols();
817 return 0; 817 goto out;
818 } 818 }
819 if (show_absolute_relocs) { 819 if (show_absolute_relocs) {
820 print_absolute_relocs(); 820 print_absolute_relocs();
821 return 0; 821 goto out;
822 } 822 }
823 emit_relocs(as_text, use_real_mode); 823 emit_relocs(as_text, use_real_mode);
824out:
825 fclose(fp);
824 return 0; 826 return 0;
825} 827}
diff --git a/arch/x86/um/fault.c b/arch/x86/um/fault.c
index 8784ab30d91b..84ac7f7b0257 100644
--- a/arch/x86/um/fault.c
+++ b/arch/x86/um/fault.c
@@ -20,7 +20,7 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
20 const struct exception_table_entry *fixup; 20 const struct exception_table_entry *fixup;
21 21
22 fixup = search_exception_tables(address); 22 fixup = search_exception_tables(address);
23 if (fixup != 0) { 23 if (fixup) {
24 UPT_IP(regs) = fixup->fixup; 24 UPT_IP(regs) = fixup->fixup;
25 return 1; 25 return 1;
26 } 26 }
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 205ad328aa52..c74436e687bf 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void)
60 60
61static notrace cycle_t vread_hpet(void) 61static notrace cycle_t vread_hpet(void)
62{ 62{
63 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); 63 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
64} 64}
65 65
66#ifdef CONFIG_PARAVIRT_CLOCK 66#ifdef CONFIG_PARAVIRT_CLOCK
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 138e5667409a..39928d16be3b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)
1517#endif 1517#endif
1518} 1518}
1519 1519
1520#ifdef CONFIG_XEN_PVHVM 1520void __ref xen_hvm_init_shared_info(void)
1521#define HVM_SHARED_INFO_ADDR 0xFE700000UL
1522static struct shared_info *xen_hvm_shared_info;
1523static unsigned long xen_hvm_sip_phys;
1524static int xen_major, xen_minor;
1525
1526static void xen_hvm_connect_shared_info(unsigned long pfn)
1527{ 1521{
1522 int cpu;
1528 struct xen_add_to_physmap xatp; 1523 struct xen_add_to_physmap xatp;
1524 static struct shared_info *shared_info_page = 0;
1529 1525
1526 if (!shared_info_page)
1527 shared_info_page = (struct shared_info *)
1528 extend_brk(PAGE_SIZE, PAGE_SIZE);
1530 xatp.domid = DOMID_SELF; 1529 xatp.domid = DOMID_SELF;
1531 xatp.idx = 0; 1530 xatp.idx = 0;
1532 xatp.space = XENMAPSPACE_shared_info; 1531 xatp.space = XENMAPSPACE_shared_info;
1533 xatp.gpfn = pfn; 1532 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
1534 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1533 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
1535 BUG(); 1534 BUG();
1536 1535
1537} 1536 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
1538static void __init xen_hvm_set_shared_info(struct shared_info *sip)
1539{
1540 int cpu;
1541
1542 HYPERVISOR_shared_info = sip;
1543 1537
1544 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1538 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
1545 * page, we use it in the event channel upcall and in some pvclock 1539 * page, we use it in the event channel upcall and in some pvclock
1546 * related functions. We don't need the vcpu_info placement 1540 * related functions. We don't need the vcpu_info placement
1547 * optimizations because we don't use any pv_mmu or pv_irq op on 1541 * optimizations because we don't use any pv_mmu or pv_irq op on
1548 * HVM. */ 1542 * HVM.
1549 for_each_online_cpu(cpu) 1543 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
1544 * online but xen_hvm_init_shared_info is run at resume time too and
1545 * in that case multiple vcpus might be online. */
1546 for_each_online_cpu(cpu) {
1550 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1547 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1551}
1552
1553/* Reconnect the shared_info pfn to a (new) mfn */
1554void xen_hvm_resume_shared_info(void)
1555{
1556 xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
1557}
1558
1559/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
1560 * On these old tools the shared info page will be placed in E820_Ram.
1561 * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
1562 * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
1563 * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
1564 * here for the shared info page. */
1565static void __init xen_hvm_init_shared_info(void)
1566{
1567 if (xen_major < 4) {
1568 xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
1569 xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
1570 } else {
1571 xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
1572 set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
1573 xen_hvm_shared_info =
1574 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
1575 } 1548 }
1576 xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
1577 xen_hvm_set_shared_info(xen_hvm_shared_info);
1578} 1549}
1579 1550
1551#ifdef CONFIG_XEN_PVHVM
1580static void __init init_hvm_pv_info(void) 1552static void __init init_hvm_pv_info(void)
1581{ 1553{
1582 uint32_t ecx, edx, pages, msr, base; 1554 int major, minor;
1555 uint32_t eax, ebx, ecx, edx, pages, msr, base;
1583 u64 pfn; 1556 u64 pfn;
1584 1557
1585 base = xen_cpuid_base(); 1558 base = xen_cpuid_base();
1559 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
1560
1561 major = eax >> 16;
1562 minor = eax & 0xffff;
1563 printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
1564
1586 cpuid(base + 2, &pages, &msr, &ecx, &edx); 1565 cpuid(base + 2, &pages, &msr, &ecx, &edx);
1587 1566
1588 pfn = __pa(hypercall_page); 1567 pfn = __pa(hypercall_page);
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)
1633 1612
1634static bool __init xen_hvm_platform(void) 1613static bool __init xen_hvm_platform(void)
1635{ 1614{
1636 uint32_t eax, ebx, ecx, edx, base;
1637
1638 if (xen_pv_domain()) 1615 if (xen_pv_domain())
1639 return false; 1616 return false;
1640 1617
1641 base = xen_cpuid_base(); 1618 if (!xen_cpuid_base())
1642 if (!base)
1643 return false; 1619 return false;
1644 1620
1645 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
1646
1647 xen_major = eax >> 16;
1648 xen_minor = eax & 0xffff;
1649
1650 printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
1651
1652 return true; 1621 return true;
1653} 1622}
1654 1623
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
1668 .name = "Xen HVM", 1637 .name = "Xen HVM",
1669 .detect = xen_hvm_platform, 1638 .detect = xen_hvm_platform,
1670 .init_platform = xen_hvm_guest_init, 1639 .init_platform = xen_hvm_guest_init,
1640 .x2apic_available = xen_x2apic_para_available,
1671}; 1641};
1672EXPORT_SYMBOL(x86_hyper_xen_hvm); 1642EXPORT_SYMBOL(x86_hyper_xen_hvm);
1673#endif 1643#endif
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
556 COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); 556 COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
557 557
558 /* Set up idle, making sure it calls safe_halt() pvop */ 558 /* Set up idle, making sure it calls safe_halt() pvop */
559#ifdef CONFIG_X86_32
560 boot_cpu_data.hlt_works_ok = 1;
561#endif
562 disable_cpuidle(); 559 disable_cpuidle();
563 disable_cpufreq(); 560 disable_cpufreq();
564 WARN_ON(set_pm_idle_to_default()); 561 WARN_ON(xen_set_default_idle());
565 fiddle_vdso(); 562 fiddle_vdso();
566#ifdef CONFIG_NUMA 563#ifdef CONFIG_NUMA
567 numa_off = 1; 564 numa_off = 1;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4f7d2599b484..34bc4cee8887 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
432 play_dead_common(); 432 play_dead_common();
433 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 433 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
434 cpu_bringup(); 434 cpu_bringup();
435 /*
436 * Balance out the preempt calls - as we are running in cpu_idle
437 * loop which has been called at bootup from cpu_bringup_and_idle.
438 * The cpucpu_bringup_and_idle called cpu_bringup which made a
439 * preempt_disable() So this preempt_enable will balance it out.
440 */
441 preempt_enable();
442} 435}
443 436
444#else /* !CONFIG_HOTPLUG_CPU */ 437#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
30{ 30{
31#ifdef CONFIG_XEN_PVHVM 31#ifdef CONFIG_XEN_PVHVM
32 int cpu; 32 int cpu;
33 xen_hvm_resume_shared_info(); 33 xen_hvm_init_shared_info();
34 xen_callback_vector(); 34 xen_callback_vector();
35 xen_unplug_emulated_devices(); 35 xen_unplug_emulated_devices();
36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) { 36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index f9643fc50de5..33ca6e42a4ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -89,11 +89,11 @@ ENTRY(xen_iret)
89 */ 89 */
90#ifdef CONFIG_SMP 90#ifdef CONFIG_SMP
91 GET_THREAD_INFO(%eax) 91 GET_THREAD_INFO(%eax)
92 movl TI_cpu(%eax), %eax 92 movl %ss:TI_cpu(%eax), %eax
93 movl __per_cpu_offset(,%eax,4), %eax 93 movl %ss:__per_cpu_offset(,%eax,4), %eax
94 mov xen_vcpu(%eax), %eax 94 mov %ss:xen_vcpu(%eax), %eax
95#else 95#else
96 movl xen_vcpu, %eax 96 movl %ss:xen_vcpu, %eax
97#endif 97#endif
98 98
99 /* check IF state we're restoring */ 99 /* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
106 * resuming the code, so we don't have to be worried about 106 * resuming the code, so we don't have to be worried about
107 * being preempted to another CPU. 107 * being preempted to another CPU.
108 */ 108 */
109 setz XEN_vcpu_info_mask(%eax) 109 setz %ss:XEN_vcpu_info_mask(%eax)
110xen_iret_start_crit: 110xen_iret_start_crit:
111 111
112 /* check for unmasked and pending */ 112 /* check for unmasked and pending */
113 cmpw $0x0001, XEN_vcpu_info_pending(%eax) 113 cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
114 114
115 /* 115 /*
116 * If there's something pending, mask events again so we can 116 * If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
118 * touch XEN_vcpu_info_mask. 118 * touch XEN_vcpu_info_mask.
119 */ 119 */
120 jne 1f 120 jne 1f
121 movb $1, XEN_vcpu_info_mask(%eax) 121 movb $1, %ss:XEN_vcpu_info_mask(%eax)
122 122
1231: popl %eax 1231: popl %eax
124 124
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d2e73d19d366..a95b41744ad0 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
40void xen_vcpu_restore(void); 40void xen_vcpu_restore(void);
41 41
42void xen_callback_vector(void); 42void xen_callback_vector(void);
43void xen_hvm_resume_shared_info(void); 43void xen_hvm_init_shared_info(void);
44void xen_unplug_emulated_devices(void); 44void xen_unplug_emulated_devices(void);
45 45
46void __init xen_build_dynamic_phys_to_machine(void); 46void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 4acb5feba1fb..172a02a6ad14 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
170 consistent_sync(vaddr, size, direction); 170 consistent_sync(vaddr, size, direction);
171} 171}
172 172
173/* Not supported for now */
174static inline int dma_mmap_coherent(struct device *dev,
175 struct vm_area_struct *vma, void *cpu_addr,
176 dma_addr_t dma_addr, size_t size)
177{
178 return -EINVAL;
179}
180
181static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
182 void *cpu_addr, dma_addr_t dma_addr,
183 size_t size)
184{
185 return -EINVAL;
186}
187
173#endif /* _XTENSA_DMA_MAPPING_H */ 188#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 74638ec234c8..c88202f973d9 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/bio.h> 6#include <linux/bio.h>
7#include <linux/blkdev.h> 7#include <linux/blkdev.h>
8#include <linux/sched/sysctl.h>
8 9
9#include "blk.h" 10#include "blk.h"
10 11
diff --git a/block/elevator.c b/block/elevator.c
index 9edba1b8323e..603b2c178740 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,14 +100,14 @@ static void elevator_put(struct elevator_type *e)
100 module_put(e->elevator_owner); 100 module_put(e->elevator_owner);
101} 101}
102 102
103static struct elevator_type *elevator_get(const char *name) 103static struct elevator_type *elevator_get(const char *name, bool try_loading)
104{ 104{
105 struct elevator_type *e; 105 struct elevator_type *e;
106 106
107 spin_lock(&elv_list_lock); 107 spin_lock(&elv_list_lock);
108 108
109 e = elevator_find(name); 109 e = elevator_find(name);
110 if (!e) { 110 if (!e && try_loading) {
111 spin_unlock(&elv_list_lock); 111 spin_unlock(&elv_list_lock);
112 request_module("%s-iosched", name); 112 request_module("%s-iosched", name);
113 spin_lock(&elv_list_lock); 113 spin_lock(&elv_list_lock);
@@ -136,6 +136,22 @@ static int __init elevator_setup(char *str)
136 136
137__setup("elevator=", elevator_setup); 137__setup("elevator=", elevator_setup);
138 138
139/* called during boot to load the elevator chosen by the elevator param */
140void __init load_default_elevator_module(void)
141{
142 struct elevator_type *e;
143
144 if (!chosen_elevator[0])
145 return;
146
147 spin_lock(&elv_list_lock);
148 e = elevator_find(chosen_elevator);
149 spin_unlock(&elv_list_lock);
150
151 if (!e)
152 request_module("%s-iosched", chosen_elevator);
153}
154
139static struct kobj_type elv_ktype; 155static struct kobj_type elv_ktype;
140 156
141static struct elevator_queue *elevator_alloc(struct request_queue *q, 157static struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -191,25 +207,30 @@ int elevator_init(struct request_queue *q, char *name)
191 q->boundary_rq = NULL; 207 q->boundary_rq = NULL;
192 208
193 if (name) { 209 if (name) {
194 e = elevator_get(name); 210 e = elevator_get(name, true);
195 if (!e) 211 if (!e)
196 return -EINVAL; 212 return -EINVAL;
197 } 213 }
198 214
215 /*
216 * Use the default elevator specified by config boot param or
217 * config option. Don't try to load modules as we could be running
218 * off async and request_module() isn't allowed from async.
219 */
199 if (!e && *chosen_elevator) { 220 if (!e && *chosen_elevator) {
200 e = elevator_get(chosen_elevator); 221 e = elevator_get(chosen_elevator, false);
201 if (!e) 222 if (!e)
202 printk(KERN_ERR "I/O scheduler %s not found\n", 223 printk(KERN_ERR "I/O scheduler %s not found\n",
203 chosen_elevator); 224 chosen_elevator);
204 } 225 }
205 226
206 if (!e) { 227 if (!e) {
207 e = elevator_get(CONFIG_DEFAULT_IOSCHED); 228 e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
208 if (!e) { 229 if (!e) {
209 printk(KERN_ERR 230 printk(KERN_ERR
210 "Default I/O scheduler not found. " \ 231 "Default I/O scheduler not found. " \
211 "Using noop.\n"); 232 "Using noop.\n");
212 e = elevator_get("noop"); 233 e = elevator_get("noop", false);
213 } 234 }
214 } 235 }
215 236
@@ -951,7 +972,7 @@ int elevator_change(struct request_queue *q, const char *name)
951 return -ENXIO; 972 return -ENXIO;
952 973
953 strlcpy(elevator_name, name, sizeof(elevator_name)); 974 strlcpy(elevator_name, name, sizeof(elevator_name));
954 e = elevator_get(strstrip(elevator_name)); 975 e = elevator_get(strstrip(elevator_name), true);
955 if (!e) { 976 if (!e) {
956 printk(KERN_ERR "elevator: type %s not found\n", elevator_name); 977 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
957 return -EINVAL; 978 return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index 9a289d7c84bb..3993ebf4135f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
35 35
36static struct device_type disk_type; 36static struct device_type disk_type;
37 37
38static void disk_check_events(struct disk_events *ev,
39 unsigned int *clearing_ptr);
38static void disk_alloc_events(struct gendisk *disk); 40static void disk_alloc_events(struct gendisk *disk);
39static void disk_add_events(struct gendisk *disk); 41static void disk_add_events(struct gendisk *disk);
40static void disk_del_events(struct gendisk *disk); 42static void disk_del_events(struct gendisk *disk);
@@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1549 const struct block_device_operations *bdops = disk->fops; 1551 const struct block_device_operations *bdops = disk->fops;
1550 struct disk_events *ev = disk->ev; 1552 struct disk_events *ev = disk->ev;
1551 unsigned int pending; 1553 unsigned int pending;
1554 unsigned int clearing = mask;
1552 1555
1553 if (!ev) { 1556 if (!ev) {
1554 /* for drivers still using the old ->media_changed method */ 1557 /* for drivers still using the old ->media_changed method */
@@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1558 return 0; 1561 return 0;
1559 } 1562 }
1560 1563
1561 /* tell the workfn about the events being cleared */ 1564 disk_block_events(disk);
1565
1566 /*
1567 * store the union of mask and ev->clearing on the stack so that the
1568 * race with disk_flush_events does not cause ambiguity (ev->clearing
1569 * can still be modified even if events are blocked).
1570 */
1562 spin_lock_irq(&ev->lock); 1571 spin_lock_irq(&ev->lock);
1563 ev->clearing |= mask; 1572 clearing |= ev->clearing;
1573 ev->clearing = 0;
1564 spin_unlock_irq(&ev->lock); 1574 spin_unlock_irq(&ev->lock);
1565 1575
1566 /* uncondtionally schedule event check and wait for it to finish */ 1576 disk_check_events(ev, &clearing);
1567 disk_block_events(disk); 1577 /*
1568 queue_delayed_work(system_freezable_wq, &ev->dwork, 0); 1578 * if ev->clearing is not 0, the disk_flush_events got called in the
1569 flush_delayed_work(&ev->dwork); 1579 * middle of this function, so we want to run the workfn without delay.
1570 __disk_unblock_events(disk, false); 1580 */
1581 __disk_unblock_events(disk, ev->clearing ? true : false);
1571 1582
1572 /* then, fetch and clear pending events */ 1583 /* then, fetch and clear pending events */
1573 spin_lock_irq(&ev->lock); 1584 spin_lock_irq(&ev->lock);
1574 WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */
1575 pending = ev->pending & mask; 1585 pending = ev->pending & mask;
1576 ev->pending &= ~mask; 1586 ev->pending &= ~mask;
1577 spin_unlock_irq(&ev->lock); 1587 spin_unlock_irq(&ev->lock);
1588 WARN_ON_ONCE(clearing & mask);
1578 1589
1579 return pending; 1590 return pending;
1580} 1591}
1581 1592
1593/*
1594 * Separate this part out so that a different pointer for clearing_ptr can be
1595 * passed in for disk_clear_events.
1596 */
1582static void disk_events_workfn(struct work_struct *work) 1597static void disk_events_workfn(struct work_struct *work)
1583{ 1598{
1584 struct delayed_work *dwork = to_delayed_work(work); 1599 struct delayed_work *dwork = to_delayed_work(work);
1585 struct disk_events *ev = container_of(dwork, struct disk_events, dwork); 1600 struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
1601
1602 disk_check_events(ev, &ev->clearing);
1603}
1604
1605static void disk_check_events(struct disk_events *ev,
1606 unsigned int *clearing_ptr)
1607{
1586 struct gendisk *disk = ev->disk; 1608 struct gendisk *disk = ev->disk;
1587 char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; 1609 char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
1588 unsigned int clearing = ev->clearing; 1610 unsigned int clearing = *clearing_ptr;
1589 unsigned int events; 1611 unsigned int events;
1590 unsigned long intv; 1612 unsigned long intv;
1591 int nr_events = 0, i; 1613 int nr_events = 0, i;
@@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)
1598 1620
1599 events &= ~ev->pending; 1621 events &= ~ev->pending;
1600 ev->pending |= events; 1622 ev->pending |= events;
1601 ev->clearing &= ~clearing; 1623 *clearing_ptr &= ~clearing;
1602 1624
1603 intv = disk_events_poll_jiffies(disk); 1625 intv = disk_events_poll_jiffies(disk);
1604 if (!ev->block && intv) 1626 if (!ev->block && intv)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index f5fb0722a63a..2b4e89ba15ad 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,6 +134,8 @@ source "drivers/hwspinlock/Kconfig"
134 134
135source "drivers/clocksource/Kconfig" 135source "drivers/clocksource/Kconfig"
136 136
137source "drivers/mailbox/Kconfig"
138
137source "drivers/iommu/Kconfig" 139source "drivers/iommu/Kconfig"
138 140
139source "drivers/remoteproc/Kconfig" 141source "drivers/remoteproc/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7863b9fee50b..a8d32f1094b4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -130,6 +130,7 @@ obj-y += platform/
130#common clk code 130#common clk code
131obj-y += clk/ 131obj-y += clk/
132 132
133obj-$(CONFIG_MAILBOX) += mailbox/
133obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ 134obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
134obj-$(CONFIG_NFC) += nfc/ 135obj-$(CONFIG_NFC) += nfc/
135obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ 136obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 38c5078da11d..78105b3a5262 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -337,7 +337,7 @@ config X86_PM_TIMER
337 systems require this timer. 337 systems require this timer.
338 338
339config ACPI_CONTAINER 339config ACPI_CONTAINER
340 tristate "Container and Module Devices (EXPERIMENTAL)" 340 bool "Container and Module Devices (EXPERIMENTAL)"
341 depends on EXPERIMENTAL 341 depends on EXPERIMENTAL
342 default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO) 342 default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
343 help 343 help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 2a4502becd13..474fcfeba66c 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,7 +37,8 @@ acpi-y += resource.o
37acpi-y += processor_core.o 37acpi-y += processor_core.o
38acpi-y += ec.o 38acpi-y += ec.o
39acpi-$(CONFIG_ACPI_DOCK) += dock.o 39acpi-$(CONFIG_ACPI_DOCK) += dock.o
40acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 40acpi-y += pci_root.o pci_link.o pci_irq.o
41acpi-y += csrt.o
41acpi-y += acpi_platform.o 42acpi-y += acpi_platform.o
42acpi-y += power.o 43acpi-y += power.o
43acpi-y += event.o 44acpi-y += event.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index d5fdd36190cc..6d5bf649196d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -60,7 +60,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
60#endif 60#endif
61 61
62static int acpi_ac_add(struct acpi_device *device); 62static int acpi_ac_add(struct acpi_device *device);
63static int acpi_ac_remove(struct acpi_device *device, int type); 63static int acpi_ac_remove(struct acpi_device *device);
64static void acpi_ac_notify(struct acpi_device *device, u32 event); 64static void acpi_ac_notify(struct acpi_device *device, u32 event);
65 65
66static const struct acpi_device_id ac_device_ids[] = { 66static const struct acpi_device_id ac_device_ids[] = {
@@ -337,7 +337,7 @@ static int acpi_ac_resume(struct device *dev)
337} 337}
338#endif 338#endif
339 339
340static int acpi_ac_remove(struct acpi_device *device, int type) 340static int acpi_ac_remove(struct acpi_device *device)
341{ 341{
342 struct acpi_ac *ac = NULL; 342 struct acpi_ac *ac = NULL;
343 343
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b679bf8478f7..034d3e72aa92 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
54#define MEMORY_POWER_OFF_STATE 2 54#define MEMORY_POWER_OFF_STATE 2
55 55
56static int acpi_memory_device_add(struct acpi_device *device); 56static int acpi_memory_device_add(struct acpi_device *device);
57static int acpi_memory_device_remove(struct acpi_device *device, int type); 57static int acpi_memory_device_remove(struct acpi_device *device);
58 58
59static const struct acpi_device_id memory_device_ids[] = { 59static const struct acpi_device_id memory_device_ids[] = {
60 {ACPI_MEMORY_DEVICE_HID, 0}, 60 {ACPI_MEMORY_DEVICE_HID, 0},
@@ -153,51 +153,46 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
153 return 0; 153 return 0;
154} 154}
155 155
156static int 156static int acpi_memory_get_device(acpi_handle handle,
157acpi_memory_get_device(acpi_handle handle, 157 struct acpi_memory_device **mem_device)
158 struct acpi_memory_device **mem_device)
159{ 158{
160 acpi_status status;
161 acpi_handle phandle;
162 struct acpi_device *device = NULL; 159 struct acpi_device *device = NULL;
163 struct acpi_device *pdevice = NULL; 160 int result = 0;
164 int result;
165 161
162 acpi_scan_lock_acquire();
166 163
167 if (!acpi_bus_get_device(handle, &device) && device) 164 acpi_bus_get_device(handle, &device);
165 if (device)
168 goto end; 166 goto end;
169 167
170 status = acpi_get_parent(handle, &phandle);
171 if (ACPI_FAILURE(status)) {
172 ACPI_EXCEPTION((AE_INFO, status, "Cannot find acpi parent"));
173 return -EINVAL;
174 }
175
176 /* Get the parent device */
177 result = acpi_bus_get_device(phandle, &pdevice);
178 if (result) {
179 acpi_handle_warn(phandle, "Cannot get acpi bus device\n");
180 return -EINVAL;
181 }
182
183 /* 168 /*
184 * Now add the notified device. This creates the acpi_device 169 * Now add the notified device. This creates the acpi_device
185 * and invokes .add function 170 * and invokes .add function
186 */ 171 */
187 result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); 172 result = acpi_bus_scan(handle);
188 if (result) { 173 if (result) {
189 acpi_handle_warn(handle, "Cannot add acpi bus\n"); 174 acpi_handle_warn(handle, "ACPI namespace scan failed\n");
190 return -EINVAL; 175 result = -EINVAL;
176 goto out;
177 }
178 result = acpi_bus_get_device(handle, &device);
179 if (result) {
180 acpi_handle_warn(handle, "Missing device object\n");
181 result = -EINVAL;
182 goto out;
191 } 183 }
192 184
193 end: 185 end:
194 *mem_device = acpi_driver_data(device); 186 *mem_device = acpi_driver_data(device);
195 if (!(*mem_device)) { 187 if (!(*mem_device)) {
196 dev_err(&device->dev, "driver data not found\n"); 188 dev_err(&device->dev, "driver data not found\n");
197 return -ENODEV; 189 result = -ENODEV;
190 goto out;
198 } 191 }
199 192
200 return 0; 193 out:
194 acpi_scan_lock_release();
195 return result;
201} 196}
202 197
203static int acpi_memory_check_device(struct acpi_memory_device *mem_device) 198static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
@@ -317,6 +312,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
317 struct acpi_device *device; 312 struct acpi_device *device;
318 struct acpi_eject_event *ej_event = NULL; 313 struct acpi_eject_event *ej_event = NULL;
319 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ 314 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
315 acpi_status status;
320 316
321 switch (event) { 317 switch (event) {
322 case ACPI_NOTIFY_BUS_CHECK: 318 case ACPI_NOTIFY_BUS_CHECK:
@@ -339,29 +335,40 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
339 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 335 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
340 "\nReceived EJECT REQUEST notification for device\n")); 336 "\nReceived EJECT REQUEST notification for device\n"));
341 337
338 status = AE_ERROR;
339 acpi_scan_lock_acquire();
340
342 if (acpi_bus_get_device(handle, &device)) { 341 if (acpi_bus_get_device(handle, &device)) {
343 acpi_handle_err(handle, "Device doesn't exist\n"); 342 acpi_handle_err(handle, "Device doesn't exist\n");
344 break; 343 goto unlock;
345 } 344 }
346 mem_device = acpi_driver_data(device); 345 mem_device = acpi_driver_data(device);
347 if (!mem_device) { 346 if (!mem_device) {
348 acpi_handle_err(handle, "Driver Data is NULL\n"); 347 acpi_handle_err(handle, "Driver Data is NULL\n");
349 break; 348 goto unlock;
350 } 349 }
351 350
352 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); 351 ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
353 if (!ej_event) { 352 if (!ej_event) {
354 pr_err(PREFIX "No memory, dropping EJECT\n"); 353 pr_err(PREFIX "No memory, dropping EJECT\n");
355 break; 354 goto unlock;
356 } 355 }
357 356
358 ej_event->handle = handle; 357 get_device(&device->dev);
358 ej_event->device = device;
359 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 359 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
360 acpi_os_hotplug_execute(acpi_bus_hot_remove_device, 360 /* The eject is carried out asynchronously. */
361 (void *)ej_event); 361 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
362 ej_event);
363 if (ACPI_FAILURE(status)) {
364 put_device(&device->dev);
365 kfree(ej_event);
366 }
362 367
363 /* eject is performed asynchronously */ 368 unlock:
364 return; 369 acpi_scan_lock_release();
370 if (ACPI_SUCCESS(status))
371 return;
365 default: 372 default:
366 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 373 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
367 "Unsupported event [0x%x]\n", event)); 374 "Unsupported event [0x%x]\n", event));
@@ -372,7 +379,6 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
372 379
373 /* Inform firmware that the hotplug operation has completed */ 380 /* Inform firmware that the hotplug operation has completed */
374 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL); 381 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
375 return;
376} 382}
377 383
378static void acpi_memory_device_free(struct acpi_memory_device *mem_device) 384static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
@@ -427,7 +433,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
427 return result; 433 return result;
428} 434}
429 435
430static int acpi_memory_device_remove(struct acpi_device *device, int type) 436static int acpi_memory_device_remove(struct acpi_device *device)
431{ 437{
432 struct acpi_memory_device *mem_device = NULL; 438 struct acpi_memory_device *mem_device = NULL;
433 int result; 439 int result;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 16fa979f7180..31de1043eea0 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -482,8 +482,7 @@ static int acpi_pad_add(struct acpi_device *device)
482 return 0; 482 return 0;
483} 483}
484 484
485static int acpi_pad_remove(struct acpi_device *device, 485static int acpi_pad_remove(struct acpi_device *device)
486 int type)
487{ 486{
488 mutex_lock(&isolated_cpus_lock); 487 mutex_lock(&isolated_cpus_lock);
489 acpi_pad_idle_cpus(0); 488 acpi_pad_idle_cpus(0);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index db129b9f52cb..26fce4b8a632 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/err.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
@@ -21,18 +22,59 @@
21 22
22ACPI_MODULE_NAME("platform"); 23ACPI_MODULE_NAME("platform");
23 24
25/* Flags for acpi_create_platform_device */
26#define ACPI_PLATFORM_CLK BIT(0)
27
28/*
29 * The following ACPI IDs are known to be suitable for representing as
30 * platform devices.
31 */
32static const struct acpi_device_id acpi_platform_device_ids[] = {
33
34 { "PNP0D40" },
35
36 /* Haswell LPSS devices */
37 { "INT33C0", ACPI_PLATFORM_CLK },
38 { "INT33C1", ACPI_PLATFORM_CLK },
39 { "INT33C2", ACPI_PLATFORM_CLK },
40 { "INT33C3", ACPI_PLATFORM_CLK },
41 { "INT33C4", ACPI_PLATFORM_CLK },
42 { "INT33C5", ACPI_PLATFORM_CLK },
43 { "INT33C6", ACPI_PLATFORM_CLK },
44 { "INT33C7", ACPI_PLATFORM_CLK },
45
46 { }
47};
48
49static int acpi_create_platform_clks(struct acpi_device *adev)
50{
51 static struct platform_device *pdev;
52
53 /* Create Lynxpoint LPSS clocks */
54 if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
55 pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
56 if (IS_ERR(pdev))
57 return PTR_ERR(pdev);
58 }
59
60 return 0;
61}
62
24/** 63/**
25 * acpi_create_platform_device - Create platform device for ACPI device node 64 * acpi_create_platform_device - Create platform device for ACPI device node
26 * @adev: ACPI device node to create a platform device for. 65 * @adev: ACPI device node to create a platform device for.
66 * @id: ACPI device ID used to match @adev.
27 * 67 *
28 * Check if the given @adev can be represented as a platform device and, if 68 * Check if the given @adev can be represented as a platform device and, if
29 * that's the case, create and register a platform device, populate its common 69 * that's the case, create and register a platform device, populate its common
30 * resources and returns a pointer to it. Otherwise, return %NULL. 70 * resources and returns a pointer to it. Otherwise, return %NULL.
31 * 71 *
32 * The platform device's name will be taken from the @adev's _HID and _UID. 72 * Name of the platform device will be the same as @adev's.
33 */ 73 */
34struct platform_device *acpi_create_platform_device(struct acpi_device *adev) 74static int acpi_create_platform_device(struct acpi_device *adev,
75 const struct acpi_device_id *id)
35{ 76{
77 unsigned long flags = id->driver_data;
36 struct platform_device *pdev = NULL; 78 struct platform_device *pdev = NULL;
37 struct acpi_device *acpi_parent; 79 struct acpi_device *acpi_parent;
38 struct platform_device_info pdevinfo; 80 struct platform_device_info pdevinfo;
@@ -41,20 +83,28 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
41 struct resource *resources; 83 struct resource *resources;
42 int count; 84 int count;
43 85
86 if (flags & ACPI_PLATFORM_CLK) {
87 int ret = acpi_create_platform_clks(adev);
88 if (ret) {
89 dev_err(&adev->dev, "failed to create clocks\n");
90 return ret;
91 }
92 }
93
44 /* If the ACPI node already has a physical device attached, skip it. */ 94 /* If the ACPI node already has a physical device attached, skip it. */
45 if (adev->physical_node_count) 95 if (adev->physical_node_count)
46 return NULL; 96 return 0;
47 97
48 INIT_LIST_HEAD(&resource_list); 98 INIT_LIST_HEAD(&resource_list);
49 count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); 99 count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
50 if (count <= 0) 100 if (count <= 0)
51 return NULL; 101 return 0;
52 102
53 resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL); 103 resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
54 if (!resources) { 104 if (!resources) {
55 dev_err(&adev->dev, "No memory for resources\n"); 105 dev_err(&adev->dev, "No memory for resources\n");
56 acpi_dev_free_resource_list(&resource_list); 106 acpi_dev_free_resource_list(&resource_list);
57 return NULL; 107 return -ENOMEM;
58 } 108 }
59 count = 0; 109 count = 0;
60 list_for_each_entry(rentry, &resource_list, node) 110 list_for_each_entry(rentry, &resource_list, node)
@@ -100,5 +150,15 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
100 } 150 }
101 151
102 kfree(resources); 152 kfree(resources);
103 return pdev; 153 return 1;
154}
155
156static struct acpi_scan_handler platform_handler = {
157 .ids = acpi_platform_device_ids,
158 .attach = acpi_create_platform_device,
159};
160
161void __init acpi_platform_init(void)
162{
163 acpi_scan_add_handler(&platform_handler);
104} 164}
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index bc7a03ded064..a1b9bf5085a2 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -31,6 +31,7 @@ acpi-y += \
31 evgpeinit.o \ 31 evgpeinit.o \
32 evgpeutil.o \ 32 evgpeutil.o \
33 evglock.o \ 33 evglock.o \
34 evhandler.o \
34 evmisc.o \ 35 evmisc.o \
35 evregion.o \ 36 evregion.o \
36 evrgnini.o \ 37 evrgnini.o \
@@ -90,6 +91,7 @@ acpi-y += \
90 nsobject.o \ 91 nsobject.o \
91 nsparse.o \ 92 nsparse.o \
92 nspredef.o \ 93 nspredef.o \
94 nsprepkg.o \
93 nsrepair.o \ 95 nsrepair.o \
94 nsrepair2.o \ 96 nsrepair2.o \
95 nssearch.o \ 97 nssearch.o \
@@ -104,7 +106,9 @@ acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
104acpi-y += \ 106acpi-y += \
105 psargs.o \ 107 psargs.o \
106 psloop.o \ 108 psloop.o \
109 psobject.o \
107 psopcode.o \ 110 psopcode.o \
111 psopinfo.o \
108 psparse.o \ 112 psparse.o \
109 psscope.o \ 113 psscope.o \
110 pstree.o \ 114 pstree.o \
@@ -126,7 +130,7 @@ acpi-y += \
126 rsutils.o \ 130 rsutils.o \
127 rsxface.o 131 rsxface.o
128 132
129acpi-$(ACPI_FUTURE_USAGE) += rsdump.o 133acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
130 134
131acpi-y += \ 135acpi-y += \
132 tbfadt.o \ 136 tbfadt.o \
@@ -155,8 +159,10 @@ acpi-y += \
155 utmutex.o \ 159 utmutex.o \
156 utobject.o \ 160 utobject.o \
157 utosi.o \ 161 utosi.o \
162 utownerid.o \
158 utresrc.o \ 163 utresrc.o \
159 utstate.o \ 164 utstate.o \
165 utstring.o \
160 utxface.o \ 166 utxface.o \
161 utxfinit.o \ 167 utxfinit.o \
162 utxferror.o \ 168 utxferror.o \
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8a7d51bfb3b3..8a6c4a0d22db 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
51 * 51 *
52 * Note: The order of these include files is important. 52 * Note: The order of these include files is important.
53 */ 53 */
54#include <acpi/acconfig.h> /* Global configuration constants */
54#include "acmacros.h" /* C macros */ 55#include "acmacros.h" /* C macros */
55#include "aclocal.h" /* Internal data types */ 56#include "aclocal.h" /* Internal data types */
56#include "acobject.h" /* ACPI internal object */ 57#include "acobject.h" /* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 432a318c9ed1..9feba08c29fe 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,21 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
115 char *block_arg)) 115 char *block_arg))
116 116
117/* 117/*
118 * dbconvert - miscellaneous conversion routines
119 */
120 acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
121
122acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
123
124acpi_status
125acpi_db_convert_to_object(acpi_object_type type,
126 char *string, union acpi_object *object);
127
128u8 *acpi_db_encode_pld_buffer(struct acpi_pld_info *pld_info);
129
130void acpi_db_dump_pld_buffer(union acpi_object *obj_desc);
131
132/*
118 * dbmethod - control method commands 133 * dbmethod - control method commands
119 */ 134 */
120void 135void
@@ -191,6 +206,8 @@ void
191acpi_db_create_execution_threads(char *num_threads_arg, 206acpi_db_create_execution_threads(char *num_threads_arg,
192 char *num_loops_arg, char *method_name_arg); 207 char *num_loops_arg, char *method_name_arg);
193 208
209void acpi_db_delete_objects(u32 count, union acpi_object *objects);
210
194#ifdef ACPI_DBG_TRACK_ALLOCATIONS 211#ifdef ACPI_DBG_TRACK_ALLOCATIONS
195u32 acpi_db_get_cache_info(struct acpi_memory_list *cache); 212u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
196#endif 213#endif
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index ed33ebcdaebe..427db72a6302 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index e975c6720448..ab0e97710381 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -158,10 +158,23 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
158 void *context); 158 void *context);
159 159
160/* 160/*
161 * evregion - Address Space handling 161 * evhandler - Address space handling
162 */ 162 */
163u8
164acpi_ev_has_default_handler(struct acpi_namespace_node *node,
165 acpi_adr_space_type space_id);
166
163acpi_status acpi_ev_install_region_handlers(void); 167acpi_status acpi_ev_install_region_handlers(void);
164 168
169acpi_status
170acpi_ev_install_space_handler(struct acpi_namespace_node *node,
171 acpi_adr_space_type space_id,
172 acpi_adr_space_handler handler,
173 acpi_adr_space_setup setup, void *context);
174
175/*
176 * evregion - Operation region support
177 */
165acpi_status acpi_ev_initialize_op_regions(void); 178acpi_status acpi_ev_initialize_op_regions(void);
166 179
167acpi_status 180acpi_status
@@ -180,12 +193,6 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
180 u8 acpi_ns_is_locked); 193 u8 acpi_ns_is_locked);
181 194
182acpi_status 195acpi_status
183acpi_ev_install_space_handler(struct acpi_namespace_node *node,
184 acpi_adr_space_type space_id,
185 acpi_adr_space_handler handler,
186 acpi_adr_space_setup setup, void *context);
187
188acpi_status
189acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, 196acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
190 acpi_adr_space_type space_id); 197 acpi_adr_space_type space_id);
191 198
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 64472e4ec329..ecb49927b817 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -192,14 +192,6 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
192ACPI_EXTERN u8 acpi_gbl_integer_byte_width; 192ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
193ACPI_EXTERN u8 acpi_gbl_integer_nybble_width; 193ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
194 194
195/* Mutex for _OSI support */
196
197ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
198
199/* Reader/Writer lock is used for namespace walk and dynamic table unload */
200
201ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
202
203/***************************************************************************** 195/*****************************************************************************
204 * 196 *
205 * Mutual exclusion within ACPICA subsystem 197 * Mutual exclusion within ACPICA subsystem
@@ -233,6 +225,14 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
233ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */ 225ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
234ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ 226ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
235 227
228/* Mutex for _OSI support */
229
230ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
231
232/* Reader/Writer lock is used for namespace walk and dynamic table unload */
233
234ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
235
236/***************************************************************************** 236/*****************************************************************************
237 * 237 *
238 * Miscellaneous globals 238 * Miscellaneous globals
@@ -252,7 +252,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
252ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2]; 252ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
253ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler; 253ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
254ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; 254ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
255ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler; 255ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
256ACPI_EXTERN void *acpi_gbl_table_handler_context; 256ACPI_EXTERN void *acpi_gbl_table_handler_context;
257ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; 257ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
258ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler; 258ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
@@ -304,6 +304,7 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
304ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; 304ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
305ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; 305ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
306ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; 306ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
307ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
307#endif 308#endif
308 309
309/***************************************************************************** 310/*****************************************************************************
@@ -365,19 +366,18 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
365 * 366 *
366 ****************************************************************************/ 367 ****************************************************************************/
367 368
368extern struct acpi_fixed_event_info
369 acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
370ACPI_EXTERN struct acpi_fixed_event_handler
371 acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
372ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
373ACPI_EXTERN struct acpi_gpe_block_info
374*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
375
376#if (!ACPI_REDUCED_HARDWARE) 369#if (!ACPI_REDUCED_HARDWARE)
377 370
378ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; 371ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
372ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
373ACPI_EXTERN struct acpi_gpe_block_info
374 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
379ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler; 375ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
380ACPI_EXTERN void *acpi_gbl_global_event_handler_context; 376ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
377ACPI_EXTERN struct acpi_fixed_event_handler
378 acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
379extern struct acpi_fixed_event_info
380 acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
381 381
382#endif /* !ACPI_REDUCED_HARDWARE */ 382#endif /* !ACPI_REDUCED_HARDWARE */
383 383
@@ -405,7 +405,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
405 405
406/***************************************************************************** 406/*****************************************************************************
407 * 407 *
408 * Debugger globals 408 * Debugger and Disassembler globals
409 * 409 *
410 ****************************************************************************/ 410 ****************************************************************************/
411 411
@@ -413,8 +413,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;
413 413
414#ifdef ACPI_DISASSEMBLER 414#ifdef ACPI_DISASSEMBLER
415 415
416u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
417
416ACPI_EXTERN u8 acpi_gbl_db_opt_disasm; 418ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
417ACPI_EXTERN u8 acpi_gbl_db_opt_verbose; 419ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
420ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
421ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
418#endif 422#endif
419 423
420#ifdef ACPI_DEBUGGER 424#ifdef ACPI_DEBUGGER
@@ -426,6 +430,7 @@ extern u8 acpi_gbl_db_terminate_threads;
426ACPI_EXTERN u8 acpi_gbl_db_opt_tables; 430ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
427ACPI_EXTERN u8 acpi_gbl_db_opt_stats; 431ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
428ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; 432ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
433ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
429 434
430ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; 435ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
431ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; 436ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index d902d31abc6c..6357e932bfd9 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index eb308635da72..8af8c9bdeb35 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -458,7 +458,7 @@ void acpi_ex_reacquire_interpreter(void);
458 458
459void acpi_ex_relinquish_interpreter(void); 459void acpi_ex_relinquish_interpreter(void);
460 460
461void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc); 461u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
462 462
463void acpi_ex_acquire_global_lock(u32 rule); 463void acpi_ex_acquire_global_lock(u32 rule);
464 464
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ff8bd0061e8b..805f419086ab 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -189,11 +189,10 @@ struct acpi_namespace_node {
189#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */ 189#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
190#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */ 190#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
191 191
192#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */ 192#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
193#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */ 193#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
194#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */ 194#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
195#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */ 195#define ANOBJ_IS_REFERENCED 0x80 /* iASL only: Object was referenced */
196#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
197 196
198/* Internal ACPI table management - master table list */ 197/* Internal ACPI table management - master table list */
199 198
@@ -411,11 +410,10 @@ struct acpi_gpe_notify_info {
411 struct acpi_gpe_notify_info *next; 410 struct acpi_gpe_notify_info *next;
412}; 411};
413 412
414struct acpi_gpe_notify_object { 413/*
415 struct acpi_namespace_node *node; 414 * GPE dispatch info. At any time, the GPE can have at most one type
416 struct acpi_gpe_notify_object *next; 415 * of dispatch - Method, Handler, or Implicit Notify.
417}; 416 */
418
419union acpi_gpe_dispatch_info { 417union acpi_gpe_dispatch_info {
420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 418 struct acpi_namespace_node *method_node; /* Method node for this GPE level */
421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 419 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
@@ -679,6 +677,8 @@ struct acpi_opcode_info {
679 u8 type; /* Opcode type */ 677 u8 type; /* Opcode type */
680}; 678};
681 679
680/* Value associated with the parse object */
681
682union acpi_parse_value { 682union acpi_parse_value {
683 u64 integer; /* Integer constant (Up to 64 bits) */ 683 u64 integer; /* Integer constant (Up to 64 bits) */
684 u32 size; /* bytelist or field size */ 684 u32 size; /* bytelist or field size */
@@ -1025,6 +1025,31 @@ struct acpi_port_info {
1025 1025
1026/***************************************************************************** 1026/*****************************************************************************
1027 * 1027 *
1028 * Disassembler
1029 *
1030 ****************************************************************************/
1031
1032struct acpi_external_list {
1033 char *path;
1034 char *internal_path;
1035 struct acpi_external_list *next;
1036 u32 value;
1037 u16 length;
1038 u8 type;
1039 u8 flags;
1040};
1041
1042/* Values for Flags field above */
1043
1044#define ACPI_IPATH_ALLOCATED 0x01
1045
1046struct acpi_external_file {
1047 char *path;
1048 struct acpi_external_file *next;
1049};
1050
1051/*****************************************************************************
1052 *
1028 * Debugger 1053 * Debugger
1029 * 1054 *
1030 ****************************************************************************/ 1055 ****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 5efad99f2169..ed7943b9044f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -49,14 +49,18 @@
49 * get into potential aligment issues -- see the STORE macros below. 49 * get into potential aligment issues -- see the STORE macros below.
50 * Use with care. 50 * Use with care.
51 */ 51 */
52#define ACPI_GET8(ptr) *ACPI_CAST_PTR (u8, ptr) 52#define ACPI_CAST8(ptr) ACPI_CAST_PTR (u8, (ptr))
53#define ACPI_GET16(ptr) *ACPI_CAST_PTR (u16, ptr) 53#define ACPI_CAST16(ptr) ACPI_CAST_PTR (u16, (ptr))
54#define ACPI_GET32(ptr) *ACPI_CAST_PTR (u32, ptr) 54#define ACPI_CAST32(ptr) ACPI_CAST_PTR (u32, (ptr))
55#define ACPI_GET64(ptr) *ACPI_CAST_PTR (u64, ptr) 55#define ACPI_CAST64(ptr) ACPI_CAST_PTR (u64, (ptr))
56#define ACPI_SET8(ptr) *ACPI_CAST_PTR (u8, ptr) 56#define ACPI_GET8(ptr) (*ACPI_CAST8 (ptr))
57#define ACPI_SET16(ptr) *ACPI_CAST_PTR (u16, ptr) 57#define ACPI_GET16(ptr) (*ACPI_CAST16 (ptr))
58#define ACPI_SET32(ptr) *ACPI_CAST_PTR (u32, ptr) 58#define ACPI_GET32(ptr) (*ACPI_CAST32 (ptr))
59#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr) 59#define ACPI_GET64(ptr) (*ACPI_CAST64 (ptr))
60#define ACPI_SET8(ptr, val) (*ACPI_CAST8 (ptr) = (u8) (val))
61#define ACPI_SET16(ptr, val) (*ACPI_CAST16 (ptr) = (u16) (val))
62#define ACPI_SET32(ptr, val) (*ACPI_CAST32 (ptr) = (u32) (val))
63#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
60 64
61/* 65/*
62 * printf() format helpers 66 * printf() format helpers
@@ -293,6 +297,26 @@
293#define ACPI_16BIT_MASK 0x0000FFFF 297#define ACPI_16BIT_MASK 0x0000FFFF
294#define ACPI_24BIT_MASK 0x00FFFFFF 298#define ACPI_24BIT_MASK 0x00FFFFFF
295 299
300/* Macros to extract flag bits from position zero */
301
302#define ACPI_GET_1BIT_FLAG(value) ((value) & ACPI_1BIT_MASK)
303#define ACPI_GET_2BIT_FLAG(value) ((value) & ACPI_2BIT_MASK)
304#define ACPI_GET_3BIT_FLAG(value) ((value) & ACPI_3BIT_MASK)
305#define ACPI_GET_4BIT_FLAG(value) ((value) & ACPI_4BIT_MASK)
306
307/* Macros to extract flag bits from position one and above */
308
309#define ACPI_EXTRACT_1BIT_FLAG(field, position) (ACPI_GET_1BIT_FLAG ((field) >> position))
310#define ACPI_EXTRACT_2BIT_FLAG(field, position) (ACPI_GET_2BIT_FLAG ((field) >> position))
311#define ACPI_EXTRACT_3BIT_FLAG(field, position) (ACPI_GET_3BIT_FLAG ((field) >> position))
312#define ACPI_EXTRACT_4BIT_FLAG(field, position) (ACPI_GET_4BIT_FLAG ((field) >> position))
313
314/* ACPI Pathname helpers */
315
316#define ACPI_IS_ROOT_PREFIX(c) ((c) == (u8) 0x5C) /* Backslash */
317#define ACPI_IS_PARENT_PREFIX(c) ((c) == (u8) 0x5E) /* Carat */
318#define ACPI_IS_PATH_SEPARATOR(c) ((c) == (u8) 0x2E) /* Period (dot) */
319
296/* 320/*
297 * An object of type struct acpi_namespace_node can appear in some contexts 321 * An object of type struct acpi_namespace_node can appear in some contexts
298 * where a pointer to an object of type union acpi_operand_object can also 322 * where a pointer to an object of type union acpi_operand_object can also
@@ -364,137 +388,6 @@
364 388
365#endif /* ACPI_NO_ERROR_MESSAGES */ 389#endif /* ACPI_NO_ERROR_MESSAGES */
366 390
367/*
368 * Debug macros that are conditionally compiled
369 */
370#ifdef ACPI_DEBUG_OUTPUT
371/*
372 * Function entry tracing
373 */
374#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
375 acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
376#define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \
377 acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS, (void *)b)
378#define ACPI_FUNCTION_TRACE_U32(a, b) ACPI_FUNCTION_NAME(a) \
379 acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS, (u32)b)
380#define ACPI_FUNCTION_TRACE_STR(a, b) ACPI_FUNCTION_NAME(a) \
381 acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS, (char *)b)
382
383#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
384
385/*
386 * Function exit tracing.
387 * WARNING: These macros include a return statement. This is usually considered
388 * bad form, but having a separate exit macro is very ugly and difficult to maintain.
389 * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
390 * so that "_AcpiFunctionName" is defined.
391 *
392 * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
393 * about these constructs.
394 */
395#ifdef ACPI_USE_DO_WHILE_0
396#define ACPI_DO_WHILE0(a) do a while(0)
397#else
398#define ACPI_DO_WHILE0(a) a
399#endif
400
401#define return_VOID ACPI_DO_WHILE0 ({ \
402 acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
403 return;})
404/*
405 * There are two versions of most of the return macros. The default version is
406 * safer, since it avoids side-effects by guaranteeing that the argument will
407 * not be evaluated twice.
408 *
409 * A less-safe version of the macros is provided for optional use if the
410 * compiler uses excessive CPU stack (for example, this may happen in the
411 * debug case if code optimzation is disabled.)
412 */
413#ifndef ACPI_SIMPLE_RETURN_MACROS
414
415#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
416 register acpi_status _s = (s); \
417 acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
418 return (_s); })
419#define return_PTR(s) ACPI_DO_WHILE0 ({ \
420 register void *_s = (void *) (s); \
421 acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
422 return (_s); })
423#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
424 register u64 _s = (s); \
425 acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
426 return (_s); })
427#define return_UINT8(s) ACPI_DO_WHILE0 ({ \
428 register u8 _s = (u8) (s); \
429 acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
430 return (_s); })
431#define return_UINT32(s) ACPI_DO_WHILE0 ({ \
432 register u32 _s = (u32) (s); \
433 acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
434 return (_s); })
435#else /* Use original less-safe macros */
436
437#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
438 acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
439 return((s)); })
440#define return_PTR(s) ACPI_DO_WHILE0 ({ \
441 acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
442 return((s)); })
443#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
444 acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) (s)); \
445 return((s)); })
446#define return_UINT8(s) return_VALUE(s)
447#define return_UINT32(s) return_VALUE(s)
448
449#endif /* ACPI_SIMPLE_RETURN_MACROS */
450
451/* Conditional execution */
452
453#define ACPI_DEBUG_EXEC(a) a
454#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
455#define _VERBOSE_STRUCTURES
456
457/* Various object display routines for debug */
458
459#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
460#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
461#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
462#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
463#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
464
465#else
466/*
467 * This is the non-debug case -- make everything go away,
468 * leaving no executable debug code!
469 */
470#define ACPI_DEBUG_EXEC(a)
471#define ACPI_DEBUG_ONLY_MEMBERS(a)
472#define ACPI_FUNCTION_TRACE(a)
473#define ACPI_FUNCTION_TRACE_PTR(a, b)
474#define ACPI_FUNCTION_TRACE_U32(a, b)
475#define ACPI_FUNCTION_TRACE_STR(a, b)
476#define ACPI_FUNCTION_EXIT
477#define ACPI_FUNCTION_STATUS_EXIT(s)
478#define ACPI_FUNCTION_VALUE_EXIT(s)
479#define ACPI_FUNCTION_ENTRY()
480#define ACPI_DUMP_STACK_ENTRY(a)
481#define ACPI_DUMP_OPERANDS(a, b, c)
482#define ACPI_DUMP_ENTRY(a, b)
483#define ACPI_DUMP_TABLES(a, b)
484#define ACPI_DUMP_PATHNAME(a, b, c, d)
485#define ACPI_DUMP_BUFFER(a, b)
486#define ACPI_DEBUG_PRINT(pl)
487#define ACPI_DEBUG_PRINT_RAW(pl)
488
489#define return_VOID return
490#define return_ACPI_STATUS(s) return(s)
491#define return_VALUE(s) return(s)
492#define return_UINT8(s) return(s)
493#define return_UINT32(s) return(s)
494#define return_PTR(s) return(s)
495
496#endif /* ACPI_DEBUG_OUTPUT */
497
498#if (!ACPI_REDUCED_HARDWARE) 391#if (!ACPI_REDUCED_HARDWARE)
499#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr 392#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
500#else 393#else
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9b19d4b86424..02cd5482ff8b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -218,6 +218,18 @@ acpi_ns_check_parameter_count(char *pathname,
218 u32 user_param_count, 218 u32 user_param_count,
219 const union acpi_predefined_info *info); 219 const union acpi_predefined_info *info);
220 220
221acpi_status
222acpi_ns_check_object_type(struct acpi_predefined_data *data,
223 union acpi_operand_object **return_object_ptr,
224 u32 expected_btypes, u32 package_index);
225
226/*
227 * nsprepkg - Validation of predefined name packages
228 */
229acpi_status
230acpi_ns_check_package(struct acpi_predefined_data *data,
231 union acpi_operand_object **return_object_ptr);
232
221/* 233/*
222 * nsnames - Name and Scope manipulation 234 * nsnames - Name and Scope manipulation
223 */ 235 */
@@ -333,8 +345,6 @@ acpi_ns_install_node(struct acpi_walk_state *walk_state,
333/* 345/*
334 * nsutils - Utility functions 346 * nsutils - Utility functions
335 */ 347 */
336u8 acpi_ns_valid_root_prefix(char prefix);
337
338acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node); 348acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
339 349
340u32 acpi_ns_local(acpi_object_type type); 350u32 acpi_ns_local(acpi_object_type type);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 24eb9eac9514..cc7ab6dd724e 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -307,7 +307,7 @@ struct acpi_object_addr_handler {
307 struct acpi_namespace_node *node; /* Parent device */ 307 struct acpi_namespace_node *node; /* Parent device */
308 void *context; 308 void *context;
309 acpi_adr_space_setup setup; 309 acpi_adr_space_setup setup;
310 union acpi_operand_object *region_list; /* regions using this handler */ 310 union acpi_operand_object *region_list; /* Regions using this handler */
311 union acpi_operand_object *next; 311 union acpi_operand_object *next;
312}; 312};
313 313
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index d786a5128b78..3fc9ca7e8aa3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index eefcf47a61a0..aed319318835 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -105,7 +105,28 @@ union acpi_parse_object *acpi_ps_find_name(union acpi_parse_object *scope,
105union acpi_parse_object *acpi_ps_get_parent(union acpi_parse_object *op); 105union acpi_parse_object *acpi_ps_get_parent(union acpi_parse_object *op);
106 106
107/* 107/*
108 * psopcode - AML Opcode information 108 * psobject - support for parse object processing
109 */
110acpi_status
111acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
112 u8 *aml_op_start,
113 union acpi_parse_object *unnamed_op,
114 union acpi_parse_object **op);
115
116acpi_status
117acpi_ps_create_op(struct acpi_walk_state *walk_state,
118 u8 *aml_op_start, union acpi_parse_object **new_op);
119
120acpi_status
121acpi_ps_complete_op(struct acpi_walk_state *walk_state,
122 union acpi_parse_object **op, acpi_status status);
123
124acpi_status
125acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
126 union acpi_parse_object *op, acpi_status status);
127
128/*
129 * psopinfo - AML Opcode information
109 */ 130 */
110const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode); 131const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
111 132
@@ -211,8 +232,6 @@ void acpi_ps_free_op(union acpi_parse_object *op);
211 232
212u8 acpi_ps_is_leading_char(u32 c); 233u8 acpi_ps_is_leading_char(u32 c);
213 234
214u8 acpi_ps_is_prefix_char(u32 c);
215
216#ifdef ACPI_FUTURE_USAGE 235#ifdef ACPI_FUTURE_USAGE
217u32 acpi_ps_get_name(union acpi_parse_object *op); 236u32 acpi_ps_get_name(union acpi_parse_object *op);
218#endif /* ACPI_FUTURE_USAGE */ 237#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 9dfa1c83bd4e..752cc40cdc1e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1,12 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Name: acpredef - Information table for ACPI predefined methods and objects 3 * Name: acpredef - Information table for ACPI predefined methods and objects
4 * $Revision: 1.1 $
5 * 4 *
6 *****************************************************************************/ 5 *****************************************************************************/
7 6
8/* 7/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 9 * All rights reserved.
11 * 10 *
12 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,13 +50,13 @@
51 * 50 *
52 * 1) PTYPE1 packages do not contain sub-packages. 51 * 1) PTYPE1 packages do not contain sub-packages.
53 * 52 *
54 * ACPI_PTYPE1_FIXED: Fixed length, 1 or 2 object types: 53 * ACPI_PTYPE1_FIXED: Fixed-length length, 1 or 2 object types:
55 * object type 54 * object type
56 * count 55 * count
57 * object type 56 * object type
58 * count 57 * count
59 * 58 *
60 * ACPI_PTYPE1_VAR: Variable length: 59 * ACPI_PTYPE1_VAR: Variable-length length:
61 * object type (Int/Buf/Ref) 60 * object type (Int/Buf/Ref)
62 * 61 *
63 * ACPI_PTYPE1_OPTION: Package has some required and some optional elements 62 * ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -85,10 +84,10 @@
85 * count 84 * count
86 * (Used for _CST) 85 * (Used for _CST)
87 * 86 *
88 * ACPI_PTYPE2_FIXED: Each subpackage is of fixed length 87 * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length
89 * (Used for _PRT) 88 * (Used for _PRT)
90 * 89 *
91 * ACPI_PTYPE2_MIN: Each subpackage has a variable but minimum length 90 * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length
92 * (Used for _HPX) 91 * (Used for _HPX)
93 * 92 *
94 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length 93 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
@@ -124,7 +123,8 @@ enum acpi_return_package_types {
124 * These are the names that can actually be evaluated via acpi_evaluate_object. 123 * These are the names that can actually be evaluated via acpi_evaluate_object.
125 * Not present in this table are the following: 124 * Not present in this table are the following:
126 * 125 *
127 * 1) Predefined/Reserved names that are never evaluated via acpi_evaluate_object: 126 * 1) Predefined/Reserved names that are never evaluated via
127 * acpi_evaluate_object:
128 * _Lxx and _Exx GPE methods 128 * _Lxx and _Exx GPE methods
129 * _Qxx EC methods 129 * _Qxx EC methods
130 * _T_x compiler temporary variables 130 * _T_x compiler temporary variables
@@ -149,6 +149,8 @@ enum acpi_return_package_types {
149 * information about the expected structure of the package. This information 149 * information about the expected structure of the package. This information
150 * is saved here (rather than in a separate table) in order to minimize the 150 * is saved here (rather than in a separate table) in order to minimize the
151 * overall size of the stored data. 151 * overall size of the stored data.
152 *
153 * Note: The additional braces are intended to promote portability.
152 */ 154 */
153static const union acpi_predefined_info predefined_names[] = { 155static const union acpi_predefined_info predefined_names[] = {
154 {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, 156 {{"_AC0", 0, ACPI_RTYPE_INTEGER}},
@@ -212,9 +214,8 @@ static const union acpi_predefined_info predefined_names[] = {
212 {{"_BCT", 1, ACPI_RTYPE_INTEGER}}, 214 {{"_BCT", 1, ACPI_RTYPE_INTEGER}},
213 {{"_BDN", 0, ACPI_RTYPE_INTEGER}}, 215 {{"_BDN", 0, ACPI_RTYPE_INTEGER}},
214 {{"_BFS", 1, 0}}, 216 {{"_BFS", 1, 0}},
215 {{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */ 217 {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
216 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, 218 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}},
217 ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
218 219
219 {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */ 220 {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
220 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4, 221 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
@@ -236,7 +237,8 @@ static const union acpi_predefined_info predefined_names[] = {
236 {{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */ 237 {{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
237 {{"_CDM", 0, ACPI_RTYPE_INTEGER}}, 238 {{"_CDM", 0, ACPI_RTYPE_INTEGER}},
238 {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */ 239 {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
239 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}}, 240 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0,
241 0}},
240 242
241 {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */ 243 {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
242 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, 244 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
@@ -251,7 +253,8 @@ static const union acpi_predefined_info predefined_names[] = {
251 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 253 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
252 254
253 {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */ 255 {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
254 {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}}, 256 {{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,
257 0}},
255 258
256 {{"_CWS", 1, ACPI_RTYPE_INTEGER}}, 259 {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
257 {{"_DCK", 1, ACPI_RTYPE_INTEGER}}, 260 {{"_DCK", 1, ACPI_RTYPE_INTEGER}},
@@ -342,8 +345,8 @@ static const union acpi_predefined_info predefined_names[] = {
342 {{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */ 345 {{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */
343 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}}, 346 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
344 347
345 {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (2 Str) */ 348 {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
346 {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 2,0}, 0,0}}, 349 {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}},
347 350
348 {{"_MSG", 1, 0}}, 351 {{"_MSG", 1, 0}},
349 {{"_MSM", 4, ACPI_RTYPE_INTEGER}}, 352 {{"_MSM", 4, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 0347d0993497..f691d0e4d9fa 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -347,18 +347,21 @@ extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
347extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[]; 347extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
348 348
349/* 349/*
350 * rsdump 350 * rsdumpinfo
351 */ 351 */
352extern struct acpi_rsdump_info acpi_rs_dump_irq[]; 352extern struct acpi_rsdump_info acpi_rs_dump_irq[];
353extern struct acpi_rsdump_info acpi_rs_dump_prt[];
353extern struct acpi_rsdump_info acpi_rs_dump_dma[]; 354extern struct acpi_rsdump_info acpi_rs_dump_dma[];
354extern struct acpi_rsdump_info acpi_rs_dump_start_dpf[]; 355extern struct acpi_rsdump_info acpi_rs_dump_start_dpf[];
355extern struct acpi_rsdump_info acpi_rs_dump_end_dpf[]; 356extern struct acpi_rsdump_info acpi_rs_dump_end_dpf[];
356extern struct acpi_rsdump_info acpi_rs_dump_io[]; 357extern struct acpi_rsdump_info acpi_rs_dump_io[];
358extern struct acpi_rsdump_info acpi_rs_dump_io_flags[];
357extern struct acpi_rsdump_info acpi_rs_dump_fixed_io[]; 359extern struct acpi_rsdump_info acpi_rs_dump_fixed_io[];
358extern struct acpi_rsdump_info acpi_rs_dump_vendor[]; 360extern struct acpi_rsdump_info acpi_rs_dump_vendor[];
359extern struct acpi_rsdump_info acpi_rs_dump_end_tag[]; 361extern struct acpi_rsdump_info acpi_rs_dump_end_tag[];
360extern struct acpi_rsdump_info acpi_rs_dump_memory24[]; 362extern struct acpi_rsdump_info acpi_rs_dump_memory24[];
361extern struct acpi_rsdump_info acpi_rs_dump_memory32[]; 363extern struct acpi_rsdump_info acpi_rs_dump_memory32[];
364extern struct acpi_rsdump_info acpi_rs_dump_memory_flags[];
362extern struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[]; 365extern struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[];
363extern struct acpi_rsdump_info acpi_rs_dump_address16[]; 366extern struct acpi_rsdump_info acpi_rs_dump_address16[];
364extern struct acpi_rsdump_info acpi_rs_dump_address32[]; 367extern struct acpi_rsdump_info acpi_rs_dump_address32[];
@@ -372,6 +375,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
372extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[]; 375extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
373extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[]; 376extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
374extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[]; 377extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
378extern struct acpi_rsdump_info acpi_rs_dump_general_flags[];
375#endif 379#endif
376 380
377#endif /* __ACRESRC_H__ */ 381#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 937e66c65d1e..7896d85876ca 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 6712965ba8ae..7755e915a007 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index b0f5f92b674a..0082fa0a6139 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -483,39 +483,17 @@ acpi_ut_short_divide(u64 in_dividend,
483/* 483/*
484 * utmisc 484 * utmisc
485 */ 485 */
486void ut_convert_backslashes(char *pathname);
487
488const char *acpi_ut_validate_exception(acpi_status status); 486const char *acpi_ut_validate_exception(acpi_status status);
489 487
490u8 acpi_ut_is_pci_root_bridge(char *id); 488u8 acpi_ut_is_pci_root_bridge(char *id);
491 489
492u8 acpi_ut_is_aml_table(struct acpi_table_header *table); 490u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
493 491
494acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
495
496void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
497
498acpi_status 492acpi_status
499acpi_ut_walk_package_tree(union acpi_operand_object *source_object, 493acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
500 void *target_object, 494 void *target_object,
501 acpi_pkg_callback walk_callback, void *context); 495 acpi_pkg_callback walk_callback, void *context);
502 496
503void acpi_ut_strupr(char *src_string);
504
505void acpi_ut_strlwr(char *src_string);
506
507int acpi_ut_stricmp(char *string1, char *string2);
508
509void acpi_ut_print_string(char *string, u8 max_length);
510
511u8 acpi_ut_valid_acpi_name(u32 name);
512
513void acpi_ut_repair_name(char *name);
514
515u8 acpi_ut_valid_acpi_char(char character, u32 position);
516
517acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
518
519/* Values for Base above (16=Hex, 10=Decimal) */ 497/* Values for Base above (16=Hex, 10=Decimal) */
520 498
521#define ACPI_ANY_BASE 0 499#define ACPI_ANY_BASE 0
@@ -532,15 +510,25 @@ acpi_ut_display_init_pathname(u8 type,
532#endif 510#endif
533 511
534/* 512/*
513 * utownerid - Support for Table/Method Owner IDs
514 */
515acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
516
517void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
518
519/*
535 * utresrc 520 * utresrc
536 */ 521 */
537acpi_status 522acpi_status
538acpi_ut_walk_aml_resources(u8 *aml, 523acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
524 u8 *aml,
539 acpi_size aml_length, 525 acpi_size aml_length,
540 acpi_walk_aml_callback user_function, 526 acpi_walk_aml_callback user_function,
541 void **context); 527 void **context);
542 528
543acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index); 529acpi_status
530acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
531 void *aml, u8 *return_index);
544 532
545u32 acpi_ut_get_descriptor_length(void *aml); 533u32 acpi_ut_get_descriptor_length(void *aml);
546 534
@@ -554,6 +542,27 @@ acpi_status
554acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag); 542acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
555 543
556/* 544/*
545 * utstring - String and character utilities
546 */
547void acpi_ut_strupr(char *src_string);
548
549void acpi_ut_strlwr(char *src_string);
550
551int acpi_ut_stricmp(char *string1, char *string2);
552
553acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
554
555void acpi_ut_print_string(char *string, u8 max_length);
556
557void ut_convert_backslashes(char *pathname);
558
559u8 acpi_ut_valid_acpi_name(u32 name);
560
561u8 acpi_ut_valid_acpi_char(char character, u32 position);
562
563void acpi_ut_repair_name(char *name);
564
565/*
557 * utmutex - mutex support 566 * utmutex - mutex support
558 */ 567 */
559acpi_status acpi_ut_mutex_initialize(void); 568acpi_status acpi_ut_mutex_initialize(void);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index c26f8ff6c3b9..48a3e331b72d 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2012, Intel Corp. 10 * Copyright (C) 2000 - 2013, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 968449685e06..87c26366d1df 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -199,6 +199,12 @@ struct aml_resource_fixed_dma {
199struct aml_resource_large_header { 199struct aml_resource_large_header {
200AML_RESOURCE_LARGE_HEADER_COMMON}; 200AML_RESOURCE_LARGE_HEADER_COMMON};
201 201
202/* General Flags for address space resource descriptors */
203
204#define ACPI_RESOURCE_FLAG_DEC 2
205#define ACPI_RESOURCE_FLAG_MIF 4
206#define ACPI_RESOURCE_FLAG_MAF 8
207
202struct aml_resource_memory24 { 208struct aml_resource_memory24 {
203 AML_RESOURCE_LARGE_HEADER_COMMON u8 flags; 209 AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
204 u16 minimum; 210 u16 minimum;
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index c8b5e2565b98..fb09b08d7080 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 57895db3231a..7ea0f162f11c 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index b5b904ee815f..feadeed1012d 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 87eff701ecfa..bc8e63f7784b 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 52eb4e01622a..a9ffd44c18fe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
47#include "acinterp.h" 47#include "acinterp.h"
48#include "acnamesp.h" 48#include "acnamesp.h"
49#ifdef ACPI_DISASSEMBLER 49#ifdef ACPI_DISASSEMBLER
50#include <acpi/acdisasm.h> 50#include "acdisasm.h"
51#endif 51#endif
52 52
53#define _COMPONENT ACPI_DISPATCHER 53#define _COMPONENT ACPI_DISPATCHER
@@ -151,6 +151,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
151 151
152 status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); 152 status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
153 if (ACPI_FAILURE(status)) { 153 if (ACPI_FAILURE(status)) {
154 acpi_ut_delete_object_desc(mutex_desc);
154 return_ACPI_STATUS(status); 155 return_ACPI_STATUS(status);
155 } 156 }
156 157
@@ -378,7 +379,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
378 */ 379 */
379 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); 380 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
380 if (!info) { 381 if (!info) {
381 return_ACPI_STATUS(AE_NO_MEMORY); 382 status = AE_NO_MEMORY;
383 goto cleanup;
382 } 384 }
383 385
384 info->parameters = &this_walk_state->operands[0]; 386 info->parameters = &this_walk_state->operands[0];
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 9a83b7e0f3ba..3da80460ce38 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c9f15d3a3686..e20e9f84eee8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -388,7 +388,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
388 union acpi_parse_object *parent; 388 union acpi_parse_object *parent;
389 union acpi_operand_object *obj_desc = NULL; 389 union acpi_operand_object *obj_desc = NULL;
390 acpi_status status = AE_OK; 390 acpi_status status = AE_OK;
391 unsigned i; 391 u32 i;
392 u16 index; 392 u16 index;
393 u16 reference_count; 393 u16 reference_count;
394 394
@@ -525,7 +525,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
525 } 525 }
526 526
527 ACPI_INFO((AE_INFO, 527 ACPI_INFO((AE_INFO,
528 "Actual Package length (%u) is larger than NumElements field (%u), truncated\n", 528 "Actual Package length (%u) is larger than NumElements field (%u), truncated",
529 i, element_count)); 529 i, element_count));
530 } else if (i < element_count) { 530 } else if (i < element_count) {
531 /* 531 /*
@@ -703,7 +703,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
703 /* Truncate value if we are executing from a 32-bit ACPI table */ 703 /* Truncate value if we are executing from a 32-bit ACPI table */
704 704
705#ifndef ACPI_NO_METHOD_EXECUTION 705#ifndef ACPI_NO_METHOD_EXECUTION
706 acpi_ex_truncate_for32bit_table(obj_desc); 706 (void)acpi_ex_truncate_for32bit_table(obj_desc);
707#endif 707#endif
708 break; 708 break;
709 709
@@ -725,8 +725,18 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
725 case AML_TYPE_LITERAL: 725 case AML_TYPE_LITERAL:
726 726
727 obj_desc->integer.value = op->common.value.integer; 727 obj_desc->integer.value = op->common.value.integer;
728
728#ifndef ACPI_NO_METHOD_EXECUTION 729#ifndef ACPI_NO_METHOD_EXECUTION
729 acpi_ex_truncate_for32bit_table(obj_desc); 730 if (acpi_ex_truncate_for32bit_table(obj_desc)) {
731
732 /* Warn if we found a 64-bit constant in a 32-bit table */
733
734 ACPI_WARNING((AE_INFO,
735 "Truncated 64-bit constant found in 32-bit table: %8.8X%8.8X => %8.8X",
736 ACPI_FORMAT_UINT64(op->common.
737 value.integer),
738 (u32)obj_desc->integer.value));
739 }
730#endif 740#endif
731 break; 741 break;
732 742
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index d09c6b4bab2c..ee6367b8eaf7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -486,18 +486,18 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
486 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); 486 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
487 487
488 /* 488 /*
489 * This is where we evaluate the signature_string and oem_iDString 489 * This is where we evaluate the Signature string, oem_id string,
490 * and oem_table_iDString of the data_table_region declaration 490 * and oem_table_id string of the Data Table Region declaration
491 */ 491 */
492 node = op->common.node; 492 node = op->common.node;
493 493
494 /* next_op points to signature_string op */ 494 /* next_op points to Signature string op */
495 495
496 next_op = op->common.value.arg; 496 next_op = op->common.value.arg;
497 497
498 /* 498 /*
499 * Evaluate/create the signature_string and oem_iDString 499 * Evaluate/create the Signature string, oem_id string,
500 * and oem_table_iDString operands 500 * and oem_table_id string operands
501 */ 501 */
502 status = acpi_ds_create_operands(walk_state, next_op); 502 status = acpi_ds_create_operands(walk_state, next_op);
503 if (ACPI_FAILURE(status)) { 503 if (ACPI_FAILURE(status)) {
@@ -505,8 +505,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
505 } 505 }
506 506
507 /* 507 /*
508 * Resolve the signature_string and oem_iDString 508 * Resolve the Signature string, oem_id string,
509 * and oem_table_iDString operands 509 * and oem_table_id string operands
510 */ 510 */
511 status = acpi_ex_resolve_operands(op->common.aml_opcode, 511 status = acpi_ex_resolve_operands(op->common.aml_opcode,
512 ACPI_WALK_OPERANDS, walk_state); 512 ACPI_WALK_OPERANDS, walk_state);
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index afeb99f49482..4d8c992a51d8 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -178,7 +178,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
178 178
179 if (!op) { 179 if (!op) {
180 ACPI_ERROR((AE_INFO, "Null Op")); 180 ACPI_ERROR((AE_INFO, "Null Op"));
181 return_UINT8(TRUE); 181 return_VALUE(TRUE);
182 } 182 }
183 183
184 /* 184 /*
@@ -210,7 +210,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
210 "At Method level, result of [%s] not used\n", 210 "At Method level, result of [%s] not used\n",
211 acpi_ps_get_opcode_name(op->common. 211 acpi_ps_get_opcode_name(op->common.
212 aml_opcode))); 212 aml_opcode)));
213 return_UINT8(FALSE); 213 return_VALUE(FALSE);
214 } 214 }
215 215
216 /* Get info on the parent. The root_op is AML_SCOPE */ 216 /* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
219 acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode); 219 acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
220 if (parent_info->class == AML_CLASS_UNKNOWN) { 220 if (parent_info->class == AML_CLASS_UNKNOWN) {
221 ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op)); 221 ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
222 return_UINT8(FALSE); 222 return_VALUE(FALSE);
223 } 223 }
224 224
225 /* 225 /*
@@ -307,7 +307,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
307 acpi_ps_get_opcode_name(op->common.parent->common. 307 acpi_ps_get_opcode_name(op->common.parent->common.
308 aml_opcode), op)); 308 aml_opcode), op));
309 309
310 return_UINT8(TRUE); 310 return_VALUE(TRUE);
311 311
312 result_not_used: 312 result_not_used:
313 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 313 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
316 acpi_ps_get_opcode_name(op->common.parent->common. 316 acpi_ps_get_opcode_name(op->common.parent->common.
317 aml_opcode), op)); 317 aml_opcode), op));
318 318
319 return_UINT8(FALSE); 319 return_VALUE(FALSE);
320} 320}
321 321
322/******************************************************************************* 322/*******************************************************************************
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 58593931be96..44f8325c2bae 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -149,7 +149,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
149 149
150 /* Truncate the predicate to 32-bits if necessary */ 150 /* Truncate the predicate to 32-bits if necessary */
151 151
152 acpi_ex_truncate_for32bit_table(local_obj_desc); 152 (void)acpi_ex_truncate_for32bit_table(local_obj_desc);
153 153
154 /* 154 /*
155 * Save the result of the predicate evaluation on 155 * Save the result of the predicate evaluation on
@@ -706,7 +706,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
706 * ACPI 2.0 support for 64-bit integers: Truncate numeric 706 * ACPI 2.0 support for 64-bit integers: Truncate numeric
707 * result value if we are executing from a 32-bit ACPI table 707 * result value if we are executing from a 32-bit ACPI table
708 */ 708 */
709 acpi_ex_truncate_for32bit_table(walk_state->result_obj); 709 (void)acpi_ex_truncate_for32bit_table(walk_state->result_obj);
710 710
711 /* 711 /*
712 * Check if we just completed the evaluation of a 712 * Check if we just completed the evaluation of a
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 557510084c7a..6e17c0e24e63 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -50,7 +50,7 @@
50#include "acnamesp.h" 50#include "acnamesp.h"
51 51
52#ifdef ACPI_ASL_COMPILER 52#ifdef ACPI_ASL_COMPILER
53#include <acpi/acdisasm.h> 53#include "acdisasm.h"
54#endif 54#endif
55 55
56#define _COMPONENT ACPI_DISPATCHER 56#define _COMPONENT ACPI_DISPATCHER
@@ -178,7 +178,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
178 * Target of Scope() not found. Generate an External for it, and 178 * Target of Scope() not found. Generate an External for it, and
179 * insert the name into the namespace. 179 * insert the name into the namespace.
180 */ 180 */
181 acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0); 181 acpi_dm_add_to_external_list(op, path, ACPI_TYPE_DEVICE,
182 0);
182 status = 183 status =
183 acpi_ns_lookup(walk_state->scope_info, path, 184 acpi_ns_lookup(walk_state->scope_info, path,
184 object_type, ACPI_IMODE_LOAD_PASS1, 185 object_type, ACPI_IMODE_LOAD_PASS1,
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 379835748357..4407ff2377d5 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -222,7 +222,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
222 */ 222 */
223 ACPI_WARNING((AE_INFO, 223 ACPI_WARNING((AE_INFO,
224 "Type override - [%4.4s] had invalid type (%s) " 224 "Type override - [%4.4s] had invalid type (%s) "
225 "for Scope operator, changed to type ANY\n", 225 "for Scope operator, changed to type ANY",
226 acpi_ut_get_node_name(node), 226 acpi_ut_get_node_name(node),
227 acpi_ut_get_type_name(node->type))); 227 acpi_ut_get_type_name(node->type)));
228 228
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index f6c4295470ae..d67891de1b54 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3e65a15a735f..ecb12e2137ff 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d4acfbbe5b29..b8ea0b26cde3 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index af14a7137632..a621481c6cf2 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 36d120574423..b9adb9a7ed85 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -561,8 +561,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
561 status = AE_NO_MEMORY; 561 status = AE_NO_MEMORY;
562 } else { 562 } else {
563 /* 563 /*
564 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx 564 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
565 * control method that corresponds to this GPE 565 * _Lxx/_Exx control method that corresponds to this GPE
566 */ 566 */
567 info->prefix_node = 567 info->prefix_node =
568 local_gpe_event_info->dispatch.method_node; 568 local_gpe_event_info->dispatch.method_node;
@@ -707,7 +707,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
707 if (ACPI_FAILURE(status)) { 707 if (ACPI_FAILURE(status)) {
708 ACPI_EXCEPTION((AE_INFO, status, 708 ACPI_EXCEPTION((AE_INFO, status,
709 "Unable to clear GPE%02X", gpe_number)); 709 "Unable to clear GPE%02X", gpe_number));
710 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 710 return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
711 } 711 }
712 } 712 }
713 713
@@ -724,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
724 if (ACPI_FAILURE(status)) { 724 if (ACPI_FAILURE(status)) {
725 ACPI_EXCEPTION((AE_INFO, status, 725 ACPI_EXCEPTION((AE_INFO, status,
726 "Unable to disable GPE%02X", gpe_number)); 726 "Unable to disable GPE%02X", gpe_number));
727 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 727 return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
728 } 728 }
729 729
730 /* 730 /*
@@ -765,7 +765,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
765 gpe_event_info); 765 gpe_event_info);
766 if (ACPI_FAILURE(status)) { 766 if (ACPI_FAILURE(status)) {
767 ACPI_EXCEPTION((AE_INFO, status, 767 ACPI_EXCEPTION((AE_INFO, status,
768 "Unable to queue handler for GPE%2X - event disabled", 768 "Unable to queue handler for GPE%02X - event disabled",
769 gpe_number)); 769 gpe_number));
770 } 770 }
771 break; 771 break;
@@ -784,7 +784,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
784 break; 784 break;
785 } 785 }
786 786
787 return_UINT32(ACPI_INTERRUPT_HANDLED); 787 return_VALUE(ACPI_INTERRUPT_HANDLED);
788} 788}
789 789
790#endif /* !ACPI_REDUCED_HARDWARE */ 790#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 1571a61a7833..a2d688bbac02 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -405,13 +405,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
405 (*return_gpe_block) = gpe_block; 405 (*return_gpe_block) = gpe_block;
406 } 406 }
407 407
408 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 408 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
409 "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", 409 " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n",
410 (u32) gpe_block->block_base_number, 410 (u32)gpe_block->block_base_number,
411 (u32) (gpe_block->block_base_number + 411 (u32)(gpe_block->block_base_number +
412 (gpe_block->gpe_count - 1)), 412 (gpe_block->gpe_count - 1)),
413 gpe_device->name.ascii, gpe_block->register_count, 413 gpe_device->name.ascii, gpe_block->register_count,
414 interrupt_number)); 414 interrupt_number));
415 415
416 /* Update global count of currently available GPEs */ 416 /* Update global count of currently available GPEs */
417 417
@@ -496,9 +496,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
496 } 496 }
497 497
498 if (gpe_enabled_count) { 498 if (gpe_enabled_count) {
499 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 499 ACPI_INFO((AE_INFO,
500 "Enabled %u GPEs in this block\n", 500 "Enabled %u GPEs in block %02X to %02X",
501 gpe_enabled_count)); 501 gpe_enabled_count, (u32)gpe_block->block_base_number,
502 (u32)(gpe_block->block_base_number +
503 (gpe_block->gpe_count - 1))));
502 } 504 }
503 505
504 gpe_block->initialized = TRUE; 506 gpe_block->initialized = TRUE;
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index da0add858f81..72b8f6b3f4ca 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,9 @@ acpi_status acpi_ev_gpe_initialize(void)
86 86
87 ACPI_FUNCTION_TRACE(ev_gpe_initialize); 87 ACPI_FUNCTION_TRACE(ev_gpe_initialize);
88 88
89 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
90 "Initializing General Purpose Events (GPEs):\n"));
91
89 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 92 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
90 if (ACPI_FAILURE(status)) { 93 if (ACPI_FAILURE(status)) {
91 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 228a0c3b1d49..b24dbb80fab8 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
new file mode 100644
index 000000000000..d4f83112c2e2
--- /dev/null
+++ b/drivers/acpi/acpica/evhandler.c
@@ -0,0 +1,529 @@
1/******************************************************************************
2 *
3 * Module Name: evhandler - Support for Address Space handlers
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48#include "acinterp.h"
49
50#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evhandler")
52
53/* Local prototypes */
54static acpi_status
55acpi_ev_install_handler(acpi_handle obj_handle,
56 u32 level, void *context, void **return_value);
57
58/* These are the address spaces that will get default handlers */
59
60u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
61 ACPI_ADR_SPACE_SYSTEM_MEMORY,
62 ACPI_ADR_SPACE_SYSTEM_IO,
63 ACPI_ADR_SPACE_PCI_CONFIG,
64 ACPI_ADR_SPACE_DATA_TABLE
65};
66
67/*******************************************************************************
68 *
69 * FUNCTION: acpi_ev_install_region_handlers
70 *
71 * PARAMETERS: None
72 *
73 * RETURN: Status
74 *
75 * DESCRIPTION: Installs the core subsystem default address space handlers.
76 *
77 ******************************************************************************/
78
79acpi_status acpi_ev_install_region_handlers(void)
80{
81 acpi_status status;
82 u32 i;
83
84 ACPI_FUNCTION_TRACE(ev_install_region_handlers);
85
86 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
87 if (ACPI_FAILURE(status)) {
88 return_ACPI_STATUS(status);
89 }
90
91 /*
92 * All address spaces (PCI Config, EC, SMBus) are scope dependent and
93 * registration must occur for a specific device.
94 *
95 * In the case of the system memory and IO address spaces there is
96 * currently no device associated with the address space. For these we
97 * use the root.
98 *
99 * We install the default PCI config space handler at the root so that
100 * this space is immediately available even though the we have not
101 * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
102 * specification which states that the PCI config space must be always
103 * available -- even though we are nowhere near ready to find the PCI root
104 * buses at this point.
105 *
106 * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
107 * has already been installed (via acpi_install_address_space_handler).
108 * Similar for AE_SAME_HANDLER.
109 */
110 for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
111 status = acpi_ev_install_space_handler(acpi_gbl_root_node,
112 acpi_gbl_default_address_spaces
113 [i],
114 ACPI_DEFAULT_HANDLER,
115 NULL, NULL);
116 switch (status) {
117 case AE_OK:
118 case AE_SAME_HANDLER:
119 case AE_ALREADY_EXISTS:
120
121 /* These exceptions are all OK */
122
123 status = AE_OK;
124 break;
125
126 default:
127
128 goto unlock_and_exit;
129 }
130 }
131
132 unlock_and_exit:
133 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
134 return_ACPI_STATUS(status);
135}
136
137/*******************************************************************************
138 *
139 * FUNCTION: acpi_ev_has_default_handler
140 *
141 * PARAMETERS: node - Namespace node for the device
142 * space_id - The address space ID
143 *
144 * RETURN: TRUE if default handler is installed, FALSE otherwise
145 *
146 * DESCRIPTION: Check if the default handler is installed for the requested
147 * space ID.
148 *
149 ******************************************************************************/
150
151u8
152acpi_ev_has_default_handler(struct acpi_namespace_node *node,
153 acpi_adr_space_type space_id)
154{
155 union acpi_operand_object *obj_desc;
156 union acpi_operand_object *handler_obj;
157
158 /* Must have an existing internal object */
159
160 obj_desc = acpi_ns_get_attached_object(node);
161 if (obj_desc) {
162 handler_obj = obj_desc->device.handler;
163
164 /* Walk the linked list of handlers for this object */
165
166 while (handler_obj) {
167 if (handler_obj->address_space.space_id == space_id) {
168 if (handler_obj->address_space.handler_flags &
169 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
170 return (TRUE);
171 }
172 }
173
174 handler_obj = handler_obj->address_space.next;
175 }
176 }
177
178 return (FALSE);
179}
180
181/*******************************************************************************
182 *
183 * FUNCTION: acpi_ev_install_handler
184 *
185 * PARAMETERS: walk_namespace callback
186 *
187 * DESCRIPTION: This routine installs an address handler into objects that are
188 * of type Region or Device.
189 *
190 * If the Object is a Device, and the device has a handler of
191 * the same type then the search is terminated in that branch.
192 *
193 * This is because the existing handler is closer in proximity
194 * to any more regions than the one we are trying to install.
195 *
196 ******************************************************************************/
197
198static acpi_status
199acpi_ev_install_handler(acpi_handle obj_handle,
200 u32 level, void *context, void **return_value)
201{
202 union acpi_operand_object *handler_obj;
203 union acpi_operand_object *next_handler_obj;
204 union acpi_operand_object *obj_desc;
205 struct acpi_namespace_node *node;
206 acpi_status status;
207
208 ACPI_FUNCTION_NAME(ev_install_handler);
209
210 handler_obj = (union acpi_operand_object *)context;
211
212 /* Parameter validation */
213
214 if (!handler_obj) {
215 return (AE_OK);
216 }
217
218 /* Convert and validate the device handle */
219
220 node = acpi_ns_validate_handle(obj_handle);
221 if (!node) {
222 return (AE_BAD_PARAMETER);
223 }
224
225 /*
226 * We only care about regions and objects that are allowed to have
227 * address space handlers
228 */
229 if ((node->type != ACPI_TYPE_DEVICE) &&
230 (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
231 return (AE_OK);
232 }
233
234 /* Check for an existing internal object */
235
236 obj_desc = acpi_ns_get_attached_object(node);
237 if (!obj_desc) {
238
239 /* No object, just exit */
240
241 return (AE_OK);
242 }
243
244 /* Devices are handled different than regions */
245
246 if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
247
248 /* Check if this Device already has a handler for this address space */
249
250 next_handler_obj = obj_desc->device.handler;
251 while (next_handler_obj) {
252
253 /* Found a handler, is it for the same address space? */
254
255 if (next_handler_obj->address_space.space_id ==
256 handler_obj->address_space.space_id) {
257 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
258 "Found handler for region [%s] in device %p(%p) "
259 "handler %p\n",
260 acpi_ut_get_region_name
261 (handler_obj->address_space.
262 space_id), obj_desc,
263 next_handler_obj,
264 handler_obj));
265
266 /*
267 * Since the object we found it on was a device, then it
268 * means that someone has already installed a handler for
269 * the branch of the namespace from this device on. Just
270 * bail out telling the walk routine to not traverse this
271 * branch. This preserves the scoping rule for handlers.
272 */
273 return (AE_CTRL_DEPTH);
274 }
275
276 /* Walk the linked list of handlers attached to this device */
277
278 next_handler_obj = next_handler_obj->address_space.next;
279 }
280
281 /*
282 * As long as the device didn't have a handler for this space we
283 * don't care about it. We just ignore it and proceed.
284 */
285 return (AE_OK);
286 }
287
288 /* Object is a Region */
289
290 if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
291
292 /* This region is for a different address space, just ignore it */
293
294 return (AE_OK);
295 }
296
297 /*
298 * Now we have a region and it is for the handler's address space type.
299 *
300 * First disconnect region for any previous handler (if any)
301 */
302 acpi_ev_detach_region(obj_desc, FALSE);
303
304 /* Connect the region to the new handler */
305
306 status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
307 return (status);
308}
309
310/*******************************************************************************
311 *
312 * FUNCTION: acpi_ev_install_space_handler
313 *
314 * PARAMETERS: node - Namespace node for the device
315 * space_id - The address space ID
316 * handler - Address of the handler
317 * setup - Address of the setup function
318 * context - Value passed to the handler on each access
319 *
320 * RETURN: Status
321 *
322 * DESCRIPTION: Install a handler for all op_regions of a given space_id.
323 * Assumes namespace is locked
324 *
325 ******************************************************************************/
326
327acpi_status
328acpi_ev_install_space_handler(struct acpi_namespace_node * node,
329 acpi_adr_space_type space_id,
330 acpi_adr_space_handler handler,
331 acpi_adr_space_setup setup, void *context)
332{
333 union acpi_operand_object *obj_desc;
334 union acpi_operand_object *handler_obj;
335 acpi_status status;
336 acpi_object_type type;
337 u8 flags = 0;
338
339 ACPI_FUNCTION_TRACE(ev_install_space_handler);
340
341 /*
342 * This registration is valid for only the types below and the root. This
343 * is where the default handlers get placed.
344 */
345 if ((node->type != ACPI_TYPE_DEVICE) &&
346 (node->type != ACPI_TYPE_PROCESSOR) &&
347 (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
348 status = AE_BAD_PARAMETER;
349 goto unlock_and_exit;
350 }
351
352 if (handler == ACPI_DEFAULT_HANDLER) {
353 flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
354
355 switch (space_id) {
356 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
357 handler = acpi_ex_system_memory_space_handler;
358 setup = acpi_ev_system_memory_region_setup;
359 break;
360
361 case ACPI_ADR_SPACE_SYSTEM_IO:
362 handler = acpi_ex_system_io_space_handler;
363 setup = acpi_ev_io_space_region_setup;
364 break;
365
366 case ACPI_ADR_SPACE_PCI_CONFIG:
367 handler = acpi_ex_pci_config_space_handler;
368 setup = acpi_ev_pci_config_region_setup;
369 break;
370
371 case ACPI_ADR_SPACE_CMOS:
372 handler = acpi_ex_cmos_space_handler;
373 setup = acpi_ev_cmos_region_setup;
374 break;
375
376 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
377 handler = acpi_ex_pci_bar_space_handler;
378 setup = acpi_ev_pci_bar_region_setup;
379 break;
380
381 case ACPI_ADR_SPACE_DATA_TABLE:
382 handler = acpi_ex_data_table_space_handler;
383 setup = NULL;
384 break;
385
386 default:
387 status = AE_BAD_PARAMETER;
388 goto unlock_and_exit;
389 }
390 }
391
392 /* If the caller hasn't specified a setup routine, use the default */
393
394 if (!setup) {
395 setup = acpi_ev_default_region_setup;
396 }
397
398 /* Check for an existing internal object */
399
400 obj_desc = acpi_ns_get_attached_object(node);
401 if (obj_desc) {
402 /*
403 * The attached device object already exists. Make sure the handler
404 * is not already installed.
405 */
406 handler_obj = obj_desc->device.handler;
407
408 /* Walk the handler list for this device */
409
410 while (handler_obj) {
411
412 /* Same space_id indicates a handler already installed */
413
414 if (handler_obj->address_space.space_id == space_id) {
415 if (handler_obj->address_space.handler ==
416 handler) {
417 /*
418 * It is (relatively) OK to attempt to install the SAME
419 * handler twice. This can easily happen with the
420 * PCI_Config space.
421 */
422 status = AE_SAME_HANDLER;
423 goto unlock_and_exit;
424 } else {
425 /* A handler is already installed */
426
427 status = AE_ALREADY_EXISTS;
428 }
429 goto unlock_and_exit;
430 }
431
432 /* Walk the linked list of handlers */
433
434 handler_obj = handler_obj->address_space.next;
435 }
436 } else {
437 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
438 "Creating object on Device %p while installing handler\n",
439 node));
440
441 /* obj_desc does not exist, create one */
442
443 if (node->type == ACPI_TYPE_ANY) {
444 type = ACPI_TYPE_DEVICE;
445 } else {
446 type = node->type;
447 }
448
449 obj_desc = acpi_ut_create_internal_object(type);
450 if (!obj_desc) {
451 status = AE_NO_MEMORY;
452 goto unlock_and_exit;
453 }
454
455 /* Init new descriptor */
456
457 obj_desc->common.type = (u8)type;
458
459 /* Attach the new object to the Node */
460
461 status = acpi_ns_attach_object(node, obj_desc, type);
462
463 /* Remove local reference to the object */
464
465 acpi_ut_remove_reference(obj_desc);
466
467 if (ACPI_FAILURE(status)) {
468 goto unlock_and_exit;
469 }
470 }
471
472 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
473 "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
474 acpi_ut_get_region_name(space_id), space_id,
475 acpi_ut_get_node_name(node), node, obj_desc));
476
477 /*
478 * Install the handler
479 *
480 * At this point there is no existing handler. Just allocate the object
481 * for the handler and link it into the list.
482 */
483 handler_obj =
484 acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
485 if (!handler_obj) {
486 status = AE_NO_MEMORY;
487 goto unlock_and_exit;
488 }
489
490 /* Init handler obj */
491
492 handler_obj->address_space.space_id = (u8)space_id;
493 handler_obj->address_space.handler_flags = flags;
494 handler_obj->address_space.region_list = NULL;
495 handler_obj->address_space.node = node;
496 handler_obj->address_space.handler = handler;
497 handler_obj->address_space.context = context;
498 handler_obj->address_space.setup = setup;
499
500 /* Install at head of Device.address_space list */
501
502 handler_obj->address_space.next = obj_desc->device.handler;
503
504 /*
505 * The Device object is the first reference on the handler_obj.
506 * Each region that uses the handler adds a reference.
507 */
508 obj_desc->device.handler = handler_obj;
509
510 /*
511 * Walk the namespace finding all of the regions this
512 * handler will manage.
513 *
514 * Start at the device and search the branch toward
515 * the leaf nodes until either the leaf is encountered or
516 * a device is detected that has an address handler of the
517 * same type.
518 *
519 * In either case, back up and search down the remainder
520 * of the branch
521 */
522 status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
523 ACPI_NS_WALK_UNLOCK,
524 acpi_ev_install_handler, NULL,
525 handler_obj, NULL);
526
527 unlock_and_exit:
528 return_ACPI_STATUS(status);
529}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 51f537937c1f..c986b2336b81 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 0cc6a16fedc7..6555e350fc1f 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -1,11 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: evregion - ACPI address_space (op_region) handler dispatch 3 * Module Name: evregion - Operation Region support
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -50,10 +50,9 @@
50#define _COMPONENT ACPI_EVENTS 50#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evregion") 51ACPI_MODULE_NAME("evregion")
52 52
53extern u8 acpi_gbl_default_address_spaces[];
54
53/* Local prototypes */ 55/* Local prototypes */
54static u8
55acpi_ev_has_default_handler(struct acpi_namespace_node *node,
56 acpi_adr_space_type space_id);
57 56
58static void acpi_ev_orphan_ec_reg_method(void); 57static void acpi_ev_orphan_ec_reg_method(void);
59 58
@@ -61,135 +60,6 @@ static acpi_status
61acpi_ev_reg_run(acpi_handle obj_handle, 60acpi_ev_reg_run(acpi_handle obj_handle,
62 u32 level, void *context, void **return_value); 61 u32 level, void *context, void **return_value);
63 62
64static acpi_status
65acpi_ev_install_handler(acpi_handle obj_handle,
66 u32 level, void *context, void **return_value);
67
68/* These are the address spaces that will get default handlers */
69
70#define ACPI_NUM_DEFAULT_SPACES 4
71
72static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
73 ACPI_ADR_SPACE_SYSTEM_MEMORY,
74 ACPI_ADR_SPACE_SYSTEM_IO,
75 ACPI_ADR_SPACE_PCI_CONFIG,
76 ACPI_ADR_SPACE_DATA_TABLE
77};
78
79/*******************************************************************************
80 *
81 * FUNCTION: acpi_ev_install_region_handlers
82 *
83 * PARAMETERS: None
84 *
85 * RETURN: Status
86 *
87 * DESCRIPTION: Installs the core subsystem default address space handlers.
88 *
89 ******************************************************************************/
90
91acpi_status acpi_ev_install_region_handlers(void)
92{
93 acpi_status status;
94 u32 i;
95
96 ACPI_FUNCTION_TRACE(ev_install_region_handlers);
97
98 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
99 if (ACPI_FAILURE(status)) {
100 return_ACPI_STATUS(status);
101 }
102
103 /*
104 * All address spaces (PCI Config, EC, SMBus) are scope dependent and
105 * registration must occur for a specific device.
106 *
107 * In the case of the system memory and IO address spaces there is
108 * currently no device associated with the address space. For these we
109 * use the root.
110 *
111 * We install the default PCI config space handler at the root so that
112 * this space is immediately available even though the we have not
113 * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
114 * specification which states that the PCI config space must be always
115 * available -- even though we are nowhere near ready to find the PCI root
116 * buses at this point.
117 *
118 * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
119 * has already been installed (via acpi_install_address_space_handler).
120 * Similar for AE_SAME_HANDLER.
121 */
122 for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
123 status = acpi_ev_install_space_handler(acpi_gbl_root_node,
124 acpi_gbl_default_address_spaces
125 [i],
126 ACPI_DEFAULT_HANDLER,
127 NULL, NULL);
128 switch (status) {
129 case AE_OK:
130 case AE_SAME_HANDLER:
131 case AE_ALREADY_EXISTS:
132
133 /* These exceptions are all OK */
134
135 status = AE_OK;
136 break;
137
138 default:
139
140 goto unlock_and_exit;
141 }
142 }
143
144 unlock_and_exit:
145 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
146 return_ACPI_STATUS(status);
147}
148
149/*******************************************************************************
150 *
151 * FUNCTION: acpi_ev_has_default_handler
152 *
153 * PARAMETERS: node - Namespace node for the device
154 * space_id - The address space ID
155 *
156 * RETURN: TRUE if default handler is installed, FALSE otherwise
157 *
158 * DESCRIPTION: Check if the default handler is installed for the requested
159 * space ID.
160 *
161 ******************************************************************************/
162
163static u8
164acpi_ev_has_default_handler(struct acpi_namespace_node *node,
165 acpi_adr_space_type space_id)
166{
167 union acpi_operand_object *obj_desc;
168 union acpi_operand_object *handler_obj;
169
170 /* Must have an existing internal object */
171
172 obj_desc = acpi_ns_get_attached_object(node);
173 if (obj_desc) {
174 handler_obj = obj_desc->device.handler;
175
176 /* Walk the linked list of handlers for this object */
177
178 while (handler_obj) {
179 if (handler_obj->address_space.space_id == space_id) {
180 if (handler_obj->address_space.handler_flags &
181 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
182 return (TRUE);
183 }
184 }
185
186 handler_obj = handler_obj->address_space.next;
187 }
188 }
189
190 return (FALSE);
191}
192
193/******************************************************************************* 63/*******************************************************************************
194 * 64 *
195 * FUNCTION: acpi_ev_initialize_op_regions 65 * FUNCTION: acpi_ev_initialize_op_regions
@@ -241,91 +111,6 @@ acpi_status acpi_ev_initialize_op_regions(void)
241 111
242/******************************************************************************* 112/*******************************************************************************
243 * 113 *
244 * FUNCTION: acpi_ev_execute_reg_method
245 *
246 * PARAMETERS: region_obj - Region object
247 * function - Passed to _REG: On (1) or Off (0)
248 *
249 * RETURN: Status
250 *
251 * DESCRIPTION: Execute _REG method for a region
252 *
253 ******************************************************************************/
254
255acpi_status
256acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
257{
258 struct acpi_evaluate_info *info;
259 union acpi_operand_object *args[3];
260 union acpi_operand_object *region_obj2;
261 acpi_status status;
262
263 ACPI_FUNCTION_TRACE(ev_execute_reg_method);
264
265 region_obj2 = acpi_ns_get_secondary_object(region_obj);
266 if (!region_obj2) {
267 return_ACPI_STATUS(AE_NOT_EXIST);
268 }
269
270 if (region_obj2->extra.method_REG == NULL) {
271 return_ACPI_STATUS(AE_OK);
272 }
273
274 /* Allocate and initialize the evaluation information block */
275
276 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
277 if (!info) {
278 return_ACPI_STATUS(AE_NO_MEMORY);
279 }
280
281 info->prefix_node = region_obj2->extra.method_REG;
282 info->pathname = NULL;
283 info->parameters = args;
284 info->flags = ACPI_IGNORE_RETURN_VALUE;
285
286 /*
287 * The _REG method has two arguments:
288 *
289 * arg0 - Integer:
290 * Operation region space ID Same value as region_obj->Region.space_id
291 *
292 * arg1 - Integer:
293 * connection status 1 for connecting the handler, 0 for disconnecting
294 * the handler (Passed as a parameter)
295 */
296 args[0] =
297 acpi_ut_create_integer_object((u64) region_obj->region.space_id);
298 if (!args[0]) {
299 status = AE_NO_MEMORY;
300 goto cleanup1;
301 }
302
303 args[1] = acpi_ut_create_integer_object((u64) function);
304 if (!args[1]) {
305 status = AE_NO_MEMORY;
306 goto cleanup2;
307 }
308
309 args[2] = NULL; /* Terminate list */
310
311 /* Execute the method, no return value */
312
313 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
314 (ACPI_TYPE_METHOD, info->prefix_node, NULL));
315
316 status = acpi_ns_evaluate(info);
317 acpi_ut_remove_reference(args[1]);
318
319 cleanup2:
320 acpi_ut_remove_reference(args[0]);
321
322 cleanup1:
323 ACPI_FREE(info);
324 return_ACPI_STATUS(status);
325}
326
327/*******************************************************************************
328 *
329 * FUNCTION: acpi_ev_address_space_dispatch 114 * FUNCTION: acpi_ev_address_space_dispatch
330 * 115 *
331 * PARAMETERS: region_obj - Internal region object 116 * PARAMETERS: region_obj - Internal region object
@@ -709,351 +494,86 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
709 494
710/******************************************************************************* 495/*******************************************************************************
711 * 496 *
712 * FUNCTION: acpi_ev_install_handler 497 * FUNCTION: acpi_ev_execute_reg_method
713 *
714 * PARAMETERS: walk_namespace callback
715 *
716 * DESCRIPTION: This routine installs an address handler into objects that are
717 * of type Region or Device.
718 *
719 * If the Object is a Device, and the device has a handler of
720 * the same type then the search is terminated in that branch.
721 *
722 * This is because the existing handler is closer in proximity
723 * to any more regions than the one we are trying to install.
724 *
725 ******************************************************************************/
726
727static acpi_status
728acpi_ev_install_handler(acpi_handle obj_handle,
729 u32 level, void *context, void **return_value)
730{
731 union acpi_operand_object *handler_obj;
732 union acpi_operand_object *next_handler_obj;
733 union acpi_operand_object *obj_desc;
734 struct acpi_namespace_node *node;
735 acpi_status status;
736
737 ACPI_FUNCTION_NAME(ev_install_handler);
738
739 handler_obj = (union acpi_operand_object *)context;
740
741 /* Parameter validation */
742
743 if (!handler_obj) {
744 return (AE_OK);
745 }
746
747 /* Convert and validate the device handle */
748
749 node = acpi_ns_validate_handle(obj_handle);
750 if (!node) {
751 return (AE_BAD_PARAMETER);
752 }
753
754 /*
755 * We only care about regions and objects that are allowed to have
756 * address space handlers
757 */
758 if ((node->type != ACPI_TYPE_DEVICE) &&
759 (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
760 return (AE_OK);
761 }
762
763 /* Check for an existing internal object */
764
765 obj_desc = acpi_ns_get_attached_object(node);
766 if (!obj_desc) {
767
768 /* No object, just exit */
769
770 return (AE_OK);
771 }
772
773 /* Devices are handled different than regions */
774
775 if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
776
777 /* Check if this Device already has a handler for this address space */
778
779 next_handler_obj = obj_desc->device.handler;
780 while (next_handler_obj) {
781
782 /* Found a handler, is it for the same address space? */
783
784 if (next_handler_obj->address_space.space_id ==
785 handler_obj->address_space.space_id) {
786 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
787 "Found handler for region [%s] in device %p(%p) "
788 "handler %p\n",
789 acpi_ut_get_region_name
790 (handler_obj->address_space.
791 space_id), obj_desc,
792 next_handler_obj,
793 handler_obj));
794
795 /*
796 * Since the object we found it on was a device, then it
797 * means that someone has already installed a handler for
798 * the branch of the namespace from this device on. Just
799 * bail out telling the walk routine to not traverse this
800 * branch. This preserves the scoping rule for handlers.
801 */
802 return (AE_CTRL_DEPTH);
803 }
804
805 /* Walk the linked list of handlers attached to this device */
806
807 next_handler_obj = next_handler_obj->address_space.next;
808 }
809
810 /*
811 * As long as the device didn't have a handler for this space we
812 * don't care about it. We just ignore it and proceed.
813 */
814 return (AE_OK);
815 }
816
817 /* Object is a Region */
818
819 if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
820
821 /* This region is for a different address space, just ignore it */
822
823 return (AE_OK);
824 }
825
826 /*
827 * Now we have a region and it is for the handler's address space type.
828 *
829 * First disconnect region for any previous handler (if any)
830 */
831 acpi_ev_detach_region(obj_desc, FALSE);
832
833 /* Connect the region to the new handler */
834
835 status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
836 return (status);
837}
838
839/*******************************************************************************
840 *
841 * FUNCTION: acpi_ev_install_space_handler
842 * 498 *
843 * PARAMETERS: node - Namespace node for the device 499 * PARAMETERS: region_obj - Region object
844 * space_id - The address space ID 500 * function - Passed to _REG: On (1) or Off (0)
845 * handler - Address of the handler
846 * setup - Address of the setup function
847 * context - Value passed to the handler on each access
848 * 501 *
849 * RETURN: Status 502 * RETURN: Status
850 * 503 *
851 * DESCRIPTION: Install a handler for all op_regions of a given space_id. 504 * DESCRIPTION: Execute _REG method for a region
852 * Assumes namespace is locked
853 * 505 *
854 ******************************************************************************/ 506 ******************************************************************************/
855 507
856acpi_status 508acpi_status
857acpi_ev_install_space_handler(struct acpi_namespace_node * node, 509acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
858 acpi_adr_space_type space_id,
859 acpi_adr_space_handler handler,
860 acpi_adr_space_setup setup, void *context)
861{ 510{
862 union acpi_operand_object *obj_desc; 511 struct acpi_evaluate_info *info;
863 union acpi_operand_object *handler_obj; 512 union acpi_operand_object *args[3];
513 union acpi_operand_object *region_obj2;
864 acpi_status status; 514 acpi_status status;
865 acpi_object_type type;
866 u8 flags = 0;
867 515
868 ACPI_FUNCTION_TRACE(ev_install_space_handler); 516 ACPI_FUNCTION_TRACE(ev_execute_reg_method);
869
870 /*
871 * This registration is valid for only the types below and the root. This
872 * is where the default handlers get placed.
873 */
874 if ((node->type != ACPI_TYPE_DEVICE) &&
875 (node->type != ACPI_TYPE_PROCESSOR) &&
876 (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
877 status = AE_BAD_PARAMETER;
878 goto unlock_and_exit;
879 }
880 517
881 if (handler == ACPI_DEFAULT_HANDLER) { 518 region_obj2 = acpi_ns_get_secondary_object(region_obj);
882 flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED; 519 if (!region_obj2) {
883 520 return_ACPI_STATUS(AE_NOT_EXIST);
884 switch (space_id) {
885 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
886 handler = acpi_ex_system_memory_space_handler;
887 setup = acpi_ev_system_memory_region_setup;
888 break;
889
890 case ACPI_ADR_SPACE_SYSTEM_IO:
891 handler = acpi_ex_system_io_space_handler;
892 setup = acpi_ev_io_space_region_setup;
893 break;
894
895 case ACPI_ADR_SPACE_PCI_CONFIG:
896 handler = acpi_ex_pci_config_space_handler;
897 setup = acpi_ev_pci_config_region_setup;
898 break;
899
900 case ACPI_ADR_SPACE_CMOS:
901 handler = acpi_ex_cmos_space_handler;
902 setup = acpi_ev_cmos_region_setup;
903 break;
904
905 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
906 handler = acpi_ex_pci_bar_space_handler;
907 setup = acpi_ev_pci_bar_region_setup;
908 break;
909
910 case ACPI_ADR_SPACE_DATA_TABLE:
911 handler = acpi_ex_data_table_space_handler;
912 setup = NULL;
913 break;
914
915 default:
916 status = AE_BAD_PARAMETER;
917 goto unlock_and_exit;
918 }
919 } 521 }
920 522
921 /* If the caller hasn't specified a setup routine, use the default */ 523 if (region_obj2->extra.method_REG == NULL) {
922 524 return_ACPI_STATUS(AE_OK);
923 if (!setup) {
924 setup = acpi_ev_default_region_setup;
925 } 525 }
926 526
927 /* Check for an existing internal object */ 527 /* Allocate and initialize the evaluation information block */
928
929 obj_desc = acpi_ns_get_attached_object(node);
930 if (obj_desc) {
931 /*
932 * The attached device object already exists. Make sure the handler
933 * is not already installed.
934 */
935 handler_obj = obj_desc->device.handler;
936
937 /* Walk the handler list for this device */
938
939 while (handler_obj) {
940
941 /* Same space_id indicates a handler already installed */
942
943 if (handler_obj->address_space.space_id == space_id) {
944 if (handler_obj->address_space.handler ==
945 handler) {
946 /*
947 * It is (relatively) OK to attempt to install the SAME
948 * handler twice. This can easily happen with the
949 * PCI_Config space.
950 */
951 status = AE_SAME_HANDLER;
952 goto unlock_and_exit;
953 } else {
954 /* A handler is already installed */
955
956 status = AE_ALREADY_EXISTS;
957 }
958 goto unlock_and_exit;
959 }
960
961 /* Walk the linked list of handlers */
962
963 handler_obj = handler_obj->address_space.next;
964 }
965 } else {
966 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
967 "Creating object on Device %p while installing handler\n",
968 node));
969
970 /* obj_desc does not exist, create one */
971
972 if (node->type == ACPI_TYPE_ANY) {
973 type = ACPI_TYPE_DEVICE;
974 } else {
975 type = node->type;
976 }
977
978 obj_desc = acpi_ut_create_internal_object(type);
979 if (!obj_desc) {
980 status = AE_NO_MEMORY;
981 goto unlock_and_exit;
982 }
983
984 /* Init new descriptor */
985
986 obj_desc->common.type = (u8) type;
987
988 /* Attach the new object to the Node */
989
990 status = acpi_ns_attach_object(node, obj_desc, type);
991
992 /* Remove local reference to the object */
993
994 acpi_ut_remove_reference(obj_desc);
995 528
996 if (ACPI_FAILURE(status)) { 529 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
997 goto unlock_and_exit; 530 if (!info) {
998 } 531 return_ACPI_STATUS(AE_NO_MEMORY);
999 } 532 }
1000 533
1001 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, 534 info->prefix_node = region_obj2->extra.method_REG;
1002 "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n", 535 info->pathname = NULL;
1003 acpi_ut_get_region_name(space_id), space_id, 536 info->parameters = args;
1004 acpi_ut_get_node_name(node), node, obj_desc)); 537 info->flags = ACPI_IGNORE_RETURN_VALUE;
1005 538
1006 /* 539 /*
1007 * Install the handler 540 * The _REG method has two arguments:
541 *
542 * arg0 - Integer:
543 * Operation region space ID Same value as region_obj->Region.space_id
1008 * 544 *
1009 * At this point there is no existing handler. Just allocate the object 545 * arg1 - Integer:
1010 * for the handler and link it into the list. 546 * connection status 1 for connecting the handler, 0 for disconnecting
547 * the handler (Passed as a parameter)
1011 */ 548 */
1012 handler_obj = 549 args[0] =
1013 acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER); 550 acpi_ut_create_integer_object((u64)region_obj->region.space_id);
1014 if (!handler_obj) { 551 if (!args[0]) {
1015 status = AE_NO_MEMORY; 552 status = AE_NO_MEMORY;
1016 goto unlock_and_exit; 553 goto cleanup1;
1017 } 554 }
1018 555
1019 /* Init handler obj */ 556 args[1] = acpi_ut_create_integer_object((u64)function);
557 if (!args[1]) {
558 status = AE_NO_MEMORY;
559 goto cleanup2;
560 }
1020 561
1021 handler_obj->address_space.space_id = (u8) space_id; 562 args[2] = NULL; /* Terminate list */
1022 handler_obj->address_space.handler_flags = flags;
1023 handler_obj->address_space.region_list = NULL;
1024 handler_obj->address_space.node = node;
1025 handler_obj->address_space.handler = handler;
1026 handler_obj->address_space.context = context;
1027 handler_obj->address_space.setup = setup;
1028 563
1029 /* Install at head of Device.address_space list */ 564 /* Execute the method, no return value */
1030 565
1031 handler_obj->address_space.next = obj_desc->device.handler; 566 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
567 (ACPI_TYPE_METHOD, info->prefix_node, NULL));
1032 568
1033 /* 569 status = acpi_ns_evaluate(info);
1034 * The Device object is the first reference on the handler_obj. 570 acpi_ut_remove_reference(args[1]);
1035 * Each region that uses the handler adds a reference.
1036 */
1037 obj_desc->device.handler = handler_obj;
1038 571
1039 /* 572 cleanup2:
1040 * Walk the namespace finding all of the regions this 573 acpi_ut_remove_reference(args[0]);
1041 * handler will manage.
1042 *
1043 * Start at the device and search the branch toward
1044 * the leaf nodes until either the leaf is encountered or
1045 * a device is detected that has an address handler of the
1046 * same type.
1047 *
1048 * In either case, back up and search down the remainder
1049 * of the branch
1050 */
1051 status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
1052 ACPI_NS_WALK_UNLOCK,
1053 acpi_ev_install_handler, NULL,
1054 handler_obj, NULL);
1055 574
1056 unlock_and_exit: 575 cleanup1:
576 ACPI_FREE(info);
1057 return_ACPI_STATUS(status); 577 return_ACPI_STATUS(status);
1058} 578}
1059 579
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1474241bfc7e..3bb616794b3b 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f9661e2b46a9..f4b43bede015 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -89,7 +89,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
89 */ 89 */
90 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); 90 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
91 91
92 return_UINT32(interrupt_handled); 92 return_VALUE(interrupt_handled);
93} 93}
94 94
95/******************************************************************************* 95/*******************************************************************************
@@ -120,7 +120,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
120 120
121 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); 121 interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
122 122
123 return_UINT32(interrupt_handled); 123 return_VALUE(interrupt_handled);
124} 124}
125 125
126/****************************************************************************** 126/******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ae668f32cf16..ddffd6847914 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -56,13 +56,13 @@ ACPI_MODULE_NAME("evxface")
56 * 56 *
57 * FUNCTION: acpi_install_notify_handler 57 * FUNCTION: acpi_install_notify_handler
58 * 58 *
59 * PARAMETERS: Device - The device for which notifies will be handled 59 * PARAMETERS: device - The device for which notifies will be handled
60 * handler_type - The type of handler: 60 * handler_type - The type of handler:
61 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F) 61 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
62 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF) 62 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
63 * ACPI_ALL_NOTIFY: Both System and Device 63 * ACPI_ALL_NOTIFY: Both System and Device
64 * Handler - Address of the handler 64 * handler - Address of the handler
65 * Context - Value passed to the handler on each GPE 65 * context - Value passed to the handler on each GPE
66 * 66 *
67 * RETURN: Status 67 * RETURN: Status
68 * 68 *
@@ -217,12 +217,12 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
217 * 217 *
218 * FUNCTION: acpi_remove_notify_handler 218 * FUNCTION: acpi_remove_notify_handler
219 * 219 *
220 * PARAMETERS: Device - The device for which the handler is installed 220 * PARAMETERS: device - The device for which the handler is installed
221 * handler_type - The type of handler: 221 * handler_type - The type of handler:
222 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F) 222 * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
223 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF) 223 * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
224 * ACPI_ALL_NOTIFY: Both System and Device 224 * ACPI_ALL_NOTIFY: Both System and Device
225 * Handler - Address of the handler 225 * handler - Address of the handler
226 * 226 *
227 * RETURN: Status 227 * RETURN: Status
228 * 228 *
@@ -249,7 +249,8 @@ acpi_remove_notify_handler(acpi_handle device,
249 (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { 249 (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
250 return_ACPI_STATUS(AE_BAD_PARAMETER); 250 return_ACPI_STATUS(AE_BAD_PARAMETER);
251 } 251 }
252 /* Make sure all deferred tasks are completed */ 252
253 /* Make sure all deferred notify tasks are completed */
253 254
254 acpi_os_wait_events_complete(); 255 acpi_os_wait_events_complete();
255 256
@@ -596,7 +597,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
596 return_ACPI_STATUS(status); 597 return_ACPI_STATUS(status);
597 } 598 }
598 599
599 /* Allocate memory for the handler object */ 600 /* Allocate and init handler object (before lock) */
600 601
601 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); 602 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
602 if (!handler) { 603 if (!handler) {
@@ -622,16 +623,15 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
622 goto free_and_exit; 623 goto free_and_exit;
623 } 624 }
624 625
625 /* Allocate and init handler object */
626
627 handler->address = address; 626 handler->address = address;
628 handler->context = context; 627 handler->context = context;
629 handler->method_node = gpe_event_info->dispatch.method_node; 628 handler->method_node = gpe_event_info->dispatch.method_node;
630 handler->original_flags = gpe_event_info->flags & 629 handler->original_flags = (u8)(gpe_event_info->flags &
631 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 630 (ACPI_GPE_XRUPT_TYPE_MASK |
631 ACPI_GPE_DISPATCH_MASK));
632 632
633 /* 633 /*
634 * If the GPE is associated with a method, it might have been enabled 634 * If the GPE is associated with a method, it may have been enabled
635 * automatically during initialization, in which case it has to be 635 * automatically during initialization, in which case it has to be
636 * disabled now to avoid spurious execution of the handler. 636 * disabled now to avoid spurious execution of the handler.
637 */ 637 */
@@ -646,7 +646,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
646 646
647 gpe_event_info->dispatch.handler = handler; 647 gpe_event_info->dispatch.handler = handler;
648 648
649 /* Setup up dispatch flags to indicate handler (vs. method) */ 649 /* Setup up dispatch flags to indicate handler (vs. method/notify) */
650 650
651 gpe_event_info->flags &= 651 gpe_event_info->flags &=
652 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 652 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
@@ -697,7 +697,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
697 return_ACPI_STATUS(AE_BAD_PARAMETER); 697 return_ACPI_STATUS(AE_BAD_PARAMETER);
698 } 698 }
699 699
700 /* Make sure all deferred tasks are completed */ 700 /* Make sure all deferred GPE tasks are completed */
701 701
702 acpi_os_wait_events_complete(); 702 acpi_os_wait_events_complete();
703 703
@@ -747,10 +747,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
747 * enabled, it should be enabled at this point to restore the 747 * enabled, it should be enabled at this point to restore the
748 * post-initialization configuration. 748 * post-initialization configuration.
749 */ 749 */
750 750 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) &&
751 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) 751 handler->originally_enabled) {
752 && handler->originally_enabled)
753 (void)acpi_ev_add_gpe_reference(gpe_event_info); 752 (void)acpi_ev_add_gpe_reference(gpe_event_info);
753 }
754 754
755 /* Now we can free the handler object */ 755 /* Now we can free the handler object */
756 756
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 35520c6eeefb..d6e4e42316db 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@ ACPI_MODULE_NAME("evxfevnt")
61 * DESCRIPTION: Transfers the system into ACPI mode. 61 * DESCRIPTION: Transfers the system into ACPI mode.
62 * 62 *
63 ******************************************************************************/ 63 ******************************************************************************/
64
65acpi_status acpi_enable(void) 64acpi_status acpi_enable(void)
66{ 65{
67 acpi_status status; 66 acpi_status status;
@@ -210,8 +209,8 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
210 * 209 *
211 * FUNCTION: acpi_disable_event 210 * FUNCTION: acpi_disable_event
212 * 211 *
213 * PARAMETERS: Event - The fixed eventto be enabled 212 * PARAMETERS: event - The fixed event to be disabled
214 * Flags - Reserved 213 * flags - Reserved
215 * 214 *
216 * RETURN: Status 215 * RETURN: Status
217 * 216 *
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3f30e753b652..aff4cc261211 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,7 @@
51ACPI_MODULE_NAME("evxfgpe") 51ACPI_MODULE_NAME("evxfgpe")
52 52
53#if (!ACPI_REDUCED_HARDWARE) /* Entire module */ 53#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
54/****************************************************************************** 54/*******************************************************************************
55 * 55 *
56 * FUNCTION: acpi_update_all_gpes 56 * FUNCTION: acpi_update_all_gpes
57 * 57 *
@@ -172,6 +172,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
172 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 172 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
173 return_ACPI_STATUS(status); 173 return_ACPI_STATUS(status);
174} 174}
175
175ACPI_EXPORT_SYMBOL(acpi_disable_gpe) 176ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
176 177
177 178
@@ -225,7 +226,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
225 ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); 226 ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
226 } 227 }
227 228
228 /* Validate WakeDevice is of type Device */ 229 /* Validate wake_device is of type Device */
229 230
230 if (device_node->type != ACPI_TYPE_DEVICE) { 231 if (device_node->type != ACPI_TYPE_DEVICE) {
231 return_ACPI_STATUS (AE_BAD_PARAMETER); 232 return_ACPI_STATUS (AE_BAD_PARAMETER);
@@ -432,8 +433,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
432 * 433 *
433 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 434 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
434 * gpe_number - GPE level within the GPE block 435 * gpe_number - GPE level within the GPE block
435 * event_status - Where the current status of the event will 436 * event_status - Where the current status of the event
436 * be returned 437 * will be returned
437 * 438 *
438 * RETURN: Status 439 * RETURN: Status
439 * 440 *
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 96b412d03950..96c9e5f355ae 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 16219bde48da..d93b70be60ad 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
48#include "actables.h" 48#include "actables.h"
49#include "acdispat.h" 49#include "acdispat.h"
50#include "acevents.h" 50#include "acevents.h"
51#include "amlcode.h"
51 52
52#define _COMPONENT ACPI_EXECUTER 53#define _COMPONENT ACPI_EXECUTER
53ACPI_MODULE_NAME("exconfig") 54ACPI_MODULE_NAME("exconfig")
@@ -120,8 +121,11 @@ acpi_ex_add_table(u32 table_index,
120 acpi_ns_exec_module_code_list(); 121 acpi_ns_exec_module_code_list();
121 acpi_ex_enter_interpreter(); 122 acpi_ex_enter_interpreter();
122 123
123 /* Update GPEs for any new _Lxx/_Exx methods. Ignore errors */ 124 /*
124 125 * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
126 * responsible for discovering any new wake GPEs by running _PRW methods
127 * that may have been loaded by this table.
128 */
125 status = acpi_tb_get_owner_id(table_index, &owner_id); 129 status = acpi_tb_get_owner_id(table_index, &owner_id);
126 if (ACPI_SUCCESS(status)) { 130 if (ACPI_SUCCESS(status)) {
127 acpi_ev_update_gpes(owner_id); 131 acpi_ev_update_gpes(owner_id);
@@ -158,12 +162,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
158 162
159 ACPI_FUNCTION_TRACE(ex_load_table_op); 163 ACPI_FUNCTION_TRACE(ex_load_table_op);
160 164
161 /* Validate lengths for the signature_string, OEMIDString, OEMtable_iD */ 165 /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
162 166
163 if ((operand[0]->string.length > ACPI_NAME_SIZE) || 167 if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
164 (operand[1]->string.length > ACPI_OEM_ID_SIZE) || 168 (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
165 (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) { 169 (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
166 return_ACPI_STATUS(AE_BAD_PARAMETER); 170 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
167 } 171 }
168 172
169 /* Find the ACPI table in the RSDT/XSDT */ 173 /* Find the ACPI table in the RSDT/XSDT */
@@ -210,8 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
210 /* parameter_path (optional parameter) */ 214 /* parameter_path (optional parameter) */
211 215
212 if (operand[4]->string.length > 0) { 216 if (operand[4]->string.length > 0) {
213 if ((operand[4]->string.pointer[0] != '\\') && 217 if ((operand[4]->string.pointer[0] != AML_ROOT_PREFIX) &&
214 (operand[4]->string.pointer[0] != '^')) { 218 (operand[4]->string.pointer[0] != AML_PARENT_PREFIX)) {
215 /* 219 /*
216 * Path is not absolute, so it will be relative to the node 220 * Path is not absolute, so it will be relative to the node
217 * referenced by the root_path_string (or the NS root if omitted) 221 * referenced by the root_path_string (or the NS root if omitted)
@@ -301,7 +305,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
301 acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ, 305 acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
302 region_offset, 8, &value); 306 region_offset, 8, &value);
303 if (ACPI_FAILURE(status)) { 307 if (ACPI_FAILURE(status)) {
304 return status; 308 return (status);
305 } 309 }
306 310
307 *buffer = (u8)value; 311 *buffer = (u8)value;
@@ -309,7 +313,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
309 region_offset++; 313 region_offset++;
310 } 314 }
311 315
312 return AE_OK; 316 return (AE_OK);
313} 317}
314 318
315/******************************************************************************* 319/*******************************************************************************
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 4492a4e03022..d2b9613bbf01 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
176 176
177 /* Save the Result */ 177 /* Save the Result */
178 178
179 acpi_ex_truncate_for32bit_table(return_desc); 179 (void)acpi_ex_truncate_for32bit_table(return_desc);
180 *result_desc = return_desc; 180 *result_desc = return_desc;
181 return_ACPI_STATUS(AE_OK); 181 return_ACPI_STATUS(AE_OK);
182} 182}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 66554bc6f9a8..26a13f67977e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index d7c9f51608a7..7eb853cd279f 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 858b43a7dcf6..e5a3c249f7fa 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -464,9 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
464 464
465 ACPI_FUNCTION_NAME(ex_dump_operand) 465 ACPI_FUNCTION_NAME(ex_dump_operand)
466 466
467 if (! 467 /* Check if debug output enabled */
468 ((ACPI_LV_EXEC & acpi_dbg_level) 468 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
469 && (_COMPONENT & acpi_dbg_layer))) {
470 return; 469 return;
471 } 470 }
472 471
@@ -811,9 +810,10 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
811 ACPI_FUNCTION_ENTRY(); 810 ACPI_FUNCTION_ENTRY();
812 811
813 if (!flags) { 812 if (!flags) {
814 if (! 813
815 ((ACPI_LV_OBJECTS & acpi_dbg_level) 814 /* Check if debug output enabled */
816 && (_COMPONENT & acpi_dbg_layer))) { 815
816 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
817 return; 817 return;
818 } 818 }
819 } 819 }
@@ -999,9 +999,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
999 } 999 }
1000 1000
1001 if (!flags) { 1001 if (!flags) {
1002 if (! 1002
1003 ((ACPI_LV_OBJECTS & acpi_dbg_level) 1003 /* Check if debug output enabled */
1004 && (_COMPONENT & acpi_dbg_layer))) { 1004
1005 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
1005 return_VOID; 1006 return_VOID;
1006 } 1007 }
1007 } 1008 }
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index ebc55fbf3ff7..7d4bae71e8c6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index aa2ccfb7cb61..ec7f5690031b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -329,7 +329,6 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
329static u8 329static u8
330acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) 330acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
331{ 331{
332 ACPI_FUNCTION_NAME(ex_register_overflow);
333 332
334 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { 333 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
335 /* 334 /*
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 84058705ed12..72a2a13b6d36 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index d1f449d93dcf..7be0205ad067 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -377,7 +377,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
377 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); 377 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
378 } 378 }
379 379
380 /* Must have a valid thread. */ 380 /* Must have a valid thread ID */
381
381 if (!walk_state->thread) { 382 if (!walk_state->thread) {
382 ACPI_ERROR((AE_INFO, 383 ACPI_ERROR((AE_INFO,
383 "Cannot release Mutex [%4.4s], null thread info", 384 "Cannot release Mutex [%4.4s], null thread info",
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 2ff578a16adc..14689dec4960 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index bbf01e9bf057..b60c877f5906 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -948,13 +948,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
948 */ 948 */
949 return_desc = 949 return_desc =
950 acpi_ut_create_integer_object((u64) 950 acpi_ut_create_integer_object((u64)
951 temp_desc-> 951 temp_desc->buffer.pointer[operand[0]->reference.value]);
952 buffer.
953 pointer
954 [operand
955 [0]->
956 reference.
957 value]);
958 if (!return_desc) { 952 if (!return_desc) {
959 status = AE_NO_MEMORY; 953 status = AE_NO_MEMORY;
960 goto cleanup; 954 goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index ee5634a074c4..e491e46f17df 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 2c89b4651f08..2d7491f3126e 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 3e08695c3b30..b76b97002dff 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index ba9db4de7c89..d6eab81f54fb 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
276 /* Invalid field access type */ 276 /* Invalid field access type */
277 277
278 ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access)); 278 ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
279 return_UINT32(0); 279 return_VALUE(0);
280 } 280 }
281 281
282 if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) { 282 if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
289 } 289 }
290 290
291 *return_byte_alignment = byte_alignment; 291 *return_byte_alignment = byte_alignment;
292 return_UINT32(bit_length); 292 return_VALUE(bit_length);
293} 293}
294 294
295/******************************************************************************* 295/*******************************************************************************
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 1db2c0bfde0b..182abaf045e1 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,9 @@ acpi_ex_system_memory_space_handler(u32 function,
142 } 142 }
143 143
144 /* 144 /*
145 * Attempt to map from the requested address to the end of the region. 145 * October 2009: Attempt to map from the requested address to the
146 * However, we will never map more than one page, nor will we cross 146 * end of the region. However, we will never map more than one
147 * a page boundary. 147 * page, nor will we cross a page boundary.
148 */ 148 */
149 map_length = (acpi_size) 149 map_length = (acpi_size)
150 ((mem_info->address + mem_info->length) - address); 150 ((mem_info->address + mem_info->length) - address);
@@ -154,12 +154,15 @@ acpi_ex_system_memory_space_handler(u32 function,
154 * a page boundary, just map up to the page boundary, do not cross. 154 * a page boundary, just map up to the page boundary, do not cross.
155 * On some systems, crossing a page boundary while mapping regions 155 * On some systems, crossing a page boundary while mapping regions
156 * can cause warnings if the pages have different attributes 156 * can cause warnings if the pages have different attributes
157 * due to resource management 157 * due to resource management.
158 *
159 * This has the added benefit of constraining a single mapping to
160 * one page, which is similar to the original code that used a 4k
161 * maximum window.
158 */ 162 */
159 page_boundary_map_length = 163 page_boundary_map_length =
160 ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address; 164 ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
161 165 if (page_boundary_map_length == 0) {
162 if (!page_boundary_map_length) {
163 page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; 166 page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
164 } 167 }
165 168
@@ -236,19 +239,19 @@ acpi_ex_system_memory_space_handler(u32 function,
236 239
237 switch (bit_width) { 240 switch (bit_width) {
238 case 8: 241 case 8:
239 ACPI_SET8(logical_addr_ptr) = (u8) * value; 242 ACPI_SET8(logical_addr_ptr, *value);
240 break; 243 break;
241 244
242 case 16: 245 case 16:
243 ACPI_SET16(logical_addr_ptr) = (u16) * value; 246 ACPI_SET16(logical_addr_ptr, *value);
244 break; 247 break;
245 248
246 case 32: 249 case 32:
247 ACPI_SET32(logical_addr_ptr) = (u32) * value; 250 ACPI_SET32(logical_addr_ptr, *value);
248 break; 251 break;
249 252
250 case 64: 253 case 64:
251 ACPI_SET64(logical_addr_ptr) = (u64) * value; 254 ACPI_SET64(logical_addr_ptr, *value);
252 break; 255 break;
253 256
254 default: 257 default:
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 6239956786eb..8565b6bd12bb 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index cc176b245e22..e4f9dfbb2a13 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index b9ebff2f6a09..9fb9f5e9a4da 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 90431f12f831..93c6049c2d75 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -487,14 +487,33 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
487 default: 487 default:
488 488
489 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 489 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
490 "Storing %s (%p) directly into node (%p) with no implicit conversion\n", 490 "Storing [%s] (%p) directly into node [%s] (%p)"
491 " with no implicit conversion\n",
491 acpi_ut_get_object_type_name(source_desc), 492 acpi_ut_get_object_type_name(source_desc),
492 source_desc, node)); 493 source_desc,
494 acpi_ut_get_object_type_name(target_desc),
495 node));
493 496
494 /* No conversions for all other types. Just attach the source object */ 497 /*
498 * No conversions for all other types. Directly store a copy of
499 * the source object. NOTE: This is a departure from the ACPI
500 * spec, which states "If conversion is impossible, abort the
501 * running control method".
502 *
503 * This code implements "If conversion is impossible, treat the
504 * Store operation as a CopyObject".
505 */
506 status =
507 acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
508 walk_state);
509 if (ACPI_FAILURE(status)) {
510 return_ACPI_STATUS(status);
511 }
495 512
496 status = acpi_ns_attach_object(node, source_desc, 513 status =
497 source_desc->common.type); 514 acpi_ns_attach_object(node, new_desc,
515 new_desc->common.type);
516 acpi_ut_remove_reference(new_desc);
498 break; 517 break;
499 } 518 }
500 519
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 87153bbc4b43..1cefe777068e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -253,7 +253,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
253 253
254 /* Truncate value if we are executing from a 32-bit ACPI table */ 254 /* Truncate value if we are executing from a 32-bit ACPI table */
255 255
256 acpi_ex_truncate_for32bit_table(dest_desc); 256 (void)acpi_ex_truncate_for32bit_table(dest_desc);
257 break; 257 break;
258 258
259 case ACPI_TYPE_STRING: 259 case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index b5f339cb1305..26e371073b1a 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index c8a0ad5c1f55..6578dee2e51b 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 264d22d8018c..b205cbb4b50c 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -202,35 +202,39 @@ void acpi_ex_relinquish_interpreter(void)
202 * 202 *
203 * PARAMETERS: obj_desc - Object to be truncated 203 * PARAMETERS: obj_desc - Object to be truncated
204 * 204 *
205 * RETURN: none 205 * RETURN: TRUE if a truncation was performed, FALSE otherwise.
206 * 206 *
207 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is 207 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
208 * 32-bit, as determined by the revision of the DSDT. 208 * 32-bit, as determined by the revision of the DSDT.
209 * 209 *
210 ******************************************************************************/ 210 ******************************************************************************/
211 211
212void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc) 212u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
213{ 213{
214 214
215 ACPI_FUNCTION_ENTRY(); 215 ACPI_FUNCTION_ENTRY();
216 216
217 /* 217 /*
218 * Object must be a valid number and we must be executing 218 * Object must be a valid number and we must be executing
219 * a control method. NS node could be there for AML_INT_NAMEPATH_OP. 219 * a control method. Object could be NS node for AML_INT_NAMEPATH_OP.
220 */ 220 */
221 if ((!obj_desc) || 221 if ((!obj_desc) ||
222 (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) || 222 (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
223 (obj_desc->common.type != ACPI_TYPE_INTEGER)) { 223 (obj_desc->common.type != ACPI_TYPE_INTEGER)) {
224 return; 224 return (FALSE);
225 } 225 }
226 226
227 if (acpi_gbl_integer_byte_width == 4) { 227 if ((acpi_gbl_integer_byte_width == 4) &&
228 (obj_desc->integer.value > (u64)ACPI_UINT32_MAX)) {
228 /* 229 /*
229 * We are running a method that exists in a 32-bit ACPI table. 230 * We are executing in a 32-bit ACPI table.
230 * Truncate the value to 32 bits by zeroing out the upper 32-bit field 231 * Truncate the value to 32 bits by zeroing out the upper 32-bit field
231 */ 232 */
232 obj_desc->integer.value &= (u64) ACPI_UINT32_MAX; 233 obj_desc->integer.value &= (u64)ACPI_UINT32_MAX;
234 return (TRUE);
233 } 235 }
236
237 return (FALSE);
234} 238}
235 239
236/******************************************************************************* 240/*******************************************************************************
@@ -336,7 +340,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
336 /* u64 is unsigned, so we don't worry about a '-' prefix */ 340 /* u64 is unsigned, so we don't worry about a '-' prefix */
337 341
338 if (value == 0) { 342 if (value == 0) {
339 return_UINT32(1); 343 return_VALUE(1);
340 } 344 }
341 345
342 current_value = value; 346 current_value = value;
@@ -350,7 +354,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
350 num_digits++; 354 num_digits++;
351 } 355 }
352 356
353 return_UINT32(num_digits); 357 return_VALUE(num_digits);
354} 358}
355 359
356/******************************************************************************* 360/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 90a9aea1cee9..deb3f61e2bd1 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,8 +108,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
108 * enable bits to default 108 * enable bits to default
109 */ 109 */
110 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, 110 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
111 (u32) acpi_gbl_FADT.acpi_disable, 111 (u32)acpi_gbl_FADT.acpi_disable, 8);
112 8);
113 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 112 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
114 "Attempting to enable Legacy (non-ACPI) mode\n")); 113 "Attempting to enable Legacy (non-ACPI) mode\n"));
115 break; 114 break;
@@ -152,18 +151,18 @@ u32 acpi_hw_get_mode(void)
152 * system does not support mode transition. 151 * system does not support mode transition.
153 */ 152 */
154 if (!acpi_gbl_FADT.smi_command) { 153 if (!acpi_gbl_FADT.smi_command) {
155 return_UINT32(ACPI_SYS_MODE_ACPI); 154 return_VALUE(ACPI_SYS_MODE_ACPI);
156 } 155 }
157 156
158 status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value); 157 status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
159 if (ACPI_FAILURE(status)) { 158 if (ACPI_FAILURE(status)) {
160 return_UINT32(ACPI_SYS_MODE_LEGACY); 159 return_VALUE(ACPI_SYS_MODE_LEGACY);
161 } 160 }
162 161
163 if (value) { 162 if (value) {
164 return_UINT32(ACPI_SYS_MODE_ACPI); 163 return_VALUE(ACPI_SYS_MODE_ACPI);
165 } else { 164 } else {
166 return_UINT32(ACPI_SYS_MODE_LEGACY); 165 return_VALUE(ACPI_SYS_MODE_LEGACY);
167 } 166 }
168} 167}
169 168
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 94996f9ae3ad..5e5f76230f5e 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -200,7 +200,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
200 * FUNCTION: acpi_hw_extended_wake 200 * FUNCTION: acpi_hw_extended_wake
201 * 201 *
202 * PARAMETERS: sleep_state - Which sleep state we just exited 202 * PARAMETERS: sleep_state - Which sleep state we just exited
203 * flags - Reserved, set to zero
204 * 203 *
205 * RETURN: Status 204 * RETURN: Status
206 * 205 *
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 64560045052d..20d02e93c990 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -69,8 +69,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
69 69
70u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info) 70u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
71{ 71{
72 return (u32)1 << (gpe_event_info->gpe_number - 72
73 gpe_event_info->register_info->base_gpe_number); 73 return ((u32)1 <<
74 (gpe_event_info->gpe_number -
75 gpe_event_info->register_info->base_gpe_number));
74} 76}
75 77
76/****************************************************************************** 78/******************************************************************************
@@ -133,7 +135,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
133 break; 135 break;
134 136
135 default: 137 default:
136 ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); 138 ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u", action));
137 return (AE_BAD_PARAMETER); 139 return (AE_BAD_PARAMETER);
138 } 140 }
139 141
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 65bc3453a29c..0889a629505f 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index f4e57503576b..083d6551f0e2 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include "accommon.h" 46#include "accommon.h"
47#include "acnamesp.h"
48#include "acevents.h" 47#include "acevents.h"
49 48
50#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
@@ -364,8 +363,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
364 * DESCRIPTION: Read from the specified ACPI register 363 * DESCRIPTION: Read from the specified ACPI register
365 * 364 *
366 ******************************************************************************/ 365 ******************************************************************************/
367acpi_status 366acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
368acpi_hw_register_read(u32 register_id, u32 * return_value)
369{ 367{
370 u32 value = 0; 368 u32 value = 0;
371 acpi_status status; 369 acpi_status status;
@@ -485,7 +483,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
485 &acpi_gbl_xpm1b_status); 483 &acpi_gbl_xpm1b_status);
486 break; 484 break;
487 485
488 case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access */ 486 case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access each */
489 487
490 status = acpi_hw_write_multiple(value, 488 status = acpi_hw_write_multiple(value,
491 &acpi_gbl_xpm1a_enable, 489 &acpi_gbl_xpm1a_enable,
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3fddde056a5e..e3828cc4361b 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <linux/acpi.h> 46#include <linux/acpi.h>
47#include "accommon.h" 47#include "accommon.h"
48#include <linux/module.h>
49 48
50#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
51ACPI_MODULE_NAME("hwsleep") 50ACPI_MODULE_NAME("hwsleep")
@@ -178,7 +177,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
178 * to still read the right value. Ideally, this block would go 177 * to still read the right value. Ideally, this block would go
179 * away entirely. 178 * away entirely.
180 */ 179 */
181 acpi_os_stall(10000000); 180 acpi_os_stall(10 * ACPI_USEC_PER_SEC);
182 181
183 status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL, 182 status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
184 sleep_enable_reg_info-> 183 sleep_enable_reg_info->
@@ -323,7 +322,8 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
323 * and use it to determine whether the system is rebooting or 322 * and use it to determine whether the system is rebooting or
324 * resuming. Clear WAK_STS for compatibility. 323 * resuming. Clear WAK_STS for compatibility.
325 */ 324 */
326 acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1); 325 (void)acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS,
326 ACPI_CLEAR_STATUS);
327 acpi_gbl_system_awake_and_running = TRUE; 327 acpi_gbl_system_awake_and_running = TRUE;
328 328
329 /* Enable power button */ 329 /* Enable power button */
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index bfdce22f3798..0c1a8bbd05d6 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -176,10 +176,11 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
176 /* 176 /*
177 * Compute Duration (Requires a 64-bit multiply and divide): 177 * Compute Duration (Requires a 64-bit multiply and divide):
178 * 178 *
179 * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY; 179 * time_elapsed (microseconds) =
180 * (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
180 */ 181 */
181 status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000, 182 status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
182 PM_TIMER_FREQUENCY, &quotient, NULL); 183 ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
183 184
184 *time_elapsed = (u32) quotient; 185 *time_elapsed = (u32) quotient;
185 return_ACPI_STATUS(status); 186 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b6aae58299dc..eab70d58852a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -135,7 +135,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
135 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) { 135 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
136 ACPI_ERROR((AE_INFO, 136 ACPI_ERROR((AE_INFO,
137 "Bad BitWidth parameter: %8.8X", bit_width)); 137 "Bad BitWidth parameter: %8.8X", bit_width));
138 return AE_BAD_PARAMETER; 138 return (AE_BAD_PARAMETER);
139 } 139 }
140 140
141 port_info = acpi_protected_ports; 141 port_info = acpi_protected_ports;
@@ -234,11 +234,11 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
234 status = acpi_hw_validate_io_request(address, width); 234 status = acpi_hw_validate_io_request(address, width);
235 if (ACPI_SUCCESS(status)) { 235 if (ACPI_SUCCESS(status)) {
236 status = acpi_os_read_port(address, value, width); 236 status = acpi_os_read_port(address, value, width);
237 return status; 237 return (status);
238 } 238 }
239 239
240 if (status != AE_AML_ILLEGAL_ADDRESS) { 240 if (status != AE_AML_ILLEGAL_ADDRESS) {
241 return status; 241 return (status);
242 } 242 }
243 243
244 /* 244 /*
@@ -253,7 +253,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
253 if (acpi_hw_validate_io_request(address, 8) == AE_OK) { 253 if (acpi_hw_validate_io_request(address, 8) == AE_OK) {
254 status = acpi_os_read_port(address, &one_byte, 8); 254 status = acpi_os_read_port(address, &one_byte, 8);
255 if (ACPI_FAILURE(status)) { 255 if (ACPI_FAILURE(status)) {
256 return status; 256 return (status);
257 } 257 }
258 258
259 *value |= (one_byte << i); 259 *value |= (one_byte << i);
@@ -262,7 +262,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
262 address++; 262 address++;
263 } 263 }
264 264
265 return AE_OK; 265 return (AE_OK);
266} 266}
267 267
268/****************************************************************************** 268/******************************************************************************
@@ -297,11 +297,11 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
297 status = acpi_hw_validate_io_request(address, width); 297 status = acpi_hw_validate_io_request(address, width);
298 if (ACPI_SUCCESS(status)) { 298 if (ACPI_SUCCESS(status)) {
299 status = acpi_os_write_port(address, value, width); 299 status = acpi_os_write_port(address, value, width);
300 return status; 300 return (status);
301 } 301 }
302 302
303 if (status != AE_AML_ILLEGAL_ADDRESS) { 303 if (status != AE_AML_ILLEGAL_ADDRESS) {
304 return status; 304 return (status);
305 } 305 }
306 306
307 /* 307 /*
@@ -317,12 +317,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
317 status = 317 status =
318 acpi_os_write_port(address, (value >> i) & 0xFF, 8); 318 acpi_os_write_port(address, (value >> i) & 0xFF, 8);
319 if (ACPI_FAILURE(status)) { 319 if (ACPI_FAILURE(status)) {
320 return status; 320 return (status);
321 } 321 }
322 } 322 }
323 323
324 address++; 324 address++;
325 } 325 }
326 326
327 return AE_OK; 327 return (AE_OK);
328} 328}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 05a154c3c9ac..04c2e16f2c0a 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -80,10 +80,10 @@ acpi_status acpi_reset(void)
80 80
81 if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 81 if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
82 /* 82 /*
83 * For I/O space, write directly to the OSL. This 83 * For I/O space, write directly to the OSL. This bypasses the port
84 * bypasses the port validation mechanism, which may 84 * validation mechanism, which may block a valid write to the reset
85 * block a valid write to the reset register. Spec 85 * register.
86 * section 4.7.3.6 requires register width to be 8. 86 * Spec section 4.7.3.6 requires register width to be 8.
87 */ 87 */
88 status = 88 status =
89 acpi_os_write_port((acpi_io_address) reset_reg->address, 89 acpi_os_write_port((acpi_io_address) reset_reg->address,
@@ -333,7 +333,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
333 * FUNCTION: acpi_write_bit_register 333 * FUNCTION: acpi_write_bit_register
334 * 334 *
335 * PARAMETERS: register_id - ID of ACPI Bit Register to access 335 * PARAMETERS: register_id - ID of ACPI Bit Register to access
336 * Value - Value to write to the register, in bit 336 * value - Value to write to the register, in bit
337 * position zero. The bit is automatically 337 * position zero. The bit is automatically
338 * shifted to the correct position. 338 * shifted to the correct position.
339 * 339 *
@@ -440,17 +440,41 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
440 * *sleep_type_a - Where SLP_TYPa is returned 440 * *sleep_type_a - Where SLP_TYPa is returned
441 * *sleep_type_b - Where SLP_TYPb is returned 441 * *sleep_type_b - Where SLP_TYPb is returned
442 * 442 *
443 * RETURN: status - ACPI status 443 * RETURN: Status
444 *
445 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested
446 * sleep state via the appropriate \_Sx object.
447 *
448 * The sleep state package returned from the corresponding \_Sx_ object
449 * must contain at least one integer.
450 *
451 * March 2005:
452 * Added support for a package that contains two integers. This
453 * goes against the ACPI specification which defines this object as a
454 * package with one encoded DWORD integer. However, existing practice
455 * by many BIOS vendors is to return a package with 2 or more integer
456 * elements, at least one per sleep type (A/B).
444 * 457 *
445 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep 458 * January 2013:
446 * state. 459 * Therefore, we must be prepared to accept a package with either a
460 * single integer or multiple integers.
461 *
462 * The single integer DWORD format is as follows:
463 * BYTE 0 - Value for the PM1A SLP_TYP register
464 * BYTE 1 - Value for the PM1B SLP_TYP register
465 * BYTE 2-3 - Reserved
466 *
467 * The dual integer format is as follows:
468 * Integer 0 - Value for the PM1A SLP_TYP register
469 * Integer 1 - Value for the PM1A SLP_TYP register
447 * 470 *
448 ******************************************************************************/ 471 ******************************************************************************/
449acpi_status 472acpi_status
450acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b) 473acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
451{ 474{
452 acpi_status status = AE_OK; 475 acpi_status status;
453 struct acpi_evaluate_info *info; 476 struct acpi_evaluate_info *info;
477 union acpi_operand_object **elements;
454 478
455 ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data); 479 ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
456 480
@@ -467,18 +491,14 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
467 return_ACPI_STATUS(AE_NO_MEMORY); 491 return_ACPI_STATUS(AE_NO_MEMORY);
468 } 492 }
469 493
494 /*
495 * Evaluate the \_Sx namespace object containing the register values
496 * for this state
497 */
470 info->pathname = 498 info->pathname =
471 ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]); 499 ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
472
473 /* Evaluate the namespace object containing the values for this state */
474
475 status = acpi_ns_evaluate(info); 500 status = acpi_ns_evaluate(info);
476 if (ACPI_FAILURE(status)) { 501 if (ACPI_FAILURE(status)) {
477 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
478 "%s while evaluating SleepState [%s]\n",
479 acpi_format_exception(status),
480 info->pathname));
481
482 goto cleanup; 502 goto cleanup;
483 } 503 }
484 504
@@ -487,64 +507,67 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
487 if (!info->return_object) { 507 if (!info->return_object) {
488 ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]", 508 ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
489 info->pathname)); 509 info->pathname));
490 status = AE_NOT_EXIST; 510 status = AE_AML_NO_RETURN_VALUE;
511 goto cleanup;
491 } 512 }
492 513
493 /* It must be of type Package */ 514 /* Return object must be of type Package */
494 515
495 else if (info->return_object->common.type != ACPI_TYPE_PACKAGE) { 516 if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
496 ACPI_ERROR((AE_INFO, 517 ACPI_ERROR((AE_INFO,
497 "Sleep State return object is not a Package")); 518 "Sleep State return object is not a Package"));
498 status = AE_AML_OPERAND_TYPE; 519 status = AE_AML_OPERAND_TYPE;
520 goto cleanup1;
499 } 521 }
500 522
501 /* 523 /*
502 * The package must have at least two elements. NOTE (March 2005): This 524 * Any warnings about the package length or the object types have
503 * goes against the current ACPI spec which defines this object as a 525 * already been issued by the predefined name module -- there is no
504 * package with one encoded DWORD element. However, existing practice 526 * need to repeat them here.
505 * by BIOS vendors seems to be to have 2 or more elements, at least
506 * one per sleep type (A/B).
507 */ 527 */
508 else if (info->return_object->package.count < 2) { 528 elements = info->return_object->package.elements;
509 ACPI_ERROR((AE_INFO, 529 switch (info->return_object->package.count) {
510 "Sleep State return package does not have at least two elements")); 530 case 0:
511 status = AE_AML_NO_OPERAND; 531 status = AE_AML_PACKAGE_LIMIT;
512 } 532 break;
533
534 case 1:
535 if (elements[0]->common.type != ACPI_TYPE_INTEGER) {
536 status = AE_AML_OPERAND_TYPE;
537 break;
538 }
513 539
514 /* The first two elements must both be of type Integer */ 540 /* A valid _Sx_ package with one integer */
515 541
516 else if (((info->return_object->package.elements[0])->common.type 542 *sleep_type_a = (u8)elements[0]->integer.value;
517 != ACPI_TYPE_INTEGER) || 543 *sleep_type_b = (u8)(elements[0]->integer.value >> 8);
518 ((info->return_object->package.elements[1])->common.type 544 break;
519 != ACPI_TYPE_INTEGER)) {
520 ACPI_ERROR((AE_INFO,
521 "Sleep State return package elements are not both Integers "
522 "(%s, %s)",
523 acpi_ut_get_object_type_name(info->return_object->
524 package.elements[0]),
525 acpi_ut_get_object_type_name(info->return_object->
526 package.elements[1])));
527 status = AE_AML_OPERAND_TYPE;
528 } else {
529 /* Valid _Sx_ package size, type, and value */
530 545
531 *sleep_type_a = (u8) 546 case 2:
532 (info->return_object->package.elements[0])->integer.value; 547 default:
533 *sleep_type_b = (u8) 548 if ((elements[0]->common.type != ACPI_TYPE_INTEGER) ||
534 (info->return_object->package.elements[1])->integer.value; 549 (elements[1]->common.type != ACPI_TYPE_INTEGER)) {
535 } 550 status = AE_AML_OPERAND_TYPE;
551 break;
552 }
536 553
537 if (ACPI_FAILURE(status)) { 554 /* A valid _Sx_ package with two integers */
538 ACPI_EXCEPTION((AE_INFO, status, 555
539 "While evaluating SleepState [%s], bad Sleep object %p type %s", 556 *sleep_type_a = (u8)elements[0]->integer.value;
540 info->pathname, info->return_object, 557 *sleep_type_b = (u8)elements[1]->integer.value;
541 acpi_ut_get_object_type_name(info-> 558 break;
542 return_object)));
543 } 559 }
544 560
561 cleanup1:
545 acpi_ut_remove_reference(info->return_object); 562 acpi_ut_remove_reference(info->return_object);
546 563
547 cleanup: 564 cleanup:
565 if (ACPI_FAILURE(status)) {
566 ACPI_EXCEPTION((AE_INFO, status,
567 "While evaluating Sleep State [%s]",
568 info->pathname));
569 }
570
548 ACPI_FREE(info); 571 ACPI_FREE(info);
549 return_ACPI_STATUS(status); 572 return_ACPI_STATUS(status);
550} 573}
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index ae443fe2ebf6..35eebdac0f9d 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,9 @@
41 * POSSIBILITY OF SUCH DAMAGES. 41 * POSSIBILITY OF SUCH DAMAGES.
42 */ 42 */
43 43
44#include <linux/export.h>
44#include <acpi/acpi.h> 45#include <acpi/acpi.h>
45#include "accommon.h" 46#include "accommon.h"
46#include <linux/module.h>
47 47
48#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwxfsleep") 49ACPI_MODULE_NAME("hwxfsleep")
@@ -207,7 +207,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
207 (u32)acpi_gbl_FADT.s4_bios_request, 8); 207 (u32)acpi_gbl_FADT.s4_bios_request, 8);
208 208
209 do { 209 do {
210 acpi_os_stall(1000); 210 acpi_os_stall(ACPI_USEC_PER_MSEC);
211 status = 211 status =
212 acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value); 212 acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
213 if (ACPI_FAILURE(status)) { 213 if (ACPI_FAILURE(status)) {
@@ -350,7 +350,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
350 * 350 *
351 * RETURN: Status 351 * RETURN: Status
352 * 352 *
353 * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231) 353 * DESCRIPTION: Enter a system sleep state
354 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 354 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
355 * 355 *
356 ******************************************************************************/ 356 ******************************************************************************/
@@ -382,8 +382,9 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
382 * RETURN: Status 382 * RETURN: Status
383 * 383 *
384 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a 384 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
385 * sleep. 385 * sleep. Called with interrupts DISABLED.
386 * Called with interrupts DISABLED. 386 * We break wake/resume into 2 stages so that OSPM can handle
387 * various OS-specific tasks between the two steps.
387 * 388 *
388 ******************************************************************************/ 389 ******************************************************************************/
389acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) 390acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d70eaf39dfdf..8769cf83b044 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 15143c44f5e5..243737363fb8 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 924b3c71473a..ce6e97326205 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "acnamesp.h" 46#include "acnamesp.h"
47#include <acpi/acoutput.h>
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsdump") 50ACPI_MODULE_NAME("nsdump")
@@ -77,8 +78,9 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
77 78
78 ACPI_FUNCTION_NAME(ns_print_pathname); 79 ACPI_FUNCTION_NAME(ns_print_pathname);
79 80
80 if (!(acpi_dbg_level & ACPI_LV_NAMES) 81 /* Check if debug output enabled */
81 || !(acpi_dbg_layer & ACPI_NAMESPACE)) { 82
83 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_NAMES, ACPI_NAMESPACE)) {
82 return; 84 return;
83 } 85 }
84 86
@@ -127,7 +129,7 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
127 129
128 /* Do this only if the requested debug level and component are enabled */ 130 /* Do this only if the requested debug level and component are enabled */
129 131
130 if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) { 132 if (!ACPI_IS_DEBUG_ENABLED(level, component)) {
131 return_VOID; 133 return_VOID;
132 } 134 }
133 135
@@ -729,5 +731,5 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
729 ACPI_OWNER_ID_MAX, search_handle); 731 ACPI_OWNER_ID_MAX, search_handle);
730 return_VOID; 732 return_VOID;
731} 733}
732#endif /* _ACPI_ASL_COMPILER */ 734#endif
733#endif /* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */ 735#endif
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 944d4c8d9438..409ae80824d1 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h"
46 45
47/* TBD: This entire module is apparently obsolete and should be removed */ 46/* TBD: This entire module is apparently obsolete and should be removed */
48 47
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 69074be498e8..1538f3eb2a8f 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 4328e2adfeb9..2a431ec50a25 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@
46#include "acnamesp.h" 46#include "acnamesp.h"
47#include "acdispat.h" 47#include "acdispat.h"
48#include "acinterp.h" 48#include "acinterp.h"
49#include <linux/nmi.h>
50 49
51#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
52ACPI_MODULE_NAME("nsinit") 51ACPI_MODULE_NAME("nsinit")
@@ -87,7 +86,7 @@ acpi_status acpi_ns_initialize_objects(void)
87 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 86 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
88 "**** Starting initialization of namespace objects ****\n")); 87 "**** Starting initialization of namespace objects ****\n"));
89 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 88 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
90 "Completing Region/Field/Buffer/Package initialization:")); 89 "Completing Region/Field/Buffer/Package initialization:\n"));
91 90
92 /* Set all init info to zero */ 91 /* Set all init info to zero */
93 92
@@ -103,7 +102,7 @@ acpi_status acpi_ns_initialize_objects(void)
103 } 102 }
104 103
105 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 104 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
106 "\nInitialized %u/%u Regions %u/%u Fields %u/%u " 105 " Initialized %u/%u Regions %u/%u Fields %u/%u "
107 "Buffers %u/%u Packages (%u nodes)\n", 106 "Buffers %u/%u Packages (%u nodes)\n",
108 info.op_region_init, info.op_region_count, 107 info.op_region_init, info.op_region_count,
109 info.field_init, info.field_count, 108 info.field_init, info.field_count,
@@ -150,7 +149,7 @@ acpi_status acpi_ns_initialize_devices(void)
150 149
151 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 150 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
152 "Initializing Device/Processor/Thermal objects " 151 "Initializing Device/Processor/Thermal objects "
153 "by executing _INI methods:")); 152 "and executing _INI/_STA methods:\n"));
154 153
155 /* Tree analysis: find all subtrees that contain _INI methods */ 154 /* Tree analysis: find all subtrees that contain _INI methods */
156 155
@@ -208,7 +207,7 @@ acpi_status acpi_ns_initialize_devices(void)
208 } 207 }
209 208
210 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 209 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
211 "\nExecuted %u _INI methods requiring %u _STA executions " 210 " Executed %u _INI methods requiring %u _STA executions "
212 "(examined %u objects)\n", 211 "(examined %u objects)\n",
213 info.num_INI, info.num_STA, info.device_count)); 212 info.num_INI, info.num_STA, info.device_count));
214 213
@@ -350,14 +349,6 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
350 } 349 }
351 350
352 /* 351 /*
353 * Print a dot for each object unless we are going to print the entire
354 * pathname
355 */
356 if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
357 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
358 }
359
360 /*
361 * We ignore errors from above, and always return OK, since we don't want 352 * We ignore errors from above, and always return OK, since we don't want
362 * to abort the walk on any single error. 353 * to abort the walk on any single error.
363 */ 354 */
@@ -572,20 +563,10 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
572 info->parameters = NULL; 563 info->parameters = NULL;
573 info->flags = ACPI_IGNORE_RETURN_VALUE; 564 info->flags = ACPI_IGNORE_RETURN_VALUE;
574 565
575 /*
576 * Some hardware relies on this being executed as atomically
577 * as possible (without an NMI being received in the middle of
578 * this) - so disable NMIs and initialize the device:
579 */
580 status = acpi_ns_evaluate(info); 566 status = acpi_ns_evaluate(info);
581 567
582 if (ACPI_SUCCESS(status)) { 568 if (ACPI_SUCCESS(status)) {
583 walk_info->num_INI++; 569 walk_info->num_INI++;
584
585 if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
586 (!(acpi_dbg_level & ACPI_LV_INFO))) {
587 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
588 }
589 } 570 }
590#ifdef ACPI_DEBUG_OUTPUT 571#ifdef ACPI_DEBUG_OUTPUT
591 else if (status != AE_NOT_FOUND) { 572 else if (status != AE_NOT_FOUND) {
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 911f99127b99..0a7badc3179f 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 55a175eadcc3..90a0380fb8a0 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,8 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
126 * the node, In external format (name segments separated by path 126 * the node, In external format (name segments separated by path
127 * separators.) 127 * separators.)
128 * 128 *
129 * DESCRIPTION: Used for debug printing in acpi_ns_search_table(). 129 * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
130 * for error and debug statements.
130 * 131 *
131 ******************************************************************************/ 132 ******************************************************************************/
132 133
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index e69f7fa2579d..7a736f4d1fd8 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 233f756d5cfa..35dde8151c0d 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2419f417ea33..224c30053401 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -1,12 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: nspredef - Validation of ACPI predefined methods and objects 3 * Module Name: nspredef - Validation of ACPI predefined methods and objects
4 * $Revision: 1.1 $
5 * 4 *
6 *****************************************************************************/ 5 *****************************************************************************/
7 6
8/* 7/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 9 * All rights reserved.
11 * 10 *
12 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -74,27 +73,6 @@ ACPI_MODULE_NAME("nspredef")
74 ******************************************************************************/ 73 ******************************************************************************/
75/* Local prototypes */ 74/* Local prototypes */
76static acpi_status 75static acpi_status
77acpi_ns_check_package(struct acpi_predefined_data *data,
78 union acpi_operand_object **return_object_ptr);
79
80static acpi_status
81acpi_ns_check_package_list(struct acpi_predefined_data *data,
82 const union acpi_predefined_info *package,
83 union acpi_operand_object **elements, u32 count);
84
85static acpi_status
86acpi_ns_check_package_elements(struct acpi_predefined_data *data,
87 union acpi_operand_object **elements,
88 u8 type1,
89 u32 count1,
90 u8 type2, u32 count2, u32 start_index);
91
92static acpi_status
93acpi_ns_check_object_type(struct acpi_predefined_data *data,
94 union acpi_operand_object **return_object_ptr,
95 u32 expected_btypes, u32 package_index);
96
97static acpi_status
98acpi_ns_check_reference(struct acpi_predefined_data *data, 76acpi_ns_check_reference(struct acpi_predefined_data *data,
99 union acpi_operand_object *return_object); 77 union acpi_operand_object *return_object);
100 78
@@ -148,7 +126,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
148 126
149 pathname = acpi_ns_get_external_pathname(node); 127 pathname = acpi_ns_get_external_pathname(node);
150 if (!pathname) { 128 if (!pathname) {
151 return AE_OK; /* Could not get pathname, ignore */ 129 return (AE_OK); /* Could not get pathname, ignore */
152 } 130 }
153 131
154 /* 132 /*
@@ -408,564 +386,6 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
408 386
409/******************************************************************************* 387/*******************************************************************************
410 * 388 *
411 * FUNCTION: acpi_ns_check_package
412 *
413 * PARAMETERS: data - Pointer to validation data structure
414 * return_object_ptr - Pointer to the object returned from the
415 * evaluation of a method or object
416 *
417 * RETURN: Status
418 *
419 * DESCRIPTION: Check a returned package object for the correct count and
420 * correct type of all sub-objects.
421 *
422 ******************************************************************************/
423
424static acpi_status
425acpi_ns_check_package(struct acpi_predefined_data *data,
426 union acpi_operand_object **return_object_ptr)
427{
428 union acpi_operand_object *return_object = *return_object_ptr;
429 const union acpi_predefined_info *package;
430 union acpi_operand_object **elements;
431 acpi_status status = AE_OK;
432 u32 expected_count;
433 u32 count;
434 u32 i;
435
436 ACPI_FUNCTION_NAME(ns_check_package);
437
438 /* The package info for this name is in the next table entry */
439
440 package = data->predefined + 1;
441
442 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
443 "%s Validating return Package of Type %X, Count %X\n",
444 data->pathname, package->ret_info.type,
445 return_object->package.count));
446
447 /*
448 * For variable-length Packages, we can safely remove all embedded
449 * and trailing NULL package elements
450 */
451 acpi_ns_remove_null_elements(data, package->ret_info.type,
452 return_object);
453
454 /* Extract package count and elements array */
455
456 elements = return_object->package.elements;
457 count = return_object->package.count;
458
459 /* The package must have at least one element, else invalid */
460
461 if (!count) {
462 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
463 "Return Package has no elements (empty)"));
464
465 return (AE_AML_OPERAND_VALUE);
466 }
467
468 /*
469 * Decode the type of the expected package contents
470 *
471 * PTYPE1 packages contain no subpackages
472 * PTYPE2 packages contain sub-packages
473 */
474 switch (package->ret_info.type) {
475 case ACPI_PTYPE1_FIXED:
476
477 /*
478 * The package count is fixed and there are no sub-packages
479 *
480 * If package is too small, exit.
481 * If package is larger than expected, issue warning but continue
482 */
483 expected_count =
484 package->ret_info.count1 + package->ret_info.count2;
485 if (count < expected_count) {
486 goto package_too_small;
487 } else if (count > expected_count) {
488 ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
489 "%s: Return Package is larger than needed - "
490 "found %u, expected %u\n",
491 data->pathname, count,
492 expected_count));
493 }
494
495 /* Validate all elements of the returned package */
496
497 status = acpi_ns_check_package_elements(data, elements,
498 package->ret_info.
499 object_type1,
500 package->ret_info.
501 count1,
502 package->ret_info.
503 object_type2,
504 package->ret_info.
505 count2, 0);
506 break;
507
508 case ACPI_PTYPE1_VAR:
509
510 /*
511 * The package count is variable, there are no sub-packages, and all
512 * elements must be of the same type
513 */
514 for (i = 0; i < count; i++) {
515 status = acpi_ns_check_object_type(data, elements,
516 package->ret_info.
517 object_type1, i);
518 if (ACPI_FAILURE(status)) {
519 return (status);
520 }
521 elements++;
522 }
523 break;
524
525 case ACPI_PTYPE1_OPTION:
526
527 /*
528 * The package count is variable, there are no sub-packages. There are
529 * a fixed number of required elements, and a variable number of
530 * optional elements.
531 *
532 * Check if package is at least as large as the minimum required
533 */
534 expected_count = package->ret_info3.count;
535 if (count < expected_count) {
536 goto package_too_small;
537 }
538
539 /* Variable number of sub-objects */
540
541 for (i = 0; i < count; i++) {
542 if (i < package->ret_info3.count) {
543
544 /* These are the required package elements (0, 1, or 2) */
545
546 status =
547 acpi_ns_check_object_type(data, elements,
548 package->
549 ret_info3.
550 object_type[i],
551 i);
552 if (ACPI_FAILURE(status)) {
553 return (status);
554 }
555 } else {
556 /* These are the optional package elements */
557
558 status =
559 acpi_ns_check_object_type(data, elements,
560 package->
561 ret_info3.
562 tail_object_type,
563 i);
564 if (ACPI_FAILURE(status)) {
565 return (status);
566 }
567 }
568 elements++;
569 }
570 break;
571
572 case ACPI_PTYPE2_REV_FIXED:
573
574 /* First element is the (Integer) revision */
575
576 status = acpi_ns_check_object_type(data, elements,
577 ACPI_RTYPE_INTEGER, 0);
578 if (ACPI_FAILURE(status)) {
579 return (status);
580 }
581
582 elements++;
583 count--;
584
585 /* Examine the sub-packages */
586
587 status =
588 acpi_ns_check_package_list(data, package, elements, count);
589 break;
590
591 case ACPI_PTYPE2_PKG_COUNT:
592
593 /* First element is the (Integer) count of sub-packages to follow */
594
595 status = acpi_ns_check_object_type(data, elements,
596 ACPI_RTYPE_INTEGER, 0);
597 if (ACPI_FAILURE(status)) {
598 return (status);
599 }
600
601 /*
602 * Count cannot be larger than the parent package length, but allow it
603 * to be smaller. The >= accounts for the Integer above.
604 */
605 expected_count = (u32) (*elements)->integer.value;
606 if (expected_count >= count) {
607 goto package_too_small;
608 }
609
610 count = expected_count;
611 elements++;
612
613 /* Examine the sub-packages */
614
615 status =
616 acpi_ns_check_package_list(data, package, elements, count);
617 break;
618
619 case ACPI_PTYPE2:
620 case ACPI_PTYPE2_FIXED:
621 case ACPI_PTYPE2_MIN:
622 case ACPI_PTYPE2_COUNT:
623 case ACPI_PTYPE2_FIX_VAR:
624
625 /*
626 * These types all return a single Package that consists of a
627 * variable number of sub-Packages.
628 *
629 * First, ensure that the first element is a sub-Package. If not,
630 * the BIOS may have incorrectly returned the object as a single
631 * package instead of a Package of Packages (a common error if
632 * there is only one entry). We may be able to repair this by
633 * wrapping the returned Package with a new outer Package.
634 */
635 if (*elements
636 && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {
637
638 /* Create the new outer package and populate it */
639
640 status =
641 acpi_ns_wrap_with_package(data, return_object,
642 return_object_ptr);
643 if (ACPI_FAILURE(status)) {
644 return (status);
645 }
646
647 /* Update locals to point to the new package (of 1 element) */
648
649 return_object = *return_object_ptr;
650 elements = return_object->package.elements;
651 count = 1;
652 }
653
654 /* Examine the sub-packages */
655
656 status =
657 acpi_ns_check_package_list(data, package, elements, count);
658 break;
659
660 default:
661
662 /* Should not get here if predefined info table is correct */
663
664 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
665 "Invalid internal return type in table entry: %X",
666 package->ret_info.type));
667
668 return (AE_AML_INTERNAL);
669 }
670
671 return (status);
672
673package_too_small:
674
675 /* Error exit for the case with an incorrect package count */
676
677 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
678 "Return Package is too small - found %u elements, expected %u",
679 count, expected_count));
680
681 return (AE_AML_OPERAND_VALUE);
682}
683
684/*******************************************************************************
685 *
686 * FUNCTION: acpi_ns_check_package_list
687 *
688 * PARAMETERS: data - Pointer to validation data structure
689 * package - Pointer to package-specific info for method
690 * elements - Element list of parent package. All elements
691 * of this list should be of type Package.
692 * count - Count of subpackages
693 *
694 * RETURN: Status
695 *
696 * DESCRIPTION: Examine a list of subpackages
697 *
698 ******************************************************************************/
699
700static acpi_status
701acpi_ns_check_package_list(struct acpi_predefined_data *data,
702 const union acpi_predefined_info *package,
703 union acpi_operand_object **elements, u32 count)
704{
705 union acpi_operand_object *sub_package;
706 union acpi_operand_object **sub_elements;
707 acpi_status status;
708 u32 expected_count;
709 u32 i;
710 u32 j;
711
712 /*
713 * Validate each sub-Package in the parent Package
714 *
715 * NOTE: assumes list of sub-packages contains no NULL elements.
716 * Any NULL elements should have been removed by earlier call
717 * to acpi_ns_remove_null_elements.
718 */
719 for (i = 0; i < count; i++) {
720 sub_package = *elements;
721 sub_elements = sub_package->package.elements;
722 data->parent_package = sub_package;
723
724 /* Each sub-object must be of type Package */
725
726 status = acpi_ns_check_object_type(data, &sub_package,
727 ACPI_RTYPE_PACKAGE, i);
728 if (ACPI_FAILURE(status)) {
729 return (status);
730 }
731
732 /* Examine the different types of expected sub-packages */
733
734 data->parent_package = sub_package;
735 switch (package->ret_info.type) {
736 case ACPI_PTYPE2:
737 case ACPI_PTYPE2_PKG_COUNT:
738 case ACPI_PTYPE2_REV_FIXED:
739
740 /* Each subpackage has a fixed number of elements */
741
742 expected_count =
743 package->ret_info.count1 + package->ret_info.count2;
744 if (sub_package->package.count < expected_count) {
745 goto package_too_small;
746 }
747
748 status =
749 acpi_ns_check_package_elements(data, sub_elements,
750 package->ret_info.
751 object_type1,
752 package->ret_info.
753 count1,
754 package->ret_info.
755 object_type2,
756 package->ret_info.
757 count2, 0);
758 if (ACPI_FAILURE(status)) {
759 return (status);
760 }
761 break;
762
763 case ACPI_PTYPE2_FIX_VAR:
764 /*
765 * Each subpackage has a fixed number of elements and an
766 * optional element
767 */
768 expected_count =
769 package->ret_info.count1 + package->ret_info.count2;
770 if (sub_package->package.count < expected_count) {
771 goto package_too_small;
772 }
773
774 status =
775 acpi_ns_check_package_elements(data, sub_elements,
776 package->ret_info.
777 object_type1,
778 package->ret_info.
779 count1,
780 package->ret_info.
781 object_type2,
782 sub_package->package.
783 count -
784 package->ret_info.
785 count1, 0);
786 if (ACPI_FAILURE(status)) {
787 return (status);
788 }
789 break;
790
791 case ACPI_PTYPE2_FIXED:
792
793 /* Each sub-package has a fixed length */
794
795 expected_count = package->ret_info2.count;
796 if (sub_package->package.count < expected_count) {
797 goto package_too_small;
798 }
799
800 /* Check the type of each sub-package element */
801
802 for (j = 0; j < expected_count; j++) {
803 status =
804 acpi_ns_check_object_type(data,
805 &sub_elements[j],
806 package->
807 ret_info2.
808 object_type[j],
809 j);
810 if (ACPI_FAILURE(status)) {
811 return (status);
812 }
813 }
814 break;
815
816 case ACPI_PTYPE2_MIN:
817
818 /* Each sub-package has a variable but minimum length */
819
820 expected_count = package->ret_info.count1;
821 if (sub_package->package.count < expected_count) {
822 goto package_too_small;
823 }
824
825 /* Check the type of each sub-package element */
826
827 status =
828 acpi_ns_check_package_elements(data, sub_elements,
829 package->ret_info.
830 object_type1,
831 sub_package->package.
832 count, 0, 0, 0);
833 if (ACPI_FAILURE(status)) {
834 return (status);
835 }
836 break;
837
838 case ACPI_PTYPE2_COUNT:
839
840 /*
841 * First element is the (Integer) count of elements, including
842 * the count field (the ACPI name is num_elements)
843 */
844 status = acpi_ns_check_object_type(data, sub_elements,
845 ACPI_RTYPE_INTEGER,
846 0);
847 if (ACPI_FAILURE(status)) {
848 return (status);
849 }
850
851 /*
852 * Make sure package is large enough for the Count and is
853 * is as large as the minimum size
854 */
855 expected_count = (u32)(*sub_elements)->integer.value;
856 if (sub_package->package.count < expected_count) {
857 goto package_too_small;
858 }
859 if (sub_package->package.count <
860 package->ret_info.count1) {
861 expected_count = package->ret_info.count1;
862 goto package_too_small;
863 }
864 if (expected_count == 0) {
865 /*
866 * Either the num_entries element was originally zero or it was
867 * a NULL element and repaired to an Integer of value zero.
868 * In either case, repair it by setting num_entries to be the
869 * actual size of the subpackage.
870 */
871 expected_count = sub_package->package.count;
872 (*sub_elements)->integer.value = expected_count;
873 }
874
875 /* Check the type of each sub-package element */
876
877 status =
878 acpi_ns_check_package_elements(data,
879 (sub_elements + 1),
880 package->ret_info.
881 object_type1,
882 (expected_count - 1),
883 0, 0, 1);
884 if (ACPI_FAILURE(status)) {
885 return (status);
886 }
887 break;
888
889 default: /* Should not get here, type was validated by caller */
890
891 return (AE_AML_INTERNAL);
892 }
893
894 elements++;
895 }
896
897 return (AE_OK);
898
899package_too_small:
900
901 /* The sub-package count was smaller than required */
902
903 ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
904 "Return Sub-Package[%u] is too small - found %u elements, expected %u",
905 i, sub_package->package.count, expected_count));
906
907 return (AE_AML_OPERAND_VALUE);
908}
909
910/*******************************************************************************
911 *
912 * FUNCTION: acpi_ns_check_package_elements
913 *
914 * PARAMETERS: data - Pointer to validation data structure
915 * elements - Pointer to the package elements array
916 * type1 - Object type for first group
917 * count1 - Count for first group
918 * type2 - Object type for second group
919 * count2 - Count for second group
920 * start_index - Start of the first group of elements
921 *
922 * RETURN: Status
923 *
924 * DESCRIPTION: Check that all elements of a package are of the correct object
925 * type. Supports up to two groups of different object types.
926 *
927 ******************************************************************************/
928
929static acpi_status
930acpi_ns_check_package_elements(struct acpi_predefined_data *data,
931 union acpi_operand_object **elements,
932 u8 type1,
933 u32 count1,
934 u8 type2, u32 count2, u32 start_index)
935{
936 union acpi_operand_object **this_element = elements;
937 acpi_status status;
938 u32 i;
939
940 /*
941 * Up to two groups of package elements are supported by the data
942 * structure. All elements in each group must be of the same type.
943 * The second group can have a count of zero.
944 */
945 for (i = 0; i < count1; i++) {
946 status = acpi_ns_check_object_type(data, this_element,
947 type1, i + start_index);
948 if (ACPI_FAILURE(status)) {
949 return (status);
950 }
951 this_element++;
952 }
953
954 for (i = 0; i < count2; i++) {
955 status = acpi_ns_check_object_type(data, this_element,
956 type2,
957 (i + count1 + start_index));
958 if (ACPI_FAILURE(status)) {
959 return (status);
960 }
961 this_element++;
962 }
963
964 return (AE_OK);
965}
966
967/*******************************************************************************
968 *
969 * FUNCTION: acpi_ns_check_object_type 389 * FUNCTION: acpi_ns_check_object_type
970 * 390 *
971 * PARAMETERS: data - Pointer to validation data structure 391 * PARAMETERS: data - Pointer to validation data structure
@@ -983,7 +403,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
983 * 403 *
984 ******************************************************************************/ 404 ******************************************************************************/
985 405
986static acpi_status 406acpi_status
987acpi_ns_check_object_type(struct acpi_predefined_data *data, 407acpi_ns_check_object_type(struct acpi_predefined_data *data,
988 union acpi_operand_object **return_object_ptr, 408 union acpi_operand_object **return_object_ptr,
989 u32 expected_btypes, u32 package_index) 409 u32 expected_btypes, u32 package_index)
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
new file mode 100644
index 000000000000..a40155467d2e
--- /dev/null
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -0,0 +1,621 @@
1/******************************************************************************
2 *
3 * Module Name: nsprepkg - Validation of package objects for predefined names
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47#include "acpredef.h"
48
49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsprepkg")
51
52/* Local prototypes */
53static acpi_status
54acpi_ns_check_package_list(struct acpi_predefined_data *data,
55 const union acpi_predefined_info *package,
56 union acpi_operand_object **elements, u32 count);
57
58static acpi_status
59acpi_ns_check_package_elements(struct acpi_predefined_data *data,
60 union acpi_operand_object **elements,
61 u8 type1,
62 u32 count1,
63 u8 type2, u32 count2, u32 start_index);
64
65/*******************************************************************************
66 *
67 * FUNCTION: acpi_ns_check_package
68 *
69 * PARAMETERS: data - Pointer to validation data structure
70 * return_object_ptr - Pointer to the object returned from the
71 * evaluation of a method or object
72 *
73 * RETURN: Status
74 *
75 * DESCRIPTION: Check a returned package object for the correct count and
76 * correct type of all sub-objects.
77 *
78 ******************************************************************************/
79
acpi_status
acpi_ns_check_package(struct acpi_predefined_data *data,
		      union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	const union acpi_predefined_info *package;
	union acpi_operand_object **elements;
	acpi_status status = AE_OK;
	u32 expected_count;
	u32 count;
	u32 i;

	ACPI_FUNCTION_NAME(ns_check_package);

	/* The package info for this name is in the next table entry */

	package = data->predefined + 1;

	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
			  "%s Validating return Package of Type %X, Count %X\n",
			  data->pathname, package->ret_info.type,
			  return_object->package.count));

	/*
	 * For variable-length Packages, we can safely remove all embedded
	 * and trailing NULL package elements
	 */
	acpi_ns_remove_null_elements(data, package->ret_info.type,
				     return_object);

	/* Extract package count and elements array */

	elements = return_object->package.elements;
	count = return_object->package.count;

	/* The package must have at least one element, else invalid */

	if (!count) {
		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
				      "Return Package has no elements (empty)"));

		return (AE_AML_OPERAND_VALUE);
	}

	/*
	 * Decode the type of the expected package contents
	 *
	 * PTYPE1 packages contain no subpackages
	 * PTYPE2 packages contain sub-packages
	 */
	switch (package->ret_info.type) {
	case ACPI_PTYPE1_FIXED:

		/*
		 * The package count is fixed and there are no sub-packages
		 *
		 * If package is too small, exit.
		 * If package is larger than expected, issue warning but continue
		 */
		expected_count =
		    package->ret_info.count1 + package->ret_info.count2;
		if (count < expected_count) {
			goto package_too_small;
		} else if (count > expected_count) {
			/* Oversized package is tolerated; extra elements ignored */
			ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
					  "%s: Return Package is larger than needed - "
					  "found %u, expected %u\n",
					  data->pathname, count,
					  expected_count));
		}

		/* Validate all elements of the returned package */

		status = acpi_ns_check_package_elements(data, elements,
							package->ret_info.
							object_type1,
							package->ret_info.
							count1,
							package->ret_info.
							object_type2,
							package->ret_info.
							count2, 0);
		break;

	case ACPI_PTYPE1_VAR:

		/*
		 * The package count is variable, there are no sub-packages, and all
		 * elements must be of the same type
		 */
		for (i = 0; i < count; i++) {
			status = acpi_ns_check_object_type(data, elements,
							   package->ret_info.
							   object_type1, i);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
			elements++;
		}
		break;

	case ACPI_PTYPE1_OPTION:

		/*
		 * The package count is variable, there are no sub-packages. There are
		 * a fixed number of required elements, and a variable number of
		 * optional elements.
		 *
		 * Check if package is at least as large as the minimum required
		 */
		expected_count = package->ret_info3.count;
		if (count < expected_count) {
			goto package_too_small;
		}

		/* Variable number of sub-objects */

		for (i = 0; i < count; i++) {
			if (i < package->ret_info3.count) {

				/* These are the required package elements (0, 1, or 2) */

				status =
				    acpi_ns_check_object_type(data, elements,
							      package->
							      ret_info3.
							      object_type[i],
							      i);
				if (ACPI_FAILURE(status)) {
					return (status);
				}
			} else {
				/* These are the optional package elements */

				status =
				    acpi_ns_check_object_type(data, elements,
							      package->
							      ret_info3.
							      tail_object_type,
							      i);
				if (ACPI_FAILURE(status)) {
					return (status);
				}
			}
			elements++;
		}
		break;

	case ACPI_PTYPE2_REV_FIXED:

		/* First element is the (Integer) revision */

		status = acpi_ns_check_object_type(data, elements,
						   ACPI_RTYPE_INTEGER, 0);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		/* Skip the revision; validate the remaining sub-packages */

		elements++;
		count--;

		/* Examine the sub-packages */

		status =
		    acpi_ns_check_package_list(data, package, elements, count);
		break;

	case ACPI_PTYPE2_PKG_COUNT:

		/* First element is the (Integer) count of sub-packages to follow */

		status = acpi_ns_check_object_type(data, elements,
						   ACPI_RTYPE_INTEGER, 0);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		/*
		 * Count cannot be larger than the parent package length, but allow it
		 * to be smaller. The >= accounts for the Integer above.
		 */
		expected_count = (u32)(*elements)->integer.value;
		if (expected_count >= count) {
			goto package_too_small;
		}

		count = expected_count;
		elements++;

		/* Examine the sub-packages */

		status =
		    acpi_ns_check_package_list(data, package, elements, count);
		break;

	case ACPI_PTYPE2:
	case ACPI_PTYPE2_FIXED:
	case ACPI_PTYPE2_MIN:
	case ACPI_PTYPE2_COUNT:
	case ACPI_PTYPE2_FIX_VAR:

		/*
		 * These types all return a single Package that consists of a
		 * variable number of sub-Packages.
		 *
		 * First, ensure that the first element is a sub-Package. If not,
		 * the BIOS may have incorrectly returned the object as a single
		 * package instead of a Package of Packages (a common error if
		 * there is only one entry). We may be able to repair this by
		 * wrapping the returned Package with a new outer Package.
		 */
		if (*elements
		    && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {

			/* Create the new outer package and populate it */

			status =
			    acpi_ns_wrap_with_package(data, return_object,
						      return_object_ptr);
			if (ACPI_FAILURE(status)) {
				return (status);
			}

			/* Update locals to point to the new package (of 1 element) */

			return_object = *return_object_ptr;
			elements = return_object->package.elements;
			count = 1;
		}

		/* Examine the sub-packages */

		status =
		    acpi_ns_check_package_list(data, package, elements, count);
		break;

	default:

		/* Should not get here if predefined info table is correct */

		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
				      "Invalid internal return type in table entry: %X",
				      package->ret_info.type));

		return (AE_AML_INTERNAL);
	}

	return (status);

package_too_small:

	/* Error exit for the case with an incorrect package count */

	ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
			      "Return Package is too small - found %u elements, expected %u",
			      count, expected_count));

	return (AE_AML_OPERAND_VALUE);
}
339
340/*******************************************************************************
341 *
342 * FUNCTION: acpi_ns_check_package_list
343 *
344 * PARAMETERS: data - Pointer to validation data structure
345 * package - Pointer to package-specific info for method
346 * elements - Element list of parent package. All elements
347 * of this list should be of type Package.
348 * count - Count of subpackages
349 *
350 * RETURN: Status
351 *
352 * DESCRIPTION: Examine a list of subpackages
353 *
354 ******************************************************************************/
355
static acpi_status
acpi_ns_check_package_list(struct acpi_predefined_data *data,
			   const union acpi_predefined_info *package,
			   union acpi_operand_object **elements, u32 count)
{
	union acpi_operand_object *sub_package;
	union acpi_operand_object **sub_elements;
	acpi_status status;
	u32 expected_count;
	u32 i;
	u32 j;

	/*
	 * Validate each sub-Package in the parent Package
	 *
	 * NOTE: assumes list of sub-packages contains no NULL elements.
	 * Any NULL elements should have been removed by earlier call
	 * to acpi_ns_remove_null_elements.
	 */
	for (i = 0; i < count; i++) {
		sub_package = *elements;
		sub_elements = sub_package->package.elements;
		/*
		 * NOTE(review): parent_package is stored again after the type
		 * check below; this early store appears redundant — confirm
		 * whether acpi_ns_check_object_type relies on it before removing.
		 */
		data->parent_package = sub_package;

		/* Each sub-object must be of type Package */

		status = acpi_ns_check_object_type(data, &sub_package,
						   ACPI_RTYPE_PACKAGE, i);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		/* Examine the different types of expected sub-packages */

		data->parent_package = sub_package;
		switch (package->ret_info.type) {
		case ACPI_PTYPE2:
		case ACPI_PTYPE2_PKG_COUNT:
		case ACPI_PTYPE2_REV_FIXED:

			/* Each subpackage has a fixed number of elements */

			expected_count =
			    package->ret_info.count1 + package->ret_info.count2;
			if (sub_package->package.count < expected_count) {
				goto package_too_small;
			}

			status =
			    acpi_ns_check_package_elements(data, sub_elements,
							   package->ret_info.
							   object_type1,
							   package->ret_info.
							   count1,
							   package->ret_info.
							   object_type2,
							   package->ret_info.
							   count2, 0);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
			break;

		case ACPI_PTYPE2_FIX_VAR:
			/*
			 * Each subpackage has a fixed number of elements and an
			 * optional element
			 */
			expected_count =
			    package->ret_info.count1 + package->ret_info.count2;
			if (sub_package->package.count < expected_count) {
				goto package_too_small;
			}

			/* Remaining (count - count1) elements are the variable tail */

			status =
			    acpi_ns_check_package_elements(data, sub_elements,
							   package->ret_info.
							   object_type1,
							   package->ret_info.
							   count1,
							   package->ret_info.
							   object_type2,
							   sub_package->package.
							   count -
							   package->ret_info.
							   count1, 0);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
			break;

		case ACPI_PTYPE2_FIXED:

			/* Each sub-package has a fixed length */

			expected_count = package->ret_info2.count;
			if (sub_package->package.count < expected_count) {
				goto package_too_small;
			}

			/* Check the type of each sub-package element */

			for (j = 0; j < expected_count; j++) {
				status =
				    acpi_ns_check_object_type(data,
							      &sub_elements[j],
							      package->
							      ret_info2.
							      object_type[j],
							      j);
				if (ACPI_FAILURE(status)) {
					return (status);
				}
			}
			break;

		case ACPI_PTYPE2_MIN:

			/* Each sub-package has a variable but minimum length */

			expected_count = package->ret_info.count1;
			if (sub_package->package.count < expected_count) {
				goto package_too_small;
			}

			/* Check the type of each sub-package element */

			status =
			    acpi_ns_check_package_elements(data, sub_elements,
							   package->ret_info.
							   object_type1,
							   sub_package->package.
							   count, 0, 0, 0);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
			break;

		case ACPI_PTYPE2_COUNT:

			/*
			 * First element is the (Integer) count of elements, including
			 * the count field (the ACPI name is num_elements)
			 */
			status = acpi_ns_check_object_type(data, sub_elements,
							   ACPI_RTYPE_INTEGER,
							   0);
			if (ACPI_FAILURE(status)) {
				return (status);
			}

			/*
			 * Make sure package is large enough for the Count and
			 * is as large as the minimum size
			 */
			expected_count = (u32)(*sub_elements)->integer.value;
			if (sub_package->package.count < expected_count) {
				goto package_too_small;
			}
			if (sub_package->package.count <
			    package->ret_info.count1) {
				expected_count = package->ret_info.count1;
				goto package_too_small;
			}
			if (expected_count == 0) {
				/*
				 * Either the num_entries element was originally zero or it was
				 * a NULL element and repaired to an Integer of value zero.
				 * In either case, repair it by setting num_entries to be the
				 * actual size of the subpackage.
				 */
				expected_count = sub_package->package.count;
				(*sub_elements)->integer.value = expected_count;
			}

			/* Check the type of each sub-package element */

			status =
			    acpi_ns_check_package_elements(data,
							   (sub_elements + 1),
							   package->ret_info.
							   object_type1,
							   (expected_count - 1),
							   0, 0, 1);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
			break;

		default:	/* Should not get here, type was validated by caller */

			return (AE_AML_INTERNAL);
		}

		elements++;
	}

	return (AE_OK);

package_too_small:

	/* The sub-package count was smaller than required */

	ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
			      "Return Sub-Package[%u] is too small - found %u elements, expected %u",
			      i, sub_package->package.count, expected_count));

	return (AE_AML_OPERAND_VALUE);
}
565
566/*******************************************************************************
567 *
568 * FUNCTION: acpi_ns_check_package_elements
569 *
570 * PARAMETERS: data - Pointer to validation data structure
571 * elements - Pointer to the package elements array
572 * type1 - Object type for first group
573 * count1 - Count for first group
574 * type2 - Object type for second group
575 * count2 - Count for second group
576 * start_index - Start of the first group of elements
577 *
578 * RETURN: Status
579 *
580 * DESCRIPTION: Check that all elements of a package are of the correct object
581 * type. Supports up to two groups of different object types.
582 *
583 ******************************************************************************/
584
585static acpi_status
586acpi_ns_check_package_elements(struct acpi_predefined_data *data,
587 union acpi_operand_object **elements,
588 u8 type1,
589 u32 count1,
590 u8 type2, u32 count2, u32 start_index)
591{
592 union acpi_operand_object **this_element = elements;
593 acpi_status status;
594 u32 i;
595
596 /*
597 * Up to two groups of package elements are supported by the data
598 * structure. All elements in each group must be of the same type.
599 * The second group can have a count of zero.
600 */
601 for (i = 0; i < count1; i++) {
602 status = acpi_ns_check_object_type(data, this_element,
603 type1, i + start_index);
604 if (ACPI_FAILURE(status)) {
605 return (status);
606 }
607 this_element++;
608 }
609
610 for (i = 0; i < count2; i++) {
611 status = acpi_ns_check_object_type(data, this_element,
612 type2,
613 (i + count1 + start_index));
614 if (ACPI_FAILURE(status)) {
615 return (status);
616 }
617 this_element++;
618 }
619
620 return (AE_OK);
621}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 8c5f292860fc..9e833353c06a 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 90189251cdf0..ba4d98287c6a 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,8 @@ ACPI_MODULE_NAME("nsrepair2")
55 */ 55 */
56typedef 56typedef
57acpi_status(*acpi_repair_function) (struct acpi_predefined_data *data, 57acpi_status(*acpi_repair_function) (struct acpi_predefined_data *data,
58 union acpi_operand_object **return_object_ptr); 58 union acpi_operand_object
59 **return_object_ptr);
59 60
60typedef struct acpi_repair_info { 61typedef struct acpi_repair_info {
61 char name[ACPI_NAME_SIZE]; 62 char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 1d2d8ffc1bc5..5d43efc53a61 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -328,6 +328,11 @@ acpi_ns_search_and_enter(u32 target_name,
328 if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) { 328 if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
329 status = AE_ALREADY_EXISTS; 329 status = AE_ALREADY_EXISTS;
330 } 330 }
331#ifdef ACPI_ASL_COMPILER
332 if (*return_node && (*return_node)->type == ACPI_TYPE_ANY) {
333 (*return_node)->flags |= ANOBJ_IS_EXTERNAL;
334 }
335#endif
331 336
332 /* Either found it or there was an error: finished either way */ 337 /* Either found it or there was an error: finished either way */
333 338
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b5b4cb72a8a8..686420df684f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -46,14 +46,11 @@
46#include "accommon.h" 46#include "accommon.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "amlcode.h" 48#include "amlcode.h"
49#include "actables.h"
50 49
51#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
52ACPI_MODULE_NAME("nsutils") 51ACPI_MODULE_NAME("nsutils")
53 52
54/* Local prototypes */ 53/* Local prototypes */
55static u8 acpi_ns_valid_path_separator(char sep);
56
57#ifdef ACPI_OBSOLETE_FUNCTIONS 54#ifdef ACPI_OBSOLETE_FUNCTIONS
58acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); 55acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
59#endif 56#endif
@@ -99,42 +96,6 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
99 96
100/******************************************************************************* 97/*******************************************************************************
101 * 98 *
102 * FUNCTION: acpi_ns_valid_root_prefix
103 *
104 * PARAMETERS: prefix - Character to be checked
105 *
106 * RETURN: TRUE if a valid prefix
107 *
108 * DESCRIPTION: Check if a character is a valid ACPI Root prefix
109 *
110 ******************************************************************************/
111
112u8 acpi_ns_valid_root_prefix(char prefix)
113{
114
115 return ((u8) (prefix == '\\'));
116}
117
118/*******************************************************************************
119 *
120 * FUNCTION: acpi_ns_valid_path_separator
121 *
122 * PARAMETERS: sep - Character to be checked
123 *
124 * RETURN: TRUE if a valid path separator
125 *
126 * DESCRIPTION: Check if a character is a valid ACPI path separator
127 *
128 ******************************************************************************/
129
130static u8 acpi_ns_valid_path_separator(char sep)
131{
132
133 return ((u8) (sep == '.'));
134}
135
136/*******************************************************************************
137 *
138 * FUNCTION: acpi_ns_get_type 99 * FUNCTION: acpi_ns_get_type
139 * 100 *
140 * PARAMETERS: node - Parent Node to be examined 101 * PARAMETERS: node - Parent Node to be examined
@@ -151,10 +112,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
151 112
152 if (!node) { 113 if (!node) {
153 ACPI_WARNING((AE_INFO, "Null Node parameter")); 114 ACPI_WARNING((AE_INFO, "Null Node parameter"));
154 return_UINT32(ACPI_TYPE_ANY); 115 return_VALUE(ACPI_TYPE_ANY);
155 } 116 }
156 117
157 return_UINT32((acpi_object_type) node->type); 118 return_VALUE(node->type);
158} 119}
159 120
160/******************************************************************************* 121/*******************************************************************************
@@ -179,10 +140,10 @@ u32 acpi_ns_local(acpi_object_type type)
179 /* Type code out of range */ 140 /* Type code out of range */
180 141
181 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); 142 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
182 return_UINT32(ACPI_NS_NORMAL); 143 return_VALUE(ACPI_NS_NORMAL);
183 } 144 }
184 145
185 return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL); 146 return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
186} 147}
187 148
188/******************************************************************************* 149/*******************************************************************************
@@ -218,19 +179,19 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
218 * 179 *
219 * strlen() + 1 covers the first name_seg, which has no path separator 180 * strlen() + 1 covers the first name_seg, which has no path separator
220 */ 181 */
221 if (acpi_ns_valid_root_prefix(*next_external_char)) { 182 if (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
222 info->fully_qualified = TRUE; 183 info->fully_qualified = TRUE;
223 next_external_char++; 184 next_external_char++;
224 185
225 /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */ 186 /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
226 187
227 while (acpi_ns_valid_root_prefix(*next_external_char)) { 188 while (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
228 next_external_char++; 189 next_external_char++;
229 } 190 }
230 } else { 191 } else {
231 /* Handle Carat prefixes */ 192 /* Handle Carat prefixes */
232 193
233 while (*next_external_char == '^') { 194 while (ACPI_IS_PARENT_PREFIX(*next_external_char)) {
234 info->num_carats++; 195 info->num_carats++;
235 next_external_char++; 196 next_external_char++;
236 } 197 }
@@ -244,7 +205,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
244 if (*next_external_char) { 205 if (*next_external_char) {
245 info->num_segments = 1; 206 info->num_segments = 1;
246 for (i = 0; next_external_char[i]; i++) { 207 for (i = 0; next_external_char[i]; i++) {
247 if (acpi_ns_valid_path_separator(next_external_char[i])) { 208 if (ACPI_IS_PATH_SEPARATOR(next_external_char[i])) {
248 info->num_segments++; 209 info->num_segments++;
249 } 210 }
250 } 211 }
@@ -282,7 +243,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
282 /* Setup the correct prefixes, counts, and pointers */ 243 /* Setup the correct prefixes, counts, and pointers */
283 244
284 if (info->fully_qualified) { 245 if (info->fully_qualified) {
285 internal_name[0] = '\\'; 246 internal_name[0] = AML_ROOT_PREFIX;
286 247
287 if (num_segments <= 1) { 248 if (num_segments <= 1) {
288 result = &internal_name[1]; 249 result = &internal_name[1];
@@ -302,7 +263,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
302 i = 0; 263 i = 0;
303 if (info->num_carats) { 264 if (info->num_carats) {
304 for (i = 0; i < info->num_carats; i++) { 265 for (i = 0; i < info->num_carats; i++) {
305 internal_name[i] = '^'; 266 internal_name[i] = AML_PARENT_PREFIX;
306 } 267 }
307 } 268 }
308 269
@@ -322,7 +283,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
322 283
323 for (; num_segments; num_segments--) { 284 for (; num_segments; num_segments--) {
324 for (i = 0; i < ACPI_NAME_SIZE; i++) { 285 for (i = 0; i < ACPI_NAME_SIZE; i++) {
325 if (acpi_ns_valid_path_separator(*external_name) || 286 if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
326 (*external_name == 0)) { 287 (*external_name == 0)) {
327 288
328 /* Pad the segment with underscore(s) if segment is short */ 289 /* Pad the segment with underscore(s) if segment is short */
@@ -339,7 +300,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
339 300
340 /* Now we must have a path separator, or the pathname is bad */ 301 /* Now we must have a path separator, or the pathname is bad */
341 302
342 if (!acpi_ns_valid_path_separator(*external_name) && 303 if (!ACPI_IS_PATH_SEPARATOR(*external_name) &&
343 (*external_name != 0)) { 304 (*external_name != 0)) {
344 return_ACPI_STATUS(AE_BAD_PATHNAME); 305 return_ACPI_STATUS(AE_BAD_PATHNAME);
345 } 306 }
@@ -457,13 +418,13 @@ acpi_ns_externalize_name(u32 internal_name_length,
457 /* Check for a prefix (one '\' | one or more '^') */ 418 /* Check for a prefix (one '\' | one or more '^') */
458 419
459 switch (internal_name[0]) { 420 switch (internal_name[0]) {
460 case '\\': 421 case AML_ROOT_PREFIX:
461 prefix_length = 1; 422 prefix_length = 1;
462 break; 423 break;
463 424
464 case '^': 425 case AML_PARENT_PREFIX:
465 for (i = 0; i < internal_name_length; i++) { 426 for (i = 0; i < internal_name_length; i++) {
466 if (internal_name[i] == '^') { 427 if (ACPI_IS_PARENT_PREFIX(internal_name[i])) {
467 prefix_length = i + 1; 428 prefix_length = i + 1;
468 } else { 429 } else {
469 break; 430 break;
@@ -664,17 +625,17 @@ void acpi_ns_terminate(void)
664 625
665u32 acpi_ns_opens_scope(acpi_object_type type) 626u32 acpi_ns_opens_scope(acpi_object_type type)
666{ 627{
667 ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type)); 628 ACPI_FUNCTION_ENTRY();
668 629
669 if (!acpi_ut_valid_object_type(type)) { 630 if (type > ACPI_TYPE_LOCAL_MAX) {
670 631
671 /* type code out of range */ 632 /* type code out of range */
672 633
673 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type)); 634 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
674 return_UINT32(ACPI_NS_NORMAL); 635 return (ACPI_NS_NORMAL);
675 } 636 }
676 637
677 return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE); 638 return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
678} 639}
679 640
680/******************************************************************************* 641/*******************************************************************************
@@ -710,6 +671,8 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
710 671
711 ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname)); 672 ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));
712 673
674 /* Simplest case is a null pathname */
675
713 if (!pathname) { 676 if (!pathname) {
714 *return_node = prefix_node; 677 *return_node = prefix_node;
715 if (!prefix_node) { 678 if (!prefix_node) {
@@ -718,6 +681,13 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
718 return_ACPI_STATUS(AE_OK); 681 return_ACPI_STATUS(AE_OK);
719 } 682 }
720 683
684 /* Quick check for a reference to the root */
685
686 if (ACPI_IS_ROOT_PREFIX(pathname[0]) && (!pathname[1])) {
687 *return_node = acpi_gbl_root_node;
688 return_ACPI_STATUS(AE_OK);
689 }
690
721 /* Convert path to internal representation */ 691 /* Convert path to internal representation */
722 692
723 status = acpi_ns_internalize_name(pathname, &internal_path); 693 status = acpi_ns_internalize_name(pathname, &internal_path);
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 0483877f26b8..e70911a9e441 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -76,12 +76,12 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
76 76
77 /* It's really the parent's _scope_ that we want */ 77 /* It's really the parent's _scope_ that we want */
78 78
79 return parent_node->child; 79 return (parent_node->child);
80 } 80 }
81 81
82 /* Otherwise just return the next peer */ 82 /* Otherwise just return the next peer */
83 83
84 return child_node->peer; 84 return (child_node->peer);
85} 85}
86 86
87/******************************************************************************* 87/*******************************************************************************
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index d6a9f77972b6..fc69949151bb 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -236,7 +236,7 @@ acpi_evaluate_object(acpi_handle handle,
236 * 2) No handle, not fully qualified pathname (error) 236 * 2) No handle, not fully qualified pathname (error)
237 * 3) Valid handle 237 * 3) Valid handle
238 */ 238 */
239 if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) { 239 if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {
240 240
241 /* The path is fully qualified, just evaluate by name */ 241 /* The path is fully qualified, just evaluate by name */
242 242
@@ -492,7 +492,7 @@ acpi_walk_namespace(acpi_object_type type,
492 */ 492 */
493 status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock); 493 status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
494 if (ACPI_FAILURE(status)) { 494 if (ACPI_FAILURE(status)) {
495 return status; 495 return_ACPI_STATUS(status);
496 } 496 }
497 497
498 /* 498 /*
@@ -550,7 +550,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
550 550
551 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 551 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
552 if (ACPI_FAILURE(status)) { 552 if (ACPI_FAILURE(status)) {
553 return_ACPI_STATUS(status); 553 return (status);
554 } 554 }
555 555
556 node = acpi_ns_validate_handle(obj_handle); 556 node = acpi_ns_validate_handle(obj_handle);
@@ -602,17 +602,22 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
602 602
603 /* Walk the CID list */ 603 /* Walk the CID list */
604 604
605 found = 0; 605 found = FALSE;
606 for (i = 0; i < cid->count; i++) { 606 for (i = 0; i < cid->count; i++) {
607 if (ACPI_STRCMP(cid->ids[i].string, info->hid) 607 if (ACPI_STRCMP(cid->ids[i].string, info->hid)
608 == 0) { 608 == 0) {
609 found = 1; 609
610 /* Found a matching CID */
611
612 found = TRUE;
610 break; 613 break;
611 } 614 }
612 } 615 }
616
613 ACPI_FREE(cid); 617 ACPI_FREE(cid);
614 if (!found) 618 if (!found) {
615 return (AE_OK); 619 return (AE_OK);
620 }
616 } 621 }
617 } 622 }
618 623
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 811c6f13f476..f3a4d95899f7 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -107,7 +107,7 @@ acpi_get_handle(acpi_handle parent,
107 * 107 *
108 * Error for <null Parent + relative path> 108 * Error for <null Parent + relative path>
109 */ 109 */
110 if (acpi_ns_valid_root_prefix(pathname[0])) { 110 if (ACPI_IS_ROOT_PREFIX(pathname[0])) {
111 111
112 /* Pathname is fully qualified (starts with '\') */ 112 /* Pathname is fully qualified (starts with '\') */
113 113
@@ -290,7 +290,7 @@ acpi_get_object_info(acpi_handle handle,
290 290
291 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 291 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
292 if (ACPI_FAILURE(status)) { 292 if (ACPI_FAILURE(status)) {
293 goto cleanup; 293 return (status);
294 } 294 }
295 295
296 node = acpi_ns_validate_handle(handle); 296 node = acpi_ns_validate_handle(handle);
@@ -539,14 +539,14 @@ acpi_status acpi_install_method(u8 *buffer)
539 /* Parameter validation */ 539 /* Parameter validation */
540 540
541 if (!buffer) { 541 if (!buffer) {
542 return AE_BAD_PARAMETER; 542 return (AE_BAD_PARAMETER);
543 } 543 }
544 544
545 /* Table must be a DSDT or SSDT */ 545 /* Table must be a DSDT or SSDT */
546 546
547 if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) && 547 if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
548 !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) { 548 !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
549 return AE_BAD_HEADER; 549 return (AE_BAD_HEADER);
550 } 550 }
551 551
552 /* First AML opcode in the table must be a control method */ 552 /* First AML opcode in the table must be a control method */
@@ -554,7 +554,7 @@ acpi_status acpi_install_method(u8 *buffer)
554 parser_state.aml = buffer + sizeof(struct acpi_table_header); 554 parser_state.aml = buffer + sizeof(struct acpi_table_header);
555 opcode = acpi_ps_peek_opcode(&parser_state); 555 opcode = acpi_ps_peek_opcode(&parser_state);
556 if (opcode != AML_METHOD_OP) { 556 if (opcode != AML_METHOD_OP) {
557 return AE_BAD_PARAMETER; 557 return (AE_BAD_PARAMETER);
558 } 558 }
559 559
560 /* Extract method information from the raw AML */ 560 /* Extract method information from the raw AML */
@@ -572,13 +572,13 @@ acpi_status acpi_install_method(u8 *buffer)
572 */ 572 */
573 aml_buffer = ACPI_ALLOCATE(aml_length); 573 aml_buffer = ACPI_ALLOCATE(aml_length);
574 if (!aml_buffer) { 574 if (!aml_buffer) {
575 return AE_NO_MEMORY; 575 return (AE_NO_MEMORY);
576 } 576 }
577 577
578 method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); 578 method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
579 if (!method_obj) { 579 if (!method_obj) {
580 ACPI_FREE(aml_buffer); 580 ACPI_FREE(aml_buffer);
581 return AE_NO_MEMORY; 581 return (AE_NO_MEMORY);
582 } 582 }
583 583
584 /* Lock namespace for acpi_ns_lookup, we may be creating a new node */ 584 /* Lock namespace for acpi_ns_lookup, we may be creating a new node */
@@ -644,12 +644,12 @@ acpi_status acpi_install_method(u8 *buffer)
644 /* Remove local reference to the method object */ 644 /* Remove local reference to the method object */
645 645
646 acpi_ut_remove_reference(method_obj); 646 acpi_ut_remove_reference(method_obj);
647 return status; 647 return (status);
648 648
649error_exit: 649error_exit:
650 650
651 ACPI_FREE(aml_buffer); 651 ACPI_FREE(aml_buffer);
652 ACPI_FREE(method_obj); 652 ACPI_FREE(method_obj);
653 return status; 653 return (status);
654} 654}
655ACPI_EXPORT_SYMBOL(acpi_install_method) 655ACPI_EXPORT_SYMBOL(acpi_install_method)
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 9d029dac6b64..c0853ef294e4 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2012, Intel Corp. 9 * Copyright (C) 2000 - 2013, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index cb79e2d4d743..f51308cdbc65 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
108 /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */ 108 /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
109 109
110 package_length |= (aml[0] & byte_zero_mask); 110 package_length |= (aml[0] & byte_zero_mask);
111 return_UINT32(package_length); 111 return_VALUE(package_length);
112} 112}
113 113
114/******************************************************************************* 114/*******************************************************************************
@@ -162,7 +162,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
162 162
163 /* Point past any namestring prefix characters (backslash or carat) */ 163 /* Point past any namestring prefix characters (backslash or carat) */
164 164
165 while (acpi_ps_is_prefix_char(*end)) { 165 while (ACPI_IS_ROOT_PREFIX(*end) || ACPI_IS_PARENT_PREFIX(*end)) {
166 end++; 166 end++;
167 } 167 }
168 168
@@ -798,7 +798,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
798 subop = acpi_ps_peek_opcode(parser_state); 798 subop = acpi_ps_peek_opcode(parser_state);
799 if (subop == 0 || 799 if (subop == 0 ||
800 acpi_ps_is_leading_char(subop) || 800 acpi_ps_is_leading_char(subop) ||
801 acpi_ps_is_prefix_char(subop)) { 801 ACPI_IS_ROOT_PREFIX(subop) ||
802 ACPI_IS_PARENT_PREFIX(subop)) {
802 803
803 /* null_name or name_string */ 804 /* null_name or name_string */
804 805
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 5607805aab26..63c455447481 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -58,352 +58,17 @@
58#define _COMPONENT ACPI_PARSER 58#define _COMPONENT ACPI_PARSER
59ACPI_MODULE_NAME("psloop") 59ACPI_MODULE_NAME("psloop")
60 60
61static u32 acpi_gbl_depth = 0;
62
63/* Local prototypes */ 61/* Local prototypes */
64
65static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
66
67static acpi_status
68acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
69 u8 * aml_op_start,
70 union acpi_parse_object *unnamed_op,
71 union acpi_parse_object **op);
72
73static acpi_status
74acpi_ps_create_op(struct acpi_walk_state *walk_state,
75 u8 * aml_op_start, union acpi_parse_object **new_op);
76
77static acpi_status 62static acpi_status
78acpi_ps_get_arguments(struct acpi_walk_state *walk_state, 63acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
79 u8 * aml_op_start, union acpi_parse_object *op); 64 u8 * aml_op_start, union acpi_parse_object *op);
80 65
81static acpi_status
82acpi_ps_complete_op(struct acpi_walk_state *walk_state,
83 union acpi_parse_object **op, acpi_status status);
84
85static acpi_status
86acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
87 union acpi_parse_object *op, acpi_status status);
88
89static void 66static void
90acpi_ps_link_module_code(union acpi_parse_object *parent_op, 67acpi_ps_link_module_code(union acpi_parse_object *parent_op,
91 u8 *aml_start, u32 aml_length, acpi_owner_id owner_id); 68 u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
92 69
93/******************************************************************************* 70/*******************************************************************************
94 * 71 *
95 * FUNCTION: acpi_ps_get_aml_opcode
96 *
97 * PARAMETERS: walk_state - Current state
98 *
99 * RETURN: Status
100 *
101 * DESCRIPTION: Extract the next AML opcode from the input stream.
102 *
103 ******************************************************************************/
104
105static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
106{
107
108 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
109
110 walk_state->aml_offset =
111 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
112 walk_state->parser_state.aml_start);
113 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
114
115 /*
116 * First cut to determine what we have found:
117 * 1) A valid AML opcode
118 * 2) A name string
119 * 3) An unknown/invalid opcode
120 */
121 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
122
123 switch (walk_state->op_info->class) {
124 case AML_CLASS_ASCII:
125 case AML_CLASS_PREFIX:
126 /*
127 * Starts with a valid prefix or ASCII char, this is a name
128 * string. Convert the bare name string to a namepath.
129 */
130 walk_state->opcode = AML_INT_NAMEPATH_OP;
131 walk_state->arg_types = ARGP_NAMESTRING;
132 break;
133
134 case AML_CLASS_UNKNOWN:
135
136 /* The opcode is unrecognized. Complain and skip unknown opcodes */
137
138 if (walk_state->pass_number == 2) {
139 ACPI_ERROR((AE_INFO,
140 "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
141 walk_state->opcode,
142 (u32)(walk_state->aml_offset +
143 sizeof(struct acpi_table_header))));
144
145 ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48);
146
147#ifdef ACPI_ASL_COMPILER
148 /*
149 * This is executed for the disassembler only. Output goes
150 * to the disassembled ASL output file.
151 */
152 acpi_os_printf
153 ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
154 walk_state->opcode,
155 (u32)(walk_state->aml_offset +
156 sizeof(struct acpi_table_header)));
157
158 /* Dump the context surrounding the invalid opcode */
159
160 acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
161 aml - 16), 48, DB_BYTE_DISPLAY,
162 walk_state->aml_offset +
163 sizeof(struct acpi_table_header) -
164 16);
165 acpi_os_printf(" */\n");
166#endif
167 }
168
169 /* Increment past one-byte or two-byte opcode */
170
171 walk_state->parser_state.aml++;
172 if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
173 walk_state->parser_state.aml++;
174 }
175
176 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
177
178 default:
179
180 /* Found opcode info, this is a normal opcode */
181
182 walk_state->parser_state.aml +=
183 acpi_ps_get_opcode_size(walk_state->opcode);
184 walk_state->arg_types = walk_state->op_info->parse_args;
185 break;
186 }
187
188 return_ACPI_STATUS(AE_OK);
189}
190
191/*******************************************************************************
192 *
193 * FUNCTION: acpi_ps_build_named_op
194 *
195 * PARAMETERS: walk_state - Current state
196 * aml_op_start - Begin of named Op in AML
197 * unnamed_op - Early Op (not a named Op)
198 * op - Returned Op
199 *
200 * RETURN: Status
201 *
202 * DESCRIPTION: Parse a named Op
203 *
204 ******************************************************************************/
205
206static acpi_status
207acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
208 u8 * aml_op_start,
209 union acpi_parse_object *unnamed_op,
210 union acpi_parse_object **op)
211{
212 acpi_status status = AE_OK;
213 union acpi_parse_object *arg = NULL;
214
215 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
216
217 unnamed_op->common.value.arg = NULL;
218 unnamed_op->common.arg_list_length = 0;
219 unnamed_op->common.aml_opcode = walk_state->opcode;
220
221 /*
222 * Get and append arguments until we find the node that contains
223 * the name (the type ARGP_NAME).
224 */
225 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
226 (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
227 status =
228 acpi_ps_get_next_arg(walk_state,
229 &(walk_state->parser_state),
230 GET_CURRENT_ARG_TYPE(walk_state->
231 arg_types), &arg);
232 if (ACPI_FAILURE(status)) {
233 return_ACPI_STATUS(status);
234 }
235
236 acpi_ps_append_arg(unnamed_op, arg);
237 INCREMENT_ARG_LIST(walk_state->arg_types);
238 }
239
240 /*
241 * Make sure that we found a NAME and didn't run out of arguments
242 */
243 if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
244 return_ACPI_STATUS(AE_AML_NO_OPERAND);
245 }
246
247 /* We know that this arg is a name, move to next arg */
248
249 INCREMENT_ARG_LIST(walk_state->arg_types);
250
251 /*
252 * Find the object. This will either insert the object into
253 * the namespace or simply look it up
254 */
255 walk_state->op = NULL;
256
257 status = walk_state->descending_callback(walk_state, op);
258 if (ACPI_FAILURE(status)) {
259 ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
260 return_ACPI_STATUS(status);
261 }
262
263 if (!*op) {
264 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
265 }
266
267 status = acpi_ps_next_parse_state(walk_state, *op, status);
268 if (ACPI_FAILURE(status)) {
269 if (status == AE_CTRL_PENDING) {
270 return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
271 }
272 return_ACPI_STATUS(status);
273 }
274
275 acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
276 acpi_gbl_depth++;
277
278 if ((*op)->common.aml_opcode == AML_REGION_OP ||
279 (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
280 /*
281 * Defer final parsing of an operation_region body, because we don't
282 * have enough info in the first pass to parse it correctly (i.e.,
283 * there may be method calls within the term_arg elements of the body.)
284 *
285 * However, we must continue parsing because the opregion is not a
286 * standalone package -- we don't know where the end is at this point.
287 *
288 * (Length is unknown until parse of the body complete)
289 */
290 (*op)->named.data = aml_op_start;
291 (*op)->named.length = 0;
292 }
293
294 return_ACPI_STATUS(AE_OK);
295}
296
297/*******************************************************************************
298 *
299 * FUNCTION: acpi_ps_create_op
300 *
301 * PARAMETERS: walk_state - Current state
302 * aml_op_start - Op start in AML
303 * new_op - Returned Op
304 *
305 * RETURN: Status
306 *
307 * DESCRIPTION: Get Op from AML
308 *
309 ******************************************************************************/
310
311static acpi_status
312acpi_ps_create_op(struct acpi_walk_state *walk_state,
313 u8 * aml_op_start, union acpi_parse_object **new_op)
314{
315 acpi_status status = AE_OK;
316 union acpi_parse_object *op;
317 union acpi_parse_object *named_op = NULL;
318 union acpi_parse_object *parent_scope;
319 u8 argument_count;
320 const struct acpi_opcode_info *op_info;
321
322 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
323
324 status = acpi_ps_get_aml_opcode(walk_state);
325 if (status == AE_CTRL_PARSE_CONTINUE) {
326 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
327 }
328
329 /* Create Op structure and append to parent's argument list */
330
331 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
332 op = acpi_ps_alloc_op(walk_state->opcode);
333 if (!op) {
334 return_ACPI_STATUS(AE_NO_MEMORY);
335 }
336
337 if (walk_state->op_info->flags & AML_NAMED) {
338 status =
339 acpi_ps_build_named_op(walk_state, aml_op_start, op,
340 &named_op);
341 acpi_ps_free_op(op);
342 if (ACPI_FAILURE(status)) {
343 return_ACPI_STATUS(status);
344 }
345
346 *new_op = named_op;
347 return_ACPI_STATUS(AE_OK);
348 }
349
350 /* Not a named opcode, just allocate Op and append to parent */
351
352 if (walk_state->op_info->flags & AML_CREATE) {
353 /*
354 * Backup to beginning of create_XXXfield declaration
355 * body_length is unknown until we parse the body
356 */
357 op->named.data = aml_op_start;
358 op->named.length = 0;
359 }
360
361 if (walk_state->opcode == AML_BANK_FIELD_OP) {
362 /*
363 * Backup to beginning of bank_field declaration
364 * body_length is unknown until we parse the body
365 */
366 op->named.data = aml_op_start;
367 op->named.length = 0;
368 }
369
370 parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
371 acpi_ps_append_arg(parent_scope, op);
372
373 if (parent_scope) {
374 op_info =
375 acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
376 if (op_info->flags & AML_HAS_TARGET) {
377 argument_count =
378 acpi_ps_get_argument_count(op_info->type);
379 if (parent_scope->common.arg_list_length >
380 argument_count) {
381 op->common.flags |= ACPI_PARSEOP_TARGET;
382 }
383 } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
384 op->common.flags |= ACPI_PARSEOP_TARGET;
385 }
386 }
387
388 if (walk_state->descending_callback != NULL) {
389 /*
390 * Find the object. This will either insert the object into
391 * the namespace or simply look it up
392 */
393 walk_state->op = *new_op = op;
394
395 status = walk_state->descending_callback(walk_state, &op);
396 status = acpi_ps_next_parse_state(walk_state, op, status);
397 if (status == AE_CTRL_PENDING) {
398 status = AE_CTRL_PARSE_PENDING;
399 }
400 }
401
402 return_ACPI_STATUS(status);
403}
404
405/*******************************************************************************
406 *
407 * FUNCTION: acpi_ps_get_arguments 72 * FUNCTION: acpi_ps_get_arguments
408 * 73 *
409 * PARAMETERS: walk_state - Current state 74 * PARAMETERS: walk_state - Current state
@@ -711,288 +376,6 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
711 376
712/******************************************************************************* 377/*******************************************************************************
713 * 378 *
714 * FUNCTION: acpi_ps_complete_op
715 *
716 * PARAMETERS: walk_state - Current state
717 * op - Returned Op
718 * status - Parse status before complete Op
719 *
720 * RETURN: Status
721 *
722 * DESCRIPTION: Complete Op
723 *
724 ******************************************************************************/
725
726static acpi_status
727acpi_ps_complete_op(struct acpi_walk_state *walk_state,
728 union acpi_parse_object **op, acpi_status status)
729{
730 acpi_status status2;
731
732 ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
733
734 /*
735 * Finished one argument of the containing scope
736 */
737 walk_state->parser_state.scope->parse_scope.arg_count--;
738
739 /* Close this Op (will result in parse subtree deletion) */
740
741 status2 = acpi_ps_complete_this_op(walk_state, *op);
742 if (ACPI_FAILURE(status2)) {
743 return_ACPI_STATUS(status2);
744 }
745
746 *op = NULL;
747
748 switch (status) {
749 case AE_OK:
750 break;
751
752 case AE_CTRL_TRANSFER:
753
754 /* We are about to transfer to a called method */
755
756 walk_state->prev_op = NULL;
757 walk_state->prev_arg_types = walk_state->arg_types;
758 return_ACPI_STATUS(status);
759
760 case AE_CTRL_END:
761
762 acpi_ps_pop_scope(&(walk_state->parser_state), op,
763 &walk_state->arg_types,
764 &walk_state->arg_count);
765
766 if (*op) {
767 walk_state->op = *op;
768 walk_state->op_info =
769 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
770 walk_state->opcode = (*op)->common.aml_opcode;
771
772 status = walk_state->ascending_callback(walk_state);
773 status =
774 acpi_ps_next_parse_state(walk_state, *op, status);
775
776 status2 = acpi_ps_complete_this_op(walk_state, *op);
777 if (ACPI_FAILURE(status2)) {
778 return_ACPI_STATUS(status2);
779 }
780 }
781
782 status = AE_OK;
783 break;
784
785 case AE_CTRL_BREAK:
786 case AE_CTRL_CONTINUE:
787
788 /* Pop off scopes until we find the While */
789
790 while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
791 acpi_ps_pop_scope(&(walk_state->parser_state), op,
792 &walk_state->arg_types,
793 &walk_state->arg_count);
794 }
795
796 /* Close this iteration of the While loop */
797
798 walk_state->op = *op;
799 walk_state->op_info =
800 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
801 walk_state->opcode = (*op)->common.aml_opcode;
802
803 status = walk_state->ascending_callback(walk_state);
804 status = acpi_ps_next_parse_state(walk_state, *op, status);
805
806 status2 = acpi_ps_complete_this_op(walk_state, *op);
807 if (ACPI_FAILURE(status2)) {
808 return_ACPI_STATUS(status2);
809 }
810
811 status = AE_OK;
812 break;
813
814 case AE_CTRL_TERMINATE:
815
816 /* Clean up */
817 do {
818 if (*op) {
819 status2 =
820 acpi_ps_complete_this_op(walk_state, *op);
821 if (ACPI_FAILURE(status2)) {
822 return_ACPI_STATUS(status2);
823 }
824
825 acpi_ut_delete_generic_state
826 (acpi_ut_pop_generic_state
827 (&walk_state->control_state));
828 }
829
830 acpi_ps_pop_scope(&(walk_state->parser_state), op,
831 &walk_state->arg_types,
832 &walk_state->arg_count);
833
834 } while (*op);
835
836 return_ACPI_STATUS(AE_OK);
837
838 default: /* All other non-AE_OK status */
839
840 do {
841 if (*op) {
842 status2 =
843 acpi_ps_complete_this_op(walk_state, *op);
844 if (ACPI_FAILURE(status2)) {
845 return_ACPI_STATUS(status2);
846 }
847 }
848
849 acpi_ps_pop_scope(&(walk_state->parser_state), op,
850 &walk_state->arg_types,
851 &walk_state->arg_count);
852
853 } while (*op);
854
855#if 0
856 /*
857 * TBD: Cleanup parse ops on error
858 */
859 if (*op == NULL) {
860 acpi_ps_pop_scope(parser_state, op,
861 &walk_state->arg_types,
862 &walk_state->arg_count);
863 }
864#endif
865 walk_state->prev_op = NULL;
866 walk_state->prev_arg_types = walk_state->arg_types;
867 return_ACPI_STATUS(status);
868 }
869
870 /* This scope complete? */
871
872 if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
873 acpi_ps_pop_scope(&(walk_state->parser_state), op,
874 &walk_state->arg_types,
875 &walk_state->arg_count);
876 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
877 } else {
878 *op = NULL;
879 }
880
881 return_ACPI_STATUS(AE_OK);
882}
883
884/*******************************************************************************
885 *
886 * FUNCTION: acpi_ps_complete_final_op
887 *
888 * PARAMETERS: walk_state - Current state
889 * op - Current Op
890 * status - Current parse status before complete last
891 * Op
892 *
893 * RETURN: Status
894 *
895 * DESCRIPTION: Complete last Op.
896 *
897 ******************************************************************************/
898
899static acpi_status
900acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
901 union acpi_parse_object *op, acpi_status status)
902{
903 acpi_status status2;
904
905 ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
906
907 /*
908 * Complete the last Op (if not completed), and clear the scope stack.
909 * It is easily possible to end an AML "package" with an unbounded number
910 * of open scopes (such as when several ASL blocks are closed with
911 * sequential closing braces). We want to terminate each one cleanly.
912 */
913 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
914 op));
915 do {
916 if (op) {
917 if (walk_state->ascending_callback != NULL) {
918 walk_state->op = op;
919 walk_state->op_info =
920 acpi_ps_get_opcode_info(op->common.
921 aml_opcode);
922 walk_state->opcode = op->common.aml_opcode;
923
924 status =
925 walk_state->ascending_callback(walk_state);
926 status =
927 acpi_ps_next_parse_state(walk_state, op,
928 status);
929 if (status == AE_CTRL_PENDING) {
930 status =
931 acpi_ps_complete_op(walk_state, &op,
932 AE_OK);
933 if (ACPI_FAILURE(status)) {
934 return_ACPI_STATUS(status);
935 }
936 }
937
938 if (status == AE_CTRL_TERMINATE) {
939 status = AE_OK;
940
941 /* Clean up */
942 do {
943 if (op) {
944 status2 =
945 acpi_ps_complete_this_op
946 (walk_state, op);
947 if (ACPI_FAILURE
948 (status2)) {
949 return_ACPI_STATUS
950 (status2);
951 }
952 }
953
954 acpi_ps_pop_scope(&
955 (walk_state->
956 parser_state),
957 &op,
958 &walk_state->
959 arg_types,
960 &walk_state->
961 arg_count);
962
963 } while (op);
964
965 return_ACPI_STATUS(status);
966 }
967
968 else if (ACPI_FAILURE(status)) {
969
970 /* First error is most important */
971
972 (void)
973 acpi_ps_complete_this_op(walk_state,
974 op);
975 return_ACPI_STATUS(status);
976 }
977 }
978
979 status2 = acpi_ps_complete_this_op(walk_state, op);
980 if (ACPI_FAILURE(status2)) {
981 return_ACPI_STATUS(status2);
982 }
983 }
984
985 acpi_ps_pop_scope(&(walk_state->parser_state), &op,
986 &walk_state->arg_types,
987 &walk_state->arg_count);
988
989 } while (op);
990
991 return_ACPI_STATUS(status);
992}
993
994/*******************************************************************************
995 *
996 * FUNCTION: acpi_ps_parse_loop 379 * FUNCTION: acpi_ps_parse_loop
997 * 380 *
998 * PARAMETERS: walk_state - Current state 381 * PARAMETERS: walk_state - Current state
@@ -1177,10 +560,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
1177 walk_state->op_info = 560 walk_state->op_info =
1178 acpi_ps_get_opcode_info(op->common.aml_opcode); 561 acpi_ps_get_opcode_info(op->common.aml_opcode);
1179 if (walk_state->op_info->flags & AML_NAMED) { 562 if (walk_state->op_info->flags & AML_NAMED) {
1180 if (acpi_gbl_depth) {
1181 acpi_gbl_depth--;
1182 }
1183
1184 if (op->common.aml_opcode == AML_REGION_OP || 563 if (op->common.aml_opcode == AML_REGION_OP ||
1185 op->common.aml_opcode == AML_DATA_REGION_OP) { 564 op->common.aml_opcode == AML_DATA_REGION_OP) {
1186 /* 565 /*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
new file mode 100644
index 000000000000..12c4028002b1
--- /dev/null
+++ b/drivers/acpi/acpica/psobject.c
@@ -0,0 +1,647 @@
1/******************************************************************************
2 *
3 * Module Name: psobject - Support for parse objects
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acparser.h"
47#include "amlcode.h"
48
49#define _COMPONENT ACPI_PARSER
50ACPI_MODULE_NAME("psobject")
51
52/* Local prototypes */
53static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
54
55/*******************************************************************************
56 *
57 * FUNCTION: acpi_ps_get_aml_opcode
58 *
59 * PARAMETERS: walk_state - Current state
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: Extract the next AML opcode from the input stream.
64 *
65 ******************************************************************************/
66
static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);

	/* Record the offset of this opcode within the AML stream, then peek it */

	walk_state->aml_offset =
	    (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
			       walk_state->parser_state.aml_start);
	walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));

	/*
	 * First cut to determine what we have found:
	 * 1) A valid AML opcode
	 * 2) A name string
	 * 3) An unknown/invalid opcode
	 */
	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);

	switch (walk_state->op_info->class) {
	case AML_CLASS_ASCII:
	case AML_CLASS_PREFIX:
		/*
		 * Starts with a valid prefix or ASCII char, this is a name
		 * string. Convert the bare name string to a namepath.
		 */
		walk_state->opcode = AML_INT_NAMEPATH_OP;
		walk_state->arg_types = ARGP_NAMESTRING;
		break;

	case AML_CLASS_UNKNOWN:

		/* The opcode is unrecognized. Complain and skip unknown opcodes */

		/* Only report on the second (execution) pass to avoid duplicate errors */

		if (walk_state->pass_number == 2) {
			ACPI_ERROR((AE_INFO,
				    "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
				    walk_state->opcode,
				    (u32)(walk_state->aml_offset +
					  sizeof(struct acpi_table_header))));

			/* Dump 16 bytes before and 32 after the bad opcode */

			ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
					 48);

#ifdef ACPI_ASL_COMPILER
			/*
			 * This is executed for the disassembler only. Output goes
			 * to the disassembled ASL output file.
			 */
			acpi_os_printf
			    ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
			     walk_state->opcode,
			     (u32)(walk_state->aml_offset +
				   sizeof(struct acpi_table_header)));

			/* Dump the context surrounding the invalid opcode */

			acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
					     aml - 16), 48, DB_BYTE_DISPLAY,
					    (walk_state->aml_offset +
					     sizeof(struct acpi_table_header) -
					     16));
			acpi_os_printf(" */\n");
#endif
		}

		/* Increment past one-byte or two-byte opcode */

		walk_state->parser_state.aml++;
		if (walk_state->opcode > 0xFF) {	/* Can only happen if first byte is 0x5B */
			walk_state->parser_state.aml++;
		}

		/* Tell the caller to skip this opcode and continue parsing */

		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);

	default:

		/* Found opcode info, this is a normal opcode */

		walk_state->parser_state.aml +=
		    acpi_ps_get_opcode_size(walk_state->opcode);
		walk_state->arg_types = walk_state->op_info->parse_args;
		break;
	}

	return_ACPI_STATUS(AE_OK);
}
153
154/*******************************************************************************
155 *
156 * FUNCTION: acpi_ps_build_named_op
157 *
158 * PARAMETERS: walk_state - Current state
159 * aml_op_start - Begin of named Op in AML
160 * unnamed_op - Early Op (not a named Op)
161 * op - Returned Op
162 *
163 * RETURN: Status
164 *
165 * DESCRIPTION: Parse a named Op
166 *
167 ******************************************************************************/
168
acpi_status
acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
		       u8 *aml_op_start,
		       union acpi_parse_object *unnamed_op,
		       union acpi_parse_object **op)
{
	acpi_status status = AE_OK;
	union acpi_parse_object *arg = NULL;

	ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);

	/* Reuse the caller-supplied temporary Op to collect the leading arguments */

	unnamed_op->common.value.arg = NULL;
	unnamed_op->common.arg_list_length = 0;
	unnamed_op->common.aml_opcode = walk_state->opcode;

	/*
	 * Get and append arguments until we find the node that contains
	 * the name (the type ARGP_NAME).
	 */
	while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
	       (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
		status =
		    acpi_ps_get_next_arg(walk_state,
					 &(walk_state->parser_state),
					 GET_CURRENT_ARG_TYPE(walk_state->
							      arg_types), &arg);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		acpi_ps_append_arg(unnamed_op, arg);
		INCREMENT_ARG_LIST(walk_state->arg_types);
	}

	/*
	 * Make sure that we found a NAME and didn't run out of arguments
	 */
	if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}

	/* We know that this arg is a name, move to next arg */

	INCREMENT_ARG_LIST(walk_state->arg_types);

	/*
	 * Find the object. This will either insert the object into
	 * the namespace or simply look it up
	 */
	walk_state->op = NULL;

	status = walk_state->descending_callback(walk_state, op);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
		return_ACPI_STATUS(status);
	}

	/* Callback may legitimately produce no Op; tell caller to keep parsing */

	if (!*op) {
		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
	}

	status = acpi_ps_next_parse_state(walk_state, *op, status);
	if (ACPI_FAILURE(status)) {
		if (status == AE_CTRL_PENDING) {
			return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
		}
		return_ACPI_STATUS(status);
	}

	/* Transfer the collected leading arguments onto the real (named) Op */

	acpi_ps_append_arg(*op, unnamed_op->common.value.arg);

	if ((*op)->common.aml_opcode == AML_REGION_OP ||
	    (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
		/*
		 * Defer final parsing of an operation_region body, because we don't
		 * have enough info in the first pass to parse it correctly (i.e.,
		 * there may be method calls within the term_arg elements of the body.)
		 *
		 * However, we must continue parsing because the opregion is not a
		 * standalone package -- we don't know where the end is at this point.
		 *
		 * (Length is unknown until parse of the body complete)
		 */
		(*op)->named.data = aml_op_start;
		(*op)->named.length = 0;
	}

	return_ACPI_STATUS(AE_OK);
}
258
259/*******************************************************************************
260 *
261 * FUNCTION: acpi_ps_create_op
262 *
263 * PARAMETERS: walk_state - Current state
264 * aml_op_start - Op start in AML
265 * new_op - Returned Op
266 *
267 * RETURN: Status
268 *
269 * DESCRIPTION: Get Op from AML
270 *
271 ******************************************************************************/
272
acpi_status
acpi_ps_create_op(struct acpi_walk_state *walk_state,
		  u8 *aml_op_start, union acpi_parse_object **new_op)
{
	acpi_status status = AE_OK;
	union acpi_parse_object *op;
	union acpi_parse_object *named_op = NULL;
	union acpi_parse_object *parent_scope;
	u8 argument_count;
	const struct acpi_opcode_info *op_info;

	ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);

	/* Extract the next opcode; AE_CTRL_PARSE_CONTINUE means "skip it" */

	status = acpi_ps_get_aml_opcode(walk_state);
	if (status == AE_CTRL_PARSE_CONTINUE) {
		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
	}

	/* Create Op structure and append to parent's argument list */

	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
	op = acpi_ps_alloc_op(walk_state->opcode);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	if (walk_state->op_info->flags & AML_NAMED) {
		/*
		 * Named opcode: Op above is only a temporary arg holder;
		 * the real Op comes back in named_op, so free the temporary
		 * regardless of status.
		 */
		status =
		    acpi_ps_build_named_op(walk_state, aml_op_start, op,
					   &named_op);
		acpi_ps_free_op(op);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		*new_op = named_op;
		return_ACPI_STATUS(AE_OK);
	}

	/* Not a named opcode, just allocate Op and append to parent */

	if (walk_state->op_info->flags & AML_CREATE) {
		/*
		 * Backup to beginning of create_XXXfield declaration
		 * body_length is unknown until we parse the body
		 */
		op->named.data = aml_op_start;
		op->named.length = 0;
	}

	if (walk_state->opcode == AML_BANK_FIELD_OP) {
		/*
		 * Backup to beginning of bank_field declaration
		 * body_length is unknown until we parse the body
		 */
		op->named.data = aml_op_start;
		op->named.length = 0;
	}

	parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
	acpi_ps_append_arg(parent_scope, op);

	if (parent_scope) {
		/*
		 * Mark this Op as a target if it exceeds the parent's fixed
		 * argument count, or if the parent is an Increment.
		 */
		op_info =
		    acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
		if (op_info->flags & AML_HAS_TARGET) {
			argument_count =
			    acpi_ps_get_argument_count(op_info->type);
			if (parent_scope->common.arg_list_length >
			    argument_count) {
				op->common.flags |= ACPI_PARSEOP_TARGET;
			}
		} else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
			op->common.flags |= ACPI_PARSEOP_TARGET;
		}
	}

	if (walk_state->descending_callback != NULL) {
		/*
		 * Find the object. This will either insert the object into
		 * the namespace or simply look it up
		 */
		walk_state->op = *new_op = op;

		status = walk_state->descending_callback(walk_state, &op);
		status = acpi_ps_next_parse_state(walk_state, op, status);
		if (status == AE_CTRL_PENDING) {
			status = AE_CTRL_PARSE_PENDING;
		}
	}

	return_ACPI_STATUS(status);
}
366
367/*******************************************************************************
368 *
369 * FUNCTION: acpi_ps_complete_op
370 *
371 * PARAMETERS: walk_state - Current state
372 * op - Returned Op
373 * status - Parse status before complete Op
374 *
375 * RETURN: Status
376 *
377 * DESCRIPTION: Complete Op
378 *
379 ******************************************************************************/
380
acpi_status
acpi_ps_complete_op(struct acpi_walk_state *walk_state,
		    union acpi_parse_object **op, acpi_status status)
{
	acpi_status status2;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);

	/*
	 * Finished one argument of the containing scope
	 */
	walk_state->parser_state.scope->parse_scope.arg_count--;

	/* Close this Op (will result in parse subtree deletion) */

	status2 = acpi_ps_complete_this_op(walk_state, *op);
	if (ACPI_FAILURE(status2)) {
		return_ACPI_STATUS(status2);
	}

	*op = NULL;

	/* Dispatch on the parse status that accompanied this Op */

	switch (status) {
	case AE_OK:
		break;

	case AE_CTRL_TRANSFER:

		/* We are about to transfer to a called method */

		walk_state->prev_op = NULL;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);

	case AE_CTRL_END:

		/* Pop to the parent scope and close it out via the ascending callback */

		acpi_ps_pop_scope(&(walk_state->parser_state), op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);

		if (*op) {
			walk_state->op = *op;
			walk_state->op_info =
			    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
			walk_state->opcode = (*op)->common.aml_opcode;

			status = walk_state->ascending_callback(walk_state);
			status =
			    acpi_ps_next_parse_state(walk_state, *op, status);

			status2 = acpi_ps_complete_this_op(walk_state, *op);
			if (ACPI_FAILURE(status2)) {
				return_ACPI_STATUS(status2);
			}
		}

		status = AE_OK;
		break;

	case AE_CTRL_BREAK:
	case AE_CTRL_CONTINUE:

		/* Pop off scopes until we find the While */

		while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);
		}

		/* Close this iteration of the While loop */

		walk_state->op = *op;
		walk_state->op_info =
		    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
		walk_state->opcode = (*op)->common.aml_opcode;

		status = walk_state->ascending_callback(walk_state);
		status = acpi_ps_next_parse_state(walk_state, *op, status);

		status2 = acpi_ps_complete_this_op(walk_state, *op);
		if (ACPI_FAILURE(status2)) {
			return_ACPI_STATUS(status2);
		}

		status = AE_OK;
		break;

	case AE_CTRL_TERMINATE:

		/* Clean up: unwind every open scope and its control state */
		do {
			if (*op) {
				status2 =
				    acpi_ps_complete_this_op(walk_state, *op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}

				/* Discard the control state pushed for this Op */

				acpi_ut_delete_generic_state
				    (acpi_ut_pop_generic_state
				     (&walk_state->control_state));
			}

			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (*op);

		return_ACPI_STATUS(AE_OK);

	default:		/* All other non-AE_OK status */

		/* Error path: unwind every open scope, then propagate the status */

		do {
			if (*op) {
				status2 =
				    acpi_ps_complete_this_op(walk_state, *op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
			}

			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (*op);

#if 0
		/*
		 * TBD: Cleanup parse ops on error
		 */
		if (*op == NULL) {
			acpi_ps_pop_scope(parser_state, op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);
		}
#endif
		walk_state->prev_op = NULL;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);
	}

	/* This scope complete? */

	if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
		acpi_ps_pop_scope(&(walk_state->parser_state), op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);
		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
	} else {
		*op = NULL;
	}

	return_ACPI_STATUS(AE_OK);
}
538
539/*******************************************************************************
540 *
541 * FUNCTION: acpi_ps_complete_final_op
542 *
543 * PARAMETERS: walk_state - Current state
544 * op - Current Op
545 * status - Current parse status before complete last
546 * Op
547 *
548 * RETURN: Status
549 *
550 * DESCRIPTION: Complete last Op.
551 *
552 ******************************************************************************/
553
acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
			  union acpi_parse_object *op, acpi_status status)
{
	acpi_status status2;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);

	/*
	 * Complete the last Op (if not completed), and clear the scope stack.
	 * It is easily possible to end an AML "package" with an unbounded number
	 * of open scopes (such as when several ASL blocks are closed with
	 * sequential closing braces). We want to terminate each one cleanly.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
			  op));
	do {
		if (op) {
			if (walk_state->ascending_callback != NULL) {
				/* Give the walk a chance to finalize this Op */

				walk_state->op = op;
				walk_state->op_info =
				    acpi_ps_get_opcode_info(op->common.
							    aml_opcode);
				walk_state->opcode = op->common.aml_opcode;

				status =
				    walk_state->ascending_callback(walk_state);
				status =
				    acpi_ps_next_parse_state(walk_state, op,
							     status);
				if (status == AE_CTRL_PENDING) {
					status =
					    acpi_ps_complete_op(walk_state, &op,
								AE_OK);
					if (ACPI_FAILURE(status)) {
						return_ACPI_STATUS(status);
					}
				}

				if (status == AE_CTRL_TERMINATE) {
					status = AE_OK;

					/* Clean up */
					do {
						if (op) {
							status2 =
							    acpi_ps_complete_this_op
							    (walk_state, op);
							if (ACPI_FAILURE
							    (status2)) {
								return_ACPI_STATUS
								    (status2);
							}
						}

						acpi_ps_pop_scope(&
								  (walk_state->
								   parser_state),
								  &op,
								  &walk_state->
								  arg_types,
								  &walk_state->
								  arg_count);

					} while (op);

					return_ACPI_STATUS(status);
				}

				else if (ACPI_FAILURE(status)) {

					/* First error is most important */

					(void)
					    acpi_ps_complete_this_op(walk_state,
								     op);
					return_ACPI_STATUS(status);
				}
			}

			status2 = acpi_ps_complete_this_op(walk_state, op);
			if (ACPI_FAILURE(status2)) {
				return_ACPI_STATUS(status2);
			}
		}

		/* Move up one level in the scope stack and repeat */

		acpi_ps_pop_scope(&(walk_state->parser_state), &op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);

	} while (op);

	return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1793d934aa30..1b659e59710a 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,16 +43,12 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "acparser.h"
47#include "acopcode.h" 46#include "acopcode.h"
48#include "amlcode.h" 47#include "amlcode.h"
49 48
50#define _COMPONENT ACPI_PARSER 49#define _COMPONENT ACPI_PARSER
51ACPI_MODULE_NAME("psopcode") 50ACPI_MODULE_NAME("psopcode")
52 51
53static const u8 acpi_gbl_argument_count[] =
54 { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
55
56/******************************************************************************* 52/*******************************************************************************
57 * 53 *
58 * NAME: acpi_gbl_aml_op_info 54 * NAME: acpi_gbl_aml_op_info
@@ -63,7 +59,6 @@ static const u8 acpi_gbl_argument_count[] =
63 * the operand type. 59 * the operand type.
64 * 60 *
65 ******************************************************************************/ 61 ******************************************************************************/
66
67/* 62/*
68 * Summary of opcode types/flags 63 * Summary of opcode types/flags
69 * 64 *
@@ -181,7 +176,6 @@ static const u8 acpi_gbl_argument_count[] =
181 AML_CREATE_QWORD_FIELD_OP 176 AML_CREATE_QWORD_FIELD_OP
182 177
183 ******************************************************************************/ 178 ******************************************************************************/
184
185/* 179/*
186 * Master Opcode information table. A summary of everything we know about each 180 * Master Opcode information table. A summary of everything we know about each
187 * opcode, all in one place. 181 * opcode, all in one place.
@@ -656,169 +650,3 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
656 650
657/*! [End] no source code translation !*/ 651/*! [End] no source code translation !*/
658}; 652};
659
660/*
661 * This table is directly indexed by the opcodes, and returns an
662 * index into the table above
663 */
664static const u8 acpi_gbl_short_op_index[256] = {
665/* 0 1 2 3 4 5 6 7 */
666/* 8 9 A B C D E F */
667/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
668/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
669/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
670/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
671/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
672/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
673/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
674/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
675/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
676/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
677/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
678/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
679/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
680/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
681/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
682/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
683/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
684/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
685/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
686/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
687/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
688/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
689/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
690/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
691/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
692/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
693/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
694/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
695/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
696/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
697/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
698/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
699};
700
701/*
702 * This table is indexed by the second opcode of the extended opcode
703 * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
704 */
705static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
706/* 0 1 2 3 4 5 6 7 */
707/* 8 9 A B C D E F */
708/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
709/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
710/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
711/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
712/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
713/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
714/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
715/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
716/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
717/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
718/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
719/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
720/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
721/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
722/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
723/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
724/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
725/* 0x88 */ 0x7C,
726};
727
728/*******************************************************************************
729 *
730 * FUNCTION: acpi_ps_get_opcode_info
731 *
732 * PARAMETERS: opcode - The AML opcode
733 *
734 * RETURN: A pointer to the info about the opcode.
735 *
736 * DESCRIPTION: Find AML opcode description based on the opcode.
737 * NOTE: This procedure must ALWAYS return a valid pointer!
738 *
739 ******************************************************************************/
740
741const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
742{
743 ACPI_FUNCTION_NAME(ps_get_opcode_info);
744
745 /*
746 * Detect normal 8-bit opcode or extended 16-bit opcode
747 */
748 if (!(opcode & 0xFF00)) {
749
750 /* Simple (8-bit) opcode: 0-255, can't index beyond table */
751
752 return (&acpi_gbl_aml_op_info
753 [acpi_gbl_short_op_index[(u8) opcode]]);
754 }
755
756 if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
757 (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
758
759 /* Valid extended (16-bit) opcode */
760
761 return (&acpi_gbl_aml_op_info
762 [acpi_gbl_long_op_index[(u8) opcode]]);
763 }
764
765 /* Unknown AML opcode */
766
767 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
768 "Unknown AML opcode [%4.4X]\n", opcode));
769
770 return (&acpi_gbl_aml_op_info[_UNK]);
771}
772
773/*******************************************************************************
774 *
775 * FUNCTION: acpi_ps_get_opcode_name
776 *
777 * PARAMETERS: opcode - The AML opcode
778 *
779 * RETURN: A pointer to the name of the opcode (ASCII String)
780 * Note: Never returns NULL.
781 *
782 * DESCRIPTION: Translate an opcode into a human-readable string
783 *
784 ******************************************************************************/
785
786char *acpi_ps_get_opcode_name(u16 opcode)
787{
788#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
789
790 const struct acpi_opcode_info *op;
791
792 op = acpi_ps_get_opcode_info(opcode);
793
794 /* Always guaranteed to return a valid pointer */
795
796 return (op->name);
797
798#else
799 return ("OpcodeName unavailable");
800
801#endif
802}
803
804/*******************************************************************************
805 *
806 * FUNCTION: acpi_ps_get_argument_count
807 *
808 * PARAMETERS: op_type - Type associated with the AML opcode
809 *
810 * RETURN: Argument count
811 *
812 * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
813 *
814 ******************************************************************************/
815
816u8 acpi_ps_get_argument_count(u32 op_type)
817{
818
819 if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
820 return (acpi_gbl_argument_count[op_type]);
821 }
822
823 return (0);
824}
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
new file mode 100644
index 000000000000..9ba5301e5751
--- /dev/null
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -0,0 +1,223 @@
1/******************************************************************************
2 *
3 * Module Name: psopinfo - AML opcode information functions and dispatch tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acparser.h"
47#include "acopcode.h"
48#include "amlcode.h"
49
50#define _COMPONENT ACPI_PARSER
51ACPI_MODULE_NAME("psopinfo")
52
53extern const u8 acpi_gbl_short_op_index[];
54extern const u8 acpi_gbl_long_op_index[];
55
/*
 * Fixed-argument counts, indexed by opcode type in
 * acpi_ps_get_argument_count (valid up through AML_TYPE_EXEC_6A_0T_1R)
 */
static const u8 acpi_gbl_argument_count[] =
    { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
58
59/*******************************************************************************
60 *
61 * FUNCTION: acpi_ps_get_opcode_info
62 *
63 * PARAMETERS: opcode - The AML opcode
64 *
65 * RETURN: A pointer to the info about the opcode.
66 *
67 * DESCRIPTION: Find AML opcode description based on the opcode.
68 * NOTE: This procedure must ALWAYS return a valid pointer!
69 *
70 ******************************************************************************/
71
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
	ACPI_FUNCTION_NAME(ps_get_opcode_info);

	/*
	 * Detect normal 8-bit opcode or extended 16-bit opcode
	 */
	if (!(opcode & 0xFF00)) {

		/* Simple (8-bit) opcode: 0-255, can't index beyond table */

		return (&acpi_gbl_aml_op_info
			[acpi_gbl_short_op_index[(u8)opcode]]);
	}

	if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
	    (((u8)opcode) <= MAX_EXTENDED_OPCODE)) {

		/* Valid extended (16-bit) opcode */

		return (&acpi_gbl_aml_op_info
			[acpi_gbl_long_op_index[(u8)opcode]]);
	}

	/* Unknown AML opcode -- return the _UNK entry, never NULL */

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Unknown AML opcode [%4.4X]\n", opcode));

	return (&acpi_gbl_aml_op_info[_UNK]);
}
103
104/*******************************************************************************
105 *
106 * FUNCTION: acpi_ps_get_opcode_name
107 *
108 * PARAMETERS: opcode - The AML opcode
109 *
110 * RETURN: A pointer to the name of the opcode (ASCII String)
111 * Note: Never returns NULL.
112 *
113 * DESCRIPTION: Translate an opcode into a human-readable string
114 *
115 ******************************************************************************/
116
117char *acpi_ps_get_opcode_name(u16 opcode)
118{
119#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
120
121 const struct acpi_opcode_info *op;
122
123 op = acpi_ps_get_opcode_info(opcode);
124
125 /* Always guaranteed to return a valid pointer */
126
127 return (op->name);
128
129#else
130 return ("OpcodeName unavailable");
131
132#endif
133}
134
135/*******************************************************************************
136 *
137 * FUNCTION: acpi_ps_get_argument_count
138 *
139 * PARAMETERS: op_type - Type associated with the AML opcode
140 *
141 * RETURN: Argument count
142 *
143 * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
144 *
145 ******************************************************************************/
146
147u8 acpi_ps_get_argument_count(u32 op_type)
148{
149
150 if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
151 return (acpi_gbl_argument_count[op_type]);
152 }
153
154 return (0);
155}
156
157/*
158 * This table is directly indexed by the opcodes It returns
159 * an index into the opcode table (acpi_gbl_aml_op_info)
160 */
161const u8 acpi_gbl_short_op_index[256] = {
162/* 0 1 2 3 4 5 6 7 */
163/* 8 9 A B C D E F */
164/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
165/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
166/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
167/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
168/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
169/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
170/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
171/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
172/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
173/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
174/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
175/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
176/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
177/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
178/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
179/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
180/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
181/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
182/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
183/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
184/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
185/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
186/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
187/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
188/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
189/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
190/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
191/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
192/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
193/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
194/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
195/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
196};
197
198/*
199 * This table is indexed by the second opcode of the extended opcode
200 * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
201 */
202const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
203/* 0 1 2 3 4 5 6 7 */
204/* 8 9 A B C D E F */
205/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
206/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
207/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
208/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
209/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
210/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
211/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
212/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
213/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
214/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
215/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
216/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
217/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
218/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
219/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
220/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
221/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
222/* 0x88 */ 0x7C,
223};
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 2494caf47755..abc4c48b2edd 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 608dc20dc173..6a4b6fb39f32 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index fdb2e71f3046..c1934bf04f0a 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 4137dcb352d1..91fa73a6e55e 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -202,14 +202,6 @@ u8 acpi_ps_is_leading_char(u32 c)
202} 202}
203 203
204/* 204/*
205 * Is "c" a namestring prefix character?
206 */
207u8 acpi_ps_is_prefix_char(u32 c)
208{
209 return ((u8) (c == '\\' || c == '^'));
210}
211
212/*
213 * Get op's name (4-byte name segment) or 0 if unnamed 205 * Get op's name (4-byte name segment) or 0 if unnamed
214 */ 206 */
215#ifdef ACPI_FUTURE_USAGE 207#ifdef ACPI_FUTURE_USAGE
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index ab96cf47896d..abd65624754f 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 963e16225797..f68254268965 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 856ff075b6ab..f3a9276ac665 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 147feb6aa2a0..7816d4eef04e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
84 bit_field &= (u16) (bit_field - 1); 84 bit_field &= (u16) (bit_field - 1);
85 } 85 }
86 86
87 return bits_set; 87 return (bits_set);
88} 88}
89 89
90/******************************************************************************* 90/*******************************************************************************
@@ -407,7 +407,9 @@ acpi_rs_get_list_length(u8 * aml_buffer,
407 407
408 /* Validate the Resource Type and Resource Length */ 408 /* Validate the Resource Type and Resource Length */
409 409
410 status = acpi_ut_validate_resource(aml_buffer, &resource_index); 410 status =
411 acpi_ut_validate_resource(NULL, aml_buffer,
412 &resource_index);
411 if (ACPI_FAILURE(status)) { 413 if (ACPI_FAILURE(status)) {
412 /* 414 /*
413 * Exit on failure. Cannot continue because the descriptor length 415 * Exit on failure. Cannot continue because the descriptor length
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 311cbc4f05fa..f8b55b426c9d 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -98,7 +98,7 @@ acpi_buffer_to_resource(u8 *aml_buffer,
98 98
99 /* Perform the AML-to-Resource conversion */ 99 /* Perform the AML-to-Resource conversion */
100 100
101 status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length, 101 status = acpi_ut_walk_aml_resources(NULL, aml_buffer, aml_buffer_length,
102 acpi_rs_convert_aml_to_resources, 102 acpi_rs_convert_aml_to_resources,
103 &current_resource_ptr); 103 &current_resource_ptr);
104 if (status == AE_AML_NO_RESOURCE_END_TAG) { 104 if (status == AE_AML_NO_RESOURCE_END_TAG) {
@@ -174,7 +174,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
174 /* Do the conversion */ 174 /* Do the conversion */
175 175
176 resource = output_buffer->pointer; 176 resource = output_buffer->pointer;
177 status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length, 177 status = acpi_ut_walk_aml_resources(NULL, aml_start, aml_buffer_length,
178 acpi_rs_convert_aml_to_resources, 178 acpi_rs_convert_aml_to_resources,
179 &resource); 179 &resource);
180 if (ACPI_FAILURE(status)) { 180 if (ACPI_FAILURE(status)) {
@@ -480,8 +480,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
480 status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed); 480 status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
481 481
482 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", 482 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
483 (u32) aml_size_needed, 483 (u32)aml_size_needed, acpi_format_exception(status)));
484 acpi_format_exception(status)));
485 if (ACPI_FAILURE(status)) { 484 if (ACPI_FAILURE(status)) {
486 return_ACPI_STATUS(status); 485 return_ACPI_STATUS(status);
487 } 486 }
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 4d11b072388c..cab51445189d 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -77,419 +77,16 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
77static void 77static void
78acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table); 78acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
79 79
80#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
81#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
82#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
83
84/*******************************************************************************
85 *
86 * Resource Descriptor info tables
87 *
88 * Note: The first table entry must be a Title or Literal and must contain
89 * the table length (number of table entries)
90 *
91 ******************************************************************************/
92
93struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
94 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
95 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
96 "Descriptor Length", NULL},
97 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
98 acpi_gbl_he_decode},
99 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
100 acpi_gbl_ll_decode},
101 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
102 acpi_gbl_shr_decode},
103 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
104 "Interrupt Count", NULL},
105 {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
106 "Interrupt List", NULL}
107};
108
109struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
110 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
111 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
112 acpi_gbl_typ_decode},
113 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
114 acpi_gbl_bm_decode},
115 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
116 acpi_gbl_siz_decode},
117 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
118 NULL},
119 {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
120 NULL}
121};
122
123struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
124 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
125 "Start-Dependent-Functions", NULL},
126 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
127 "Descriptor Length", NULL},
128 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
129 "Compatibility Priority", acpi_gbl_config_decode},
130 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
131 "Performance/Robustness", acpi_gbl_config_decode}
132};
133
134struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
135 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
136 "End-Dependent-Functions", NULL}
137};
138
139struct acpi_rsdump_info acpi_rs_dump_io[6] = {
140 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
141 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
142 acpi_gbl_io_decode},
143 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
144 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
145 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
146 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
147 NULL}
148};
149
150struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
151 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
152 "Fixed I/O", NULL},
153 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
154 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
155 "Address Length", NULL}
156};
157
158struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
159 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
160 "Vendor Specific", NULL},
161 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
162 {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
163 NULL}
164};
165
166struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
167 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
168 NULL}
169};
170
171struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
172 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
173 "24-Bit Memory Range", NULL},
174 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
175 "Write Protect", acpi_gbl_rw_decode},
176 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
177 NULL},
178 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
179 NULL},
180 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
181 NULL},
182 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
183 "Address Length", NULL}
184};
185
186struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
187 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
188 "32-Bit Memory Range", NULL},
189 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
190 "Write Protect", acpi_gbl_rw_decode},
191 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
192 NULL},
193 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
194 NULL},
195 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
196 NULL},
197 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
198 "Address Length", NULL}
199};
200
201struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
202 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
203 "32-Bit Fixed Memory Range", NULL},
204 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
205 "Write Protect", acpi_gbl_rw_decode},
206 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
207 NULL},
208 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
209 "Address Length", NULL}
210};
211
212struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
213 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
214 "16-Bit WORD Address Space", NULL},
215 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
216 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
217 NULL},
218 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
219 NULL},
220 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
221 NULL},
222 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
223 "Translation Offset", NULL},
224 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
225 "Address Length", NULL},
226 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
227};
228
229struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
230 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
231 "32-Bit DWORD Address Space", NULL},
232 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
233 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
234 NULL},
235 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
236 NULL},
237 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
238 NULL},
239 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
240 "Translation Offset", NULL},
241 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
242 "Address Length", NULL},
243 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
244};
245
246struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
247 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
248 "64-Bit QWORD Address Space", NULL},
249 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
250 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
251 NULL},
252 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
253 NULL},
254 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
255 NULL},
256 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
257 "Translation Offset", NULL},
258 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
259 "Address Length", NULL},
260 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
261};
262
263struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
264 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
265 "64-Bit Extended Address Space", NULL},
266 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
267 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
268 "Granularity", NULL},
269 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
270 "Address Minimum", NULL},
271 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
272 "Address Maximum", NULL},
273 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
274 "Translation Offset", NULL},
275 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
276 "Address Length", NULL},
277 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
278 "Type-Specific Attribute", NULL}
279};
280
281struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
282 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
283 "Extended IRQ", NULL},
284 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
285 "Type", acpi_gbl_consume_decode},
286 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
287 "Triggering", acpi_gbl_he_decode},
288 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
289 acpi_gbl_ll_decode},
290 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
291 acpi_gbl_shr_decode},
292 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
293 NULL},
294 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
295 "Interrupt Count", NULL},
296 {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
297 "Interrupt List", NULL}
298};
299
300struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
301 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
302 "Generic Register", NULL},
303 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
304 NULL},
305 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
306 NULL},
307 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
308 NULL},
309 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
310 "Access Size", NULL},
311 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
312};
313
314struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
315 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
316 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
317 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
318 "ConnectionType", acpi_gbl_ct_decode},
319 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
320 "ProducerConsumer", acpi_gbl_consume_decode},
321 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
322 acpi_gbl_ppc_decode},
323 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
324 acpi_gbl_shr_decode},
325 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
326 "IoRestriction", acpi_gbl_ior_decode},
327 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
328 acpi_gbl_he_decode},
329 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
330 acpi_gbl_ll_decode},
331 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
332 NULL},
333 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
334 "DebounceTimeout", NULL},
335 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
336 "ResourceSource", NULL},
337 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
338 "PinTableLength", NULL},
339 {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
340 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
341 NULL},
342 {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
343 NULL},
344};
345
346struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
347 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
348 "FixedDma", NULL},
349 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
350 "RequestLines", NULL},
351 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
352 NULL},
353 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
354 acpi_gbl_dts_decode},
355};
356
357#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
358 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
359 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
360 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
361 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
362 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
363 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
364 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
365 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
366 {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
367
368struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
369 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
370 "Common Serial Bus", NULL},
371 ACPI_RS_DUMP_COMMON_SERIAL_BUS
372};
373
374struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
375 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
376 "I2C Serial Bus", NULL},
377 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
378 ACPI_RSD_OFFSET(i2c_serial_bus.
379 access_mode),
380 "AccessMode", acpi_gbl_am_decode},
381 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
382 "ConnectionSpeed", NULL},
383 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
384 "SlaveAddress", NULL},
385};
386
387struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
388 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
389 "Spi Serial Bus", NULL},
390 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
391 ACPI_RSD_OFFSET(spi_serial_bus.
392 wire_mode), "WireMode",
393 acpi_gbl_wm_decode},
394 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
395 "DevicePolarity", acpi_gbl_dp_decode},
396 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
397 "DataBitLength", NULL},
398 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
399 "ClockPhase", acpi_gbl_cph_decode},
400 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
401 "ClockPolarity", acpi_gbl_cpo_decode},
402 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
403 "DeviceSelection", NULL},
404 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
405 "ConnectionSpeed", NULL},
406};
407
408struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
409 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
410 "Uart Serial Bus", NULL},
411 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
412 ACPI_RSD_OFFSET(uart_serial_bus.
413 flow_control),
414 "FlowControl", acpi_gbl_fc_decode},
415 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
416 "StopBits", acpi_gbl_sb_decode},
417 {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
418 "DataBits", acpi_gbl_bpb_decode},
419 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
420 acpi_gbl_ed_decode},
421 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
422 acpi_gbl_pt_decode},
423 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
424 "LinesEnabled", NULL},
425 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
426 "RxFifoSize", NULL},
427 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
428 "TxFifoSize", NULL},
429 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
430 "ConnectionSpeed", NULL},
431};
432
433/*
434 * Tables used for common address descriptor flag fields
435 */
436static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
437 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
438 NULL},
439 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
440 "Consumer/Producer", acpi_gbl_consume_decode},
441 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
442 acpi_gbl_dec_decode},
443 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
444 "Min Relocatability", acpi_gbl_min_decode},
445 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
446 "Max Relocatability", acpi_gbl_max_decode}
447};
448
449static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
450 {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
451 "Resource Type", (void *)"Memory Range"},
452 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
453 "Write Protect", acpi_gbl_rw_decode},
454 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
455 "Caching", acpi_gbl_mem_decode},
456 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
457 "Range Type", acpi_gbl_mtp_decode},
458 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
459 "Translation", acpi_gbl_ttp_decode}
460};
461
462static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
463 {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
464 "Resource Type", (void *)"I/O Range"},
465 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
466 "Range Type", acpi_gbl_rng_decode},
467 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
468 "Translation", acpi_gbl_ttp_decode},
469 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
470 "Translation Type", acpi_gbl_trs_decode}
471};
472
473/*
474 * Table used to dump _PRT contents
475 */
476static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
477 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
478 {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
479 {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
480 {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
481 {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
482};
483
484/******************************************************************************* 80/*******************************************************************************
485 * 81 *
486 * FUNCTION: acpi_rs_dump_descriptor 82 * FUNCTION: acpi_rs_dump_descriptor
487 * 83 *
488 * PARAMETERS: Resource 84 * PARAMETERS: resource - Buffer containing the resource
85 * table - Table entry to decode the resource
489 * 86 *
490 * RETURN: None 87 * RETURN: None
491 * 88 *
492 * DESCRIPTION: 89 * DESCRIPTION: Dump a resource descriptor based on a dump table entry.
493 * 90 *
494 ******************************************************************************/ 91 ******************************************************************************/
495 92
@@ -654,7 +251,8 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
654 /* 251 /*
655 * Optional resource_source for Address resources 252 * Optional resource_source for Address resources
656 */ 253 */
657 acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct 254 acpi_rs_dump_resource_source(ACPI_CAST_PTR
255 (struct
658 acpi_resource_source, 256 acpi_resource_source,
659 target)); 257 target));
660 break; 258 break;
@@ -765,8 +363,9 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
765 363
766 ACPI_FUNCTION_ENTRY(); 364 ACPI_FUNCTION_ENTRY();
767 365
768 if (!(acpi_dbg_level & ACPI_LV_RESOURCES) 366 /* Check if debug output enabled */
769 || !(_COMPONENT & acpi_dbg_layer)) { 367
368 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
770 return; 369 return;
771 } 370 }
772 371
@@ -827,8 +426,9 @@ void acpi_rs_dump_irq_list(u8 * route_table)
827 426
828 ACPI_FUNCTION_ENTRY(); 427 ACPI_FUNCTION_ENTRY();
829 428
830 if (!(acpi_dbg_level & ACPI_LV_RESOURCES) 429 /* Check if debug output enabled */
831 || !(_COMPONENT & acpi_dbg_layer)) { 430
431 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
832 return; 432 return;
833 } 433 }
834 434
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
new file mode 100644
index 000000000000..46192bd53653
--- /dev/null
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -0,0 +1,454 @@
1/*******************************************************************************
2 *
3 * Module Name: rsdumpinfo - Tables used to display resource descriptors.
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acresrc.h"
47
48#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rsdumpinfo")
50
51#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
52#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
53#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
54#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
55/*******************************************************************************
56 *
57 * Resource Descriptor info tables
58 *
59 * Note: The first table entry must be a Title or Literal and must contain
60 * the table length (number of table entries)
61 *
62 ******************************************************************************/
63struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
64 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
65 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
66 "Descriptor Length", NULL},
67 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
68 acpi_gbl_he_decode},
69 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
70 acpi_gbl_ll_decode},
71 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
72 acpi_gbl_shr_decode},
73 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
74 "Interrupt Count", NULL},
75 {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
76 "Interrupt List", NULL}
77};
78
79struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
80 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
81 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
82 acpi_gbl_typ_decode},
83 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
84 acpi_gbl_bm_decode},
85 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
86 acpi_gbl_siz_decode},
87 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
88 NULL},
89 {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
90 NULL}
91};
92
93struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
94 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
95 "Start-Dependent-Functions", NULL},
96 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
97 "Descriptor Length", NULL},
98 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
99 "Compatibility Priority", acpi_gbl_config_decode},
100 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
101 "Performance/Robustness", acpi_gbl_config_decode}
102};
103
104struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
105 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
106 "End-Dependent-Functions", NULL}
107};
108
109struct acpi_rsdump_info acpi_rs_dump_io[6] = {
110 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
111 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
112 acpi_gbl_io_decode},
113 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
114 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
115 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
116 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
117 NULL}
118};
119
120struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
121 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
122 "Fixed I/O", NULL},
123 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
124 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
125 "Address Length", NULL}
126};
127
128struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
129 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
130 "Vendor Specific", NULL},
131 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
132 {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
133 NULL}
134};
135
136struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
137 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
138 NULL}
139};
140
141struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
142 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
143 "24-Bit Memory Range", NULL},
144 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
145 "Write Protect", acpi_gbl_rw_decode},
146 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
147 NULL},
148 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
149 NULL},
150 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
151 NULL},
152 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
153 "Address Length", NULL}
154};
155
156struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
157 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
158 "32-Bit Memory Range", NULL},
159 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
160 "Write Protect", acpi_gbl_rw_decode},
161 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
162 NULL},
163 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
164 NULL},
165 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
166 NULL},
167 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
168 "Address Length", NULL}
169};
170
171struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
172 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
173 "32-Bit Fixed Memory Range", NULL},
174 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
175 "Write Protect", acpi_gbl_rw_decode},
176 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
177 NULL},
178 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
179 "Address Length", NULL}
180};
181
182struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
183 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
184 "16-Bit WORD Address Space", NULL},
185 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
186 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
187 NULL},
188 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
189 NULL},
190 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
191 NULL},
192 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
193 "Translation Offset", NULL},
194 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
195 "Address Length", NULL},
196 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
197};
198
199struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
200 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
201 "32-Bit DWORD Address Space", NULL},
202 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
203 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
204 NULL},
205 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
206 NULL},
207 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
208 NULL},
209 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
210 "Translation Offset", NULL},
211 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
212 "Address Length", NULL},
213 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
214};
215
216struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
217 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
218 "64-Bit QWORD Address Space", NULL},
219 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
220 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
221 NULL},
222 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
223 NULL},
224 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
225 NULL},
226 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
227 "Translation Offset", NULL},
228 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
229 "Address Length", NULL},
230 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
231};
232
233struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
234 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
235 "64-Bit Extended Address Space", NULL},
236 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
237 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
238 "Granularity", NULL},
239 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
240 "Address Minimum", NULL},
241 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
242 "Address Maximum", NULL},
243 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
244 "Translation Offset", NULL},
245 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
246 "Address Length", NULL},
247 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
248 "Type-Specific Attribute", NULL}
249};
250
251struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
252 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
253 "Extended IRQ", NULL},
254 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
255 "Type", acpi_gbl_consume_decode},
256 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
257 "Triggering", acpi_gbl_he_decode},
258 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
259 acpi_gbl_ll_decode},
260 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
261 acpi_gbl_shr_decode},
262 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
263 NULL},
264 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
265 "Interrupt Count", NULL},
266 {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
267 "Interrupt List", NULL}
268};
269
270struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
271 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
272 "Generic Register", NULL},
273 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
274 NULL},
275 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
276 NULL},
277 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
278 NULL},
279 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
280 "Access Size", NULL},
281 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
282};
283
284struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
285 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
286 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
287 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
288 "ConnectionType", acpi_gbl_ct_decode},
289 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
290 "ProducerConsumer", acpi_gbl_consume_decode},
291 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
292 acpi_gbl_ppc_decode},
293 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing",
294 acpi_gbl_shr_decode},
295 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
296 "IoRestriction", acpi_gbl_ior_decode},
297 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
298 acpi_gbl_he_decode},
299 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
300 acpi_gbl_ll_decode},
301 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
302 NULL},
303 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
304 "DebounceTimeout", NULL},
305 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
306 "ResourceSource", NULL},
307 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
308 "PinTableLength", NULL},
309 {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
310 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
311 NULL},
312 {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
313 NULL},
314};
315
316struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
317 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
318 "FixedDma", NULL},
319 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
320 "RequestLines", NULL},
321 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
322 NULL},
323 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
324 acpi_gbl_dts_decode},
325};
326
327#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
328 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
329 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
330 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
331 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
332 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
333 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
334 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
335 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
336 {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
337
338struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
339 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
340 "Common Serial Bus", NULL},
341 ACPI_RS_DUMP_COMMON_SERIAL_BUS
342};
343
344struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
345 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
346 "I2C Serial Bus", NULL},
347 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
348 ACPI_RSD_OFFSET(i2c_serial_bus.
349 access_mode),
350 "AccessMode", acpi_gbl_am_decode},
351 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
352 "ConnectionSpeed", NULL},
353 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
354 "SlaveAddress", NULL},
355};
356
357struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
358 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
359 "Spi Serial Bus", NULL},
360 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
361 ACPI_RSD_OFFSET(spi_serial_bus.
362 wire_mode), "WireMode",
363 acpi_gbl_wm_decode},
364 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
365 "DevicePolarity", acpi_gbl_dp_decode},
366 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
367 "DataBitLength", NULL},
368 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
369 "ClockPhase", acpi_gbl_cph_decode},
370 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
371 "ClockPolarity", acpi_gbl_cpo_decode},
372 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
373 "DeviceSelection", NULL},
374 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
375 "ConnectionSpeed", NULL},
376};
377
378struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
379 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
380 "Uart Serial Bus", NULL},
381 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
382 ACPI_RSD_OFFSET(uart_serial_bus.
383 flow_control),
384 "FlowControl", acpi_gbl_fc_decode},
385 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
386 "StopBits", acpi_gbl_sb_decode},
387 {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
388 "DataBits", acpi_gbl_bpb_decode},
389 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
390 acpi_gbl_ed_decode},
391 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
392 acpi_gbl_pt_decode},
393 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
394 "LinesEnabled", NULL},
395 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
396 "RxFifoSize", NULL},
397 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
398 "TxFifoSize", NULL},
399 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
400 "ConnectionSpeed", NULL},
401};
402
403/*
404 * Tables used for common address descriptor flag fields
405 */
406struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
407 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
408 NULL},
409 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
410 "Consumer/Producer", acpi_gbl_consume_decode},
411 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
412 acpi_gbl_dec_decode},
413 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
414 "Min Relocatability", acpi_gbl_min_decode},
415 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
416 "Max Relocatability", acpi_gbl_max_decode}
417};
418
419struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
420 {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
421 "Resource Type", (void *)"Memory Range"},
422 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
423 "Write Protect", acpi_gbl_rw_decode},
424 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
425 "Caching", acpi_gbl_mem_decode},
426 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
427 "Range Type", acpi_gbl_mtp_decode},
428 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
429 "Translation", acpi_gbl_ttp_decode}
430};
431
432struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
433 {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
434 "Resource Type", (void *)"I/O Range"},
435 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
436 "Range Type", acpi_gbl_rng_decode},
437 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
438 "Translation", acpi_gbl_ttp_decode},
439 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
440 "Translation Type", acpi_gbl_trs_decode}
441};
442
443/*
444 * Table used to dump _PRT contents
445 */
446struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
447 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
448 {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
449 {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
450 {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
451 {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
452};
453
454#endif
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index a9fa5158200b..41fed78e0de6 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index f6a081057a22..ca183755a6f9 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index e23a9ec248cb..364decc1028a 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsirq")
53 * acpi_rs_get_irq 53 * acpi_rs_get_irq
54 * 54 *
55 ******************************************************************************/ 55 ******************************************************************************/
56struct acpi_rsconvert_info acpi_rs_get_irq[8] = { 56struct acpi_rsconvert_info acpi_rs_get_irq[9] = {
57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ, 57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
58 ACPI_RS_SIZE(struct acpi_resource_irq), 58 ACPI_RS_SIZE(struct acpi_resource_irq),
59 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)}, 59 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
@@ -80,7 +80,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
80 80
81 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3}, 81 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
82 82
83 /* Get flags: Triggering[0], Polarity[3], Sharing[4] */ 83 /* Get flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
84 84
85 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering), 85 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
86 AML_OFFSET(irq.flags), 86 AML_OFFSET(irq.flags),
@@ -92,7 +92,11 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
92 92
93 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), 93 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
94 AML_OFFSET(irq.flags), 94 AML_OFFSET(irq.flags),
95 4} 95 4},
96
97 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
98 AML_OFFSET(irq.flags),
99 5}
96}; 100};
97 101
98/******************************************************************************* 102/*******************************************************************************
@@ -101,7 +105,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
101 * 105 *
102 ******************************************************************************/ 106 ******************************************************************************/
103 107
104struct acpi_rsconvert_info acpi_rs_set_irq[13] = { 108struct acpi_rsconvert_info acpi_rs_set_irq[14] = {
105 /* Start with a default descriptor of length 3 */ 109 /* Start with a default descriptor of length 3 */
106 110
107 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ, 111 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
@@ -114,7 +118,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
114 AML_OFFSET(irq.irq_mask), 118 AML_OFFSET(irq.irq_mask),
115 ACPI_RS_OFFSET(data.irq.interrupt_count)}, 119 ACPI_RS_OFFSET(data.irq.interrupt_count)},
116 120
117 /* Set the flags byte */ 121 /* Set flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
118 122
119 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering), 123 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
120 AML_OFFSET(irq.flags), 124 AML_OFFSET(irq.flags),
@@ -128,6 +132,10 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
128 AML_OFFSET(irq.flags), 132 AML_OFFSET(irq.flags),
129 4}, 133 4},
130 134
135 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
136 AML_OFFSET(irq.flags),
137 5},
138
131 /* 139 /*
132 * All done if the output descriptor length is required to be 3 140 * All done if the output descriptor length is required to be 3
133 * (i.e., optimization to 2 bytes cannot be attempted) 141 * (i.e., optimization to 2 bytes cannot be attempted)
@@ -181,7 +189,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
181 * 189 *
182 ******************************************************************************/ 190 ******************************************************************************/
183 191
184struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = { 192struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = {
185 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ, 193 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
186 ACPI_RS_SIZE(struct acpi_resource_extended_irq), 194 ACPI_RS_SIZE(struct acpi_resource_extended_irq),
187 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)}, 195 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
@@ -190,8 +198,10 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
190 sizeof(struct aml_resource_extended_irq), 198 sizeof(struct aml_resource_extended_irq),
191 0}, 199 0},
192 200
193 /* Flag bits */ 201 /*
194 202 * Flags: Producer/Consumer[0], Triggering[1], Polarity[2],
203 * Sharing[3], Wake[4]
204 */
195 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer), 205 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
196 AML_OFFSET(extended_irq.flags), 206 AML_OFFSET(extended_irq.flags),
197 0}, 207 0},
@@ -208,19 +218,21 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
208 AML_OFFSET(extended_irq.flags), 218 AML_OFFSET(extended_irq.flags),
209 3}, 219 3},
210 220
221 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.wake_capable),
222 AML_OFFSET(extended_irq.flags),
223 4},
224
211 /* IRQ Table length (Byte4) */ 225 /* IRQ Table length (Byte4) */
212 226
213 {ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count), 227 {ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
214 AML_OFFSET(extended_irq.interrupt_count), 228 AML_OFFSET(extended_irq.interrupt_count),
215 sizeof(u32)} 229 sizeof(u32)},
216 ,
217 230
218 /* Copy every IRQ in the table, each is 32 bits */ 231 /* Copy every IRQ in the table, each is 32 bits */
219 232
220 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]), 233 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
221 AML_OFFSET(extended_irq.interrupts[0]), 234 AML_OFFSET(extended_irq.interrupts[0]),
222 0} 235 0},
223 ,
224 236
225 /* Optional resource_source (Index and String) */ 237 /* Optional resource_source (Index and String) */
226 238
@@ -285,7 +297,6 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
285 * request_lines 297 * request_lines
286 * Channels 298 * Channels
287 */ 299 */
288
289 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines), 300 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
290 AML_OFFSET(fixed_dma.request_lines), 301 AML_OFFSET(fixed_dma.request_lines),
291 2}, 302 2},
@@ -293,5 +304,4 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
293 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width), 304 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
294 AML_OFFSET(fixed_dma.width), 305 AML_OFFSET(fixed_dma.width),
295 1}, 306 1},
296
297}; 307};
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 8b64db9a3fd2..ee2e206fc6c8 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,10 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
217 217
218 /* Perform final sanity check on the new AML resource descriptor */ 218 /* Perform final sanity check on the new AML resource descriptor */
219 219
220 status = 220 status = acpi_ut_validate_resource(NULL,
221 acpi_ut_validate_resource(ACPI_CAST_PTR 221 ACPI_CAST_PTR(union
222 (union aml_resource, aml), NULL); 222 aml_resource,
223 aml), NULL);
223 if (ACPI_FAILURE(status)) { 224 if (ACPI_FAILURE(status)) {
224 return_ACPI_STATUS(status); 225 return_ACPI_STATUS(status);
225 } 226 }
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 4fd611ad02b4..ebc773a1b350 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -156,8 +156,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
156 156
157 {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 157 {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
158 0, 158 0,
159 sizeof(u8)} 159 sizeof(u8)},
160 ,
161 160
162 /* Vendor data */ 161 /* Vendor data */
163 162
@@ -181,8 +180,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
181 180
182 {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 181 {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
183 0, 182 0,
184 sizeof(u8)} 183 sizeof(u8)},
185 ,
186 184
187 /* Vendor data */ 185 /* Vendor data */
188 186
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c6f291c2bc83..d5bf05a96096 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -136,30 +136,30 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
136 /* 136 /*
137 * Mask and shift the flag bit 137 * Mask and shift the flag bit
138 */ 138 */
139 ACPI_SET8(destination) = (u8) 139 ACPI_SET8(destination,
140 ((ACPI_GET8(source) >> info->value) & 0x01); 140 ((ACPI_GET8(source) >> info->value) & 0x01));
141 break; 141 break;
142 142
143 case ACPI_RSC_2BITFLAG: 143 case ACPI_RSC_2BITFLAG:
144 /* 144 /*
145 * Mask and shift the flag bits 145 * Mask and shift the flag bits
146 */ 146 */
147 ACPI_SET8(destination) = (u8) 147 ACPI_SET8(destination,
148 ((ACPI_GET8(source) >> info->value) & 0x03); 148 ((ACPI_GET8(source) >> info->value) & 0x03));
149 break; 149 break;
150 150
151 case ACPI_RSC_3BITFLAG: 151 case ACPI_RSC_3BITFLAG:
152 /* 152 /*
153 * Mask and shift the flag bits 153 * Mask and shift the flag bits
154 */ 154 */
155 ACPI_SET8(destination) = (u8) 155 ACPI_SET8(destination,
156 ((ACPI_GET8(source) >> info->value) & 0x07); 156 ((ACPI_GET8(source) >> info->value) & 0x07));
157 break; 157 break;
158 158
159 case ACPI_RSC_COUNT: 159 case ACPI_RSC_COUNT:
160 160
161 item_count = ACPI_GET8(source); 161 item_count = ACPI_GET8(source);
162 ACPI_SET8(destination) = (u8) item_count; 162 ACPI_SET8(destination, item_count);
163 163
164 resource->length = resource->length + 164 resource->length = resource->length +
165 (info->value * (item_count - 1)); 165 (info->value * (item_count - 1));
@@ -168,7 +168,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
168 case ACPI_RSC_COUNT16: 168 case ACPI_RSC_COUNT16:
169 169
170 item_count = aml_resource_length; 170 item_count = aml_resource_length;
171 ACPI_SET16(destination) = item_count; 171 ACPI_SET16(destination, item_count);
172 172
173 resource->length = resource->length + 173 resource->length = resource->length +
174 (info->value * (item_count - 1)); 174 (info->value * (item_count - 1));
@@ -181,13 +181,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
181 181
182 resource->length = resource->length + item_count; 182 resource->length = resource->length + item_count;
183 item_count = item_count / 2; 183 item_count = item_count / 2;
184 ACPI_SET16(destination) = item_count; 184 ACPI_SET16(destination, item_count);
185 break; 185 break;
186 186
187 case ACPI_RSC_COUNT_GPIO_VEN: 187 case ACPI_RSC_COUNT_GPIO_VEN:
188 188
189 item_count = ACPI_GET8(source); 189 item_count = ACPI_GET8(source);
190 ACPI_SET8(destination) = (u8)item_count; 190 ACPI_SET8(destination, item_count);
191 191
192 resource->length = resource->length + 192 resource->length = resource->length +
193 (info->value * item_count); 193 (info->value * item_count);
@@ -216,7 +216,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
216 } 216 }
217 217
218 resource->length = resource->length + item_count; 218 resource->length = resource->length + item_count;
219 ACPI_SET16(destination) = item_count; 219 ACPI_SET16(destination, item_count);
220 break; 220 break;
221 221
222 case ACPI_RSC_COUNT_SERIAL_VEN: 222 case ACPI_RSC_COUNT_SERIAL_VEN:
@@ -224,7 +224,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
224 item_count = ACPI_GET16(source) - info->value; 224 item_count = ACPI_GET16(source) - info->value;
225 225
226 resource->length = resource->length + item_count; 226 resource->length = resource->length + item_count;
227 ACPI_SET16(destination) = item_count; 227 ACPI_SET16(destination, item_count);
228 break; 228 break;
229 229
230 case ACPI_RSC_COUNT_SERIAL_RES: 230 case ACPI_RSC_COUNT_SERIAL_RES:
@@ -234,7 +234,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
234 - ACPI_GET16(source) - info->value; 234 - ACPI_GET16(source) - info->value;
235 235
236 resource->length = resource->length + item_count; 236 resource->length = resource->length + item_count;
237 ACPI_SET16(destination) = item_count; 237 ACPI_SET16(destination, item_count);
238 break; 238 break;
239 239
240 case ACPI_RSC_LENGTH: 240 case ACPI_RSC_LENGTH:
@@ -385,7 +385,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
385 } 385 }
386 386
387 target = ACPI_ADD_PTR(char, resource, info->value); 387 target = ACPI_ADD_PTR(char, resource, info->value);
388 ACPI_SET8(target) = (u8) item_count; 388 ACPI_SET8(target, item_count);
389 break; 389 break;
390 390
391 case ACPI_RSC_BITMASK16: 391 case ACPI_RSC_BITMASK16:
@@ -401,7 +401,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
401 } 401 }
402 402
403 target = ACPI_ADD_PTR(char, resource, info->value); 403 target = ACPI_ADD_PTR(char, resource, info->value);
404 ACPI_SET8(target) = (u8) item_count; 404 ACPI_SET8(target, item_count);
405 break; 405 break;
406 406
407 case ACPI_RSC_EXIT_NE: 407 case ACPI_RSC_EXIT_NE:
@@ -514,37 +514,40 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
514 /* 514 /*
515 * Clear the flag byte 515 * Clear the flag byte
516 */ 516 */
517 ACPI_SET8(destination) = 0; 517 ACPI_SET8(destination, 0);
518 break; 518 break;
519 519
520 case ACPI_RSC_1BITFLAG: 520 case ACPI_RSC_1BITFLAG:
521 /* 521 /*
522 * Mask and shift the flag bit 522 * Mask and shift the flag bit
523 */ 523 */
524 ACPI_SET8(destination) |= (u8) 524 ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
525 ((ACPI_GET8(source) & 0x01) << info->value); 525 ((ACPI_GET8(source) & 0x01) << info->
526 value));
526 break; 527 break;
527 528
528 case ACPI_RSC_2BITFLAG: 529 case ACPI_RSC_2BITFLAG:
529 /* 530 /*
530 * Mask and shift the flag bits 531 * Mask and shift the flag bits
531 */ 532 */
532 ACPI_SET8(destination) |= (u8) 533 ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
533 ((ACPI_GET8(source) & 0x03) << info->value); 534 ((ACPI_GET8(source) & 0x03) << info->
535 value));
534 break; 536 break;
535 537
536 case ACPI_RSC_3BITFLAG: 538 case ACPI_RSC_3BITFLAG:
537 /* 539 /*
538 * Mask and shift the flag bits 540 * Mask and shift the flag bits
539 */ 541 */
540 ACPI_SET8(destination) |= (u8) 542 ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
541 ((ACPI_GET8(source) & 0x07) << info->value); 543 ((ACPI_GET8(source) & 0x07) << info->
544 value));
542 break; 545 break;
543 546
544 case ACPI_RSC_COUNT: 547 case ACPI_RSC_COUNT:
545 548
546 item_count = ACPI_GET8(source); 549 item_count = ACPI_GET8(source);
547 ACPI_SET8(destination) = (u8) item_count; 550 ACPI_SET8(destination, item_count);
548 551
549 aml_length = 552 aml_length =
550 (u16) (aml_length + 553 (u16) (aml_length +
@@ -561,18 +564,18 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
561 case ACPI_RSC_COUNT_GPIO_PIN: 564 case ACPI_RSC_COUNT_GPIO_PIN:
562 565
563 item_count = ACPI_GET16(source); 566 item_count = ACPI_GET16(source);
564 ACPI_SET16(destination) = (u16)aml_length; 567 ACPI_SET16(destination, aml_length);
565 568
566 aml_length = (u16)(aml_length + item_count * 2); 569 aml_length = (u16)(aml_length + item_count * 2);
567 target = ACPI_ADD_PTR(void, aml, info->value); 570 target = ACPI_ADD_PTR(void, aml, info->value);
568 ACPI_SET16(target) = (u16)aml_length; 571 ACPI_SET16(target, aml_length);
569 acpi_rs_set_resource_length(aml_length, aml); 572 acpi_rs_set_resource_length(aml_length, aml);
570 break; 573 break;
571 574
572 case ACPI_RSC_COUNT_GPIO_VEN: 575 case ACPI_RSC_COUNT_GPIO_VEN:
573 576
574 item_count = ACPI_GET16(source); 577 item_count = ACPI_GET16(source);
575 ACPI_SET16(destination) = (u16)item_count; 578 ACPI_SET16(destination, item_count);
576 579
577 aml_length = 580 aml_length =
578 (u16)(aml_length + (info->value * item_count)); 581 (u16)(aml_length + (info->value * item_count));
@@ -584,7 +587,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
584 /* Set resource source string length */ 587 /* Set resource source string length */
585 588
586 item_count = ACPI_GET16(source); 589 item_count = ACPI_GET16(source);
587 ACPI_SET16(destination) = (u16)aml_length; 590 ACPI_SET16(destination, aml_length);
588 591
589 /* Compute offset for the Vendor Data */ 592 /* Compute offset for the Vendor Data */
590 593
@@ -594,7 +597,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
594 /* Set vendor offset only if there is vendor data */ 597 /* Set vendor offset only if there is vendor data */
595 598
596 if (resource->data.gpio.vendor_length) { 599 if (resource->data.gpio.vendor_length) {
597 ACPI_SET16(target) = (u16)aml_length; 600 ACPI_SET16(target, aml_length);
598 } 601 }
599 602
600 acpi_rs_set_resource_length(aml_length, aml); 603 acpi_rs_set_resource_length(aml_length, aml);
@@ -603,7 +606,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
603 case ACPI_RSC_COUNT_SERIAL_VEN: 606 case ACPI_RSC_COUNT_SERIAL_VEN:
604 607
605 item_count = ACPI_GET16(source); 608 item_count = ACPI_GET16(source);
606 ACPI_SET16(destination) = item_count + info->value; 609 ACPI_SET16(destination, item_count + info->value);
607 aml_length = (u16)(aml_length + item_count); 610 aml_length = (u16)(aml_length + item_count);
608 acpi_rs_set_resource_length(aml_length, aml); 611 acpi_rs_set_resource_length(aml_length, aml);
609 break; 612 break;
@@ -686,7 +689,8 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
686 * Optional resource_source (Index and String) 689 * Optional resource_source (Index and String)
687 */ 690 */
688 aml_length = 691 aml_length =
689 acpi_rs_set_resource_source(aml, (acpi_rs_length) 692 acpi_rs_set_resource_source(aml,
693 (acpi_rs_length)
690 aml_length, source); 694 aml_length, source);
691 acpi_rs_set_resource_length(aml_length, aml); 695 acpi_rs_set_resource_length(aml_length, aml);
692 break; 696 break;
@@ -706,10 +710,12 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
706 /* 710 /*
707 * 8-bit encoded bitmask (DMA macro) 711 * 8-bit encoded bitmask (DMA macro)
708 */ 712 */
709 ACPI_SET8(destination) = (u8) 713 ACPI_SET8(destination,
710 acpi_rs_encode_bitmask(source, 714 acpi_rs_encode_bitmask(source,
711 *ACPI_ADD_PTR(u8, resource, 715 *ACPI_ADD_PTR(u8,
712 info->value)); 716 resource,
717 info->
718 value)));
713 break; 719 break;
714 720
715 case ACPI_RSC_BITMASK16: 721 case ACPI_RSC_BITMASK16:
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9aa5e689b444..fe49fc43e10f 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsserial")
53 * acpi_rs_convert_gpio 53 * acpi_rs_convert_gpio
54 * 54 *
55 ******************************************************************************/ 55 ******************************************************************************/
56struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = { 56struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO, 57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
58 ACPI_RS_SIZE(struct acpi_resource_gpio), 58 ACPI_RS_SIZE(struct acpi_resource_gpio),
59 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)}, 59 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
@@ -75,10 +75,14 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
75 AML_OFFSET(gpio.flags), 75 AML_OFFSET(gpio.flags),
76 0}, 76 0},
77 77
78 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable), 78 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
79 AML_OFFSET(gpio.int_flags), 79 AML_OFFSET(gpio.int_flags),
80 3}, 80 3},
81 81
82 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.wake_capable),
83 AML_OFFSET(gpio.int_flags),
84 4},
85
82 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction), 86 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
83 AML_OFFSET(gpio.int_flags), 87 AML_OFFSET(gpio.int_flags),
84 0}, 88 0},
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 37d5241c0acf..a44953c6f75d 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
108 mask |= (0x1 << list[i]); 108 mask |= (0x1 << list[i]);
109 } 109 }
110 110
111 return mask; 111 return (mask);
112} 112}
113 113
114/******************************************************************************* 114/*******************************************************************************
@@ -358,8 +358,10 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
358 * 358 *
359 * Zero the entire area of the buffer. 359 * Zero the entire area of the buffer.
360 */ 360 */
361 total_length = (u32) 361 total_length =
362 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1; 362 (u32)
363 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
364 1;
363 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length); 365 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
364 366
365 ACPI_MEMSET(resource_source->string_ptr, 0, total_length); 367 ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
@@ -675,7 +677,9 @@ acpi_rs_get_method_data(acpi_handle handle,
675 /* Execute the method, no parameters */ 677 /* Execute the method, no parameters */
676 678
677 status = 679 status =
678 acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc); 680 acpi_ut_evaluate_object(ACPI_CAST_PTR
681 (struct acpi_namespace_node, handle), path,
682 ACPI_BTYPE_BUFFER, &obj_desc);
679 if (ACPI_FAILURE(status)) { 683 if (ACPI_FAILURE(status)) {
680 return_ACPI_STATUS(status); 684 return_ACPI_STATUS(status);
681 } 685 }
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 5aad744b5b83..15d6eaef0e28 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -423,7 +423,7 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
423 * 423 *
424 * RETURN: Status 424 * RETURN: Status
425 * 425 *
426 * DESCRIPTION: Walk a resource template for the specified evice to find a 426 * DESCRIPTION: Walk a resource template for the specified device to find a
427 * vendor-defined resource that matches the supplied UUID and 427 * vendor-defined resource that matches the supplied UUID and
428 * UUID subtype. Returns a struct acpi_resource of type Vendor. 428 * UUID subtype. Returns a struct acpi_resource of type Vendor.
429 * 429 *
@@ -522,57 +522,42 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
522 522
523/******************************************************************************* 523/*******************************************************************************
524 * 524 *
525 * FUNCTION: acpi_walk_resources 525 * FUNCTION: acpi_walk_resource_buffer
526 * 526 *
527 * PARAMETERS: device_handle - Handle to the device object for the 527 * PARAMETERS: buffer - Formatted buffer returned by one of the
528 * device we are querying 528 * various Get*Resource functions
529 * name - Method name of the resources we want.
530 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
531 * METHOD_NAME__AEI)
532 * user_function - Called for each resource 529 * user_function - Called for each resource
533 * context - Passed to user_function 530 * context - Passed to user_function
534 * 531 *
535 * RETURN: Status 532 * RETURN: Status
536 * 533 *
537 * DESCRIPTION: Retrieves the current or possible resource list for the 534 * DESCRIPTION: Walks the input resource template. The user_function is called
538 * specified device. The user_function is called once for 535 * once for each resource in the list.
539 * each resource in the list.
540 * 536 *
541 ******************************************************************************/ 537 ******************************************************************************/
538
542acpi_status 539acpi_status
543acpi_walk_resources(acpi_handle device_handle, 540acpi_walk_resource_buffer(struct acpi_buffer * buffer,
544 char *name, 541 acpi_walk_resource_callback user_function,
545 acpi_walk_resource_callback user_function, void *context) 542 void *context)
546{ 543{
547 acpi_status status; 544 acpi_status status = AE_OK;
548 struct acpi_buffer buffer;
549 struct acpi_resource *resource; 545 struct acpi_resource *resource;
550 struct acpi_resource *resource_end; 546 struct acpi_resource *resource_end;
551 547
552 ACPI_FUNCTION_TRACE(acpi_walk_resources); 548 ACPI_FUNCTION_TRACE(acpi_walk_resource_buffer);
553 549
554 /* Parameter validation */ 550 /* Parameter validation */
555 551
556 if (!device_handle || !user_function || !name || 552 if (!buffer || !buffer->pointer || !user_function) {
557 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
558 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
559 !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
560 return_ACPI_STATUS(AE_BAD_PARAMETER); 553 return_ACPI_STATUS(AE_BAD_PARAMETER);
561 } 554 }
562 555
563 /* Get the _CRS/_PRS/_AEI resource list */ 556 /* Buffer contains the resource list and length */
564
565 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
566 status = acpi_rs_get_method_data(device_handle, name, &buffer);
567 if (ACPI_FAILURE(status)) {
568 return_ACPI_STATUS(status);
569 }
570
571 /* Buffer now contains the resource list */
572 557
573 resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer); 558 resource = ACPI_CAST_PTR(struct acpi_resource, buffer->pointer);
574 resource_end = 559 resource_end =
575 ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length); 560 ACPI_ADD_PTR(struct acpi_resource, buffer->pointer, buffer->length);
576 561
577 /* Walk the resource list until the end_tag is found (or buffer end) */ 562 /* Walk the resource list until the end_tag is found (or buffer end) */
578 563
@@ -606,11 +591,63 @@ acpi_walk_resources(acpi_handle device_handle,
606 591
607 /* Get the next resource descriptor */ 592 /* Get the next resource descriptor */
608 593
609 resource = 594 resource = ACPI_NEXT_RESOURCE(resource);
610 ACPI_ADD_PTR(struct acpi_resource, resource,
611 resource->length);
612 } 595 }
613 596
597 return_ACPI_STATUS(status);
598}
599
600ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
601
602/*******************************************************************************
603 *
604 * FUNCTION: acpi_walk_resources
605 *
606 * PARAMETERS: device_handle - Handle to the device object for the
607 * device we are querying
608 * name - Method name of the resources we want.
609 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
610 * METHOD_NAME__AEI)
611 * user_function - Called for each resource
612 * context - Passed to user_function
613 *
614 * RETURN: Status
615 *
616 * DESCRIPTION: Retrieves the current or possible resource list for the
617 * specified device. The user_function is called once for
618 * each resource in the list.
619 *
620 ******************************************************************************/
621acpi_status
622acpi_walk_resources(acpi_handle device_handle,
623 char *name,
624 acpi_walk_resource_callback user_function, void *context)
625{
626 acpi_status status;
627 struct acpi_buffer buffer;
628
629 ACPI_FUNCTION_TRACE(acpi_walk_resources);
630
631 /* Parameter validation */
632
633 if (!device_handle || !user_function || !name ||
634 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
635 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
636 !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
637 return_ACPI_STATUS(AE_BAD_PARAMETER);
638 }
639
640 /* Get the _CRS/_PRS/_AEI resource list */
641
642 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
643 status = acpi_rs_get_method_data(device_handle, name, &buffer);
644 if (ACPI_FAILURE(status)) {
645 return_ACPI_STATUS(status);
646 }
647
648 /* Walk the resource list and cleanup */
649
650 status = acpi_walk_resource_buffer(&buffer, user_function, context);
614 ACPI_FREE(buffer.pointer); 651 ACPI_FREE(buffer.pointer);
615 return_ACPI_STATUS(status); 652 return_ACPI_STATUS(status);
616} 653}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 390651860bf0..74181bf181ec 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -172,6 +172,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
172 * FUNCTION: acpi_tb_init_generic_address 172 * FUNCTION: acpi_tb_init_generic_address
173 * 173 *
174 * PARAMETERS: generic_address - GAS struct to be initialized 174 * PARAMETERS: generic_address - GAS struct to be initialized
175 * space_id - ACPI Space ID for this register
175 * byte_width - Width of this register 176 * byte_width - Width of this register
176 * address - Address of the register 177 * address - Address of the register
177 * 178 *
@@ -407,8 +408,8 @@ static void acpi_tb_convert_fadt(void)
407 * should be zero are indeed zero. This will workaround BIOSs that 408 * should be zero are indeed zero. This will workaround BIOSs that
408 * inadvertently place values in these fields. 409 * inadvertently place values in these fields.
409 * 410 *
410 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at 411 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
411 * offset 45, 55, 95, and the word located at offset 109, 110. 412 * at offset 45, 55, 95, and the word located at offset 109, 110.
412 * 413 *
413 * Note: The FADT revision value is unreliable. Only the length can be 414 * Note: The FADT revision value is unreliable. Only the length can be
414 * trusted. 415 * trusted.
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 77d1db29a725..e4f4f02d49e7 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index f540ae462925..e57cd38004e3 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 285e24b97382..ce3d5db39a9c 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,7 @@ acpi_status acpi_tb_initialize_facs(void)
147 ACPI_CAST_INDIRECT_PTR(struct 147 ACPI_CAST_INDIRECT_PTR(struct
148 acpi_table_header, 148 acpi_table_header,
149 &acpi_gbl_FACS)); 149 &acpi_gbl_FACS));
150 return status; 150 return (status);
151} 151}
152#endif /* !ACPI_REDUCED_HARDWARE */ 152#endif /* !ACPI_REDUCED_HARDWARE */
153 153
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index f5632780421d..b35a5e6d653a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
44#include <linux/export.h> 44#include <linux/export.h>
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include "accommon.h" 46#include "accommon.h"
47#include "acnamesp.h"
48#include "actables.h" 47#include "actables.h"
49 48
50#define _COMPONENT ACPI_TABLES 49#define _COMPONENT ACPI_TABLES
@@ -437,7 +436,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
437 * 436 *
438 ******************************************************************************/ 437 ******************************************************************************/
439acpi_status 438acpi_status
440acpi_install_table_handler(acpi_tbl_handler handler, void *context) 439acpi_install_table_handler(acpi_table_handler handler, void *context)
441{ 440{
442 acpi_status status; 441 acpi_status status;
443 442
@@ -483,7 +482,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
483 * DESCRIPTION: Remove table event handler 482 * DESCRIPTION: Remove table event handler
484 * 483 *
485 ******************************************************************************/ 484 ******************************************************************************/
486acpi_status acpi_remove_table_handler(acpi_tbl_handler handler) 485acpi_status acpi_remove_table_handler(acpi_table_handler handler)
487{ 486{
488 acpi_status status; 487 acpi_status status;
489 488
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index a5e1e4e47098..67e046ec8f0a 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -192,7 +192,7 @@ static acpi_status acpi_tb_load_namespace(void)
192 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 192 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
193 } 193 }
194 194
195 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n")); 195 ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
196 196
197 unlock_and_exit: 197 unlock_and_exit:
198 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 198 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 28f330230f99..7c2ecfb7c2c3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 64880306133d..698b9d385516 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
214 214
215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && 215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
217 return_UINT32(0); 217 return_VALUE(0);
218 } 218 }
219 219
220 range_info = acpi_gbl_address_range_list[space_id]; 220 range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
256 range_info = range_info->next; 256 range_info = range_info->next;
257 } 257 }
258 258
259 return_UINT32(overlap_count); 259 return_VALUE(overlap_count);
260} 260}
261 261
262/******************************************************************************* 262/*******************************************************************************
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index ed29d474095e..e0ffb580f4b0 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e1d40ed26390..e0e8579deaac 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 294692ae76e9..e4c9291fc0a3 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -785,7 +785,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
785 785
786 status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex); 786 status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
787 if (ACPI_FAILURE(status)) { 787 if (ACPI_FAILURE(status)) {
788 return status; 788 return (status);
789 } 789 }
790 break; 790 break;
791 791
@@ -795,7 +795,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
795 &dest_desc->event. 795 &dest_desc->event.
796 os_semaphore); 796 os_semaphore);
797 if (ACPI_FAILURE(status)) { 797 if (ACPI_FAILURE(status)) {
798 return status; 798 return (status);
799 } 799 }
800 break; 800 break;
801 801
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5d95166245ae..c57d9cc07ba9 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -166,11 +166,9 @@ acpi_debug_print(u32 requested_debug_level,
166 acpi_thread_id thread_id; 166 acpi_thread_id thread_id;
167 va_list args; 167 va_list args;
168 168
169 /* 169 /* Check if debug output enabled */
170 * Stay silent if the debug level or component ID is disabled 170
171 */ 171 if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
172 if (!(requested_debug_level & acpi_dbg_level) ||
173 !(component_id & acpi_dbg_layer)) {
174 return; 172 return;
175 } 173 }
176 174
@@ -236,8 +234,9 @@ acpi_debug_print_raw(u32 requested_debug_level,
236{ 234{
237 va_list args; 235 va_list args;
238 236
239 if (!(requested_debug_level & acpi_dbg_level) || 237 /* Check if debug output enabled */
240 !(component_id & acpi_dbg_layer)) { 238
239 if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
241 return; 240 return;
242 } 241 }
243 242
@@ -272,9 +271,13 @@ acpi_ut_trace(u32 line_number,
272 acpi_gbl_nesting_level++; 271 acpi_gbl_nesting_level++;
273 acpi_ut_track_stack_ptr(); 272 acpi_ut_track_stack_ptr();
274 273
275 acpi_debug_print(ACPI_LV_FUNCTIONS, 274 /* Check if enabled up-front for performance */
276 line_number, function_name, module_name, component_id, 275
277 "%s\n", acpi_gbl_fn_entry_str); 276 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
277 acpi_debug_print(ACPI_LV_FUNCTIONS,
278 line_number, function_name, module_name,
279 component_id, "%s\n", acpi_gbl_fn_entry_str);
280 }
278} 281}
279 282
280ACPI_EXPORT_SYMBOL(acpi_ut_trace) 283ACPI_EXPORT_SYMBOL(acpi_ut_trace)
@@ -304,9 +307,14 @@ acpi_ut_trace_ptr(u32 line_number,
304 acpi_gbl_nesting_level++; 307 acpi_gbl_nesting_level++;
305 acpi_ut_track_stack_ptr(); 308 acpi_ut_track_stack_ptr();
306 309
307 acpi_debug_print(ACPI_LV_FUNCTIONS, 310 /* Check if enabled up-front for performance */
308 line_number, function_name, module_name, component_id, 311
309 "%s %p\n", acpi_gbl_fn_entry_str, pointer); 312 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
313 acpi_debug_print(ACPI_LV_FUNCTIONS,
314 line_number, function_name, module_name,
315 component_id, "%s %p\n", acpi_gbl_fn_entry_str,
316 pointer);
317 }
310} 318}
311 319
312/******************************************************************************* 320/*******************************************************************************
@@ -335,9 +343,14 @@ acpi_ut_trace_str(u32 line_number,
335 acpi_gbl_nesting_level++; 343 acpi_gbl_nesting_level++;
336 acpi_ut_track_stack_ptr(); 344 acpi_ut_track_stack_ptr();
337 345
338 acpi_debug_print(ACPI_LV_FUNCTIONS, 346 /* Check if enabled up-front for performance */
339 line_number, function_name, module_name, component_id, 347
340 "%s %s\n", acpi_gbl_fn_entry_str, string); 348 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
349 acpi_debug_print(ACPI_LV_FUNCTIONS,
350 line_number, function_name, module_name,
351 component_id, "%s %s\n", acpi_gbl_fn_entry_str,
352 string);
353 }
341} 354}
342 355
343/******************************************************************************* 356/*******************************************************************************
@@ -366,9 +379,14 @@ acpi_ut_trace_u32(u32 line_number,
366 acpi_gbl_nesting_level++; 379 acpi_gbl_nesting_level++;
367 acpi_ut_track_stack_ptr(); 380 acpi_ut_track_stack_ptr();
368 381
369 acpi_debug_print(ACPI_LV_FUNCTIONS, 382 /* Check if enabled up-front for performance */
370 line_number, function_name, module_name, component_id, 383
371 "%s %08X\n", acpi_gbl_fn_entry_str, integer); 384 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
385 acpi_debug_print(ACPI_LV_FUNCTIONS,
386 line_number, function_name, module_name,
387 component_id, "%s %08X\n",
388 acpi_gbl_fn_entry_str, integer);
389 }
372} 390}
373 391
374/******************************************************************************* 392/*******************************************************************************
@@ -393,9 +411,13 @@ acpi_ut_exit(u32 line_number,
393 const char *module_name, u32 component_id) 411 const char *module_name, u32 component_id)
394{ 412{
395 413
396 acpi_debug_print(ACPI_LV_FUNCTIONS, 414 /* Check if enabled up-front for performance */
397 line_number, function_name, module_name, component_id, 415
398 "%s\n", acpi_gbl_fn_exit_str); 416 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
417 acpi_debug_print(ACPI_LV_FUNCTIONS,
418 line_number, function_name, module_name,
419 component_id, "%s\n", acpi_gbl_fn_exit_str);
420 }
399 421
400 acpi_gbl_nesting_level--; 422 acpi_gbl_nesting_level--;
401} 423}
@@ -425,17 +447,23 @@ acpi_ut_status_exit(u32 line_number,
425 u32 component_id, acpi_status status) 447 u32 component_id, acpi_status status)
426{ 448{
427 449
428 if (ACPI_SUCCESS(status)) { 450 /* Check if enabled up-front for performance */
429 acpi_debug_print(ACPI_LV_FUNCTIONS, 451
430 line_number, function_name, module_name, 452 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
431 component_id, "%s %s\n", acpi_gbl_fn_exit_str, 453 if (ACPI_SUCCESS(status)) {
432 acpi_format_exception(status)); 454 acpi_debug_print(ACPI_LV_FUNCTIONS,
433 } else { 455 line_number, function_name,
434 acpi_debug_print(ACPI_LV_FUNCTIONS, 456 module_name, component_id, "%s %s\n",
435 line_number, function_name, module_name, 457 acpi_gbl_fn_exit_str,
436 component_id, "%s ****Exception****: %s\n", 458 acpi_format_exception(status));
437 acpi_gbl_fn_exit_str, 459 } else {
438 acpi_format_exception(status)); 460 acpi_debug_print(ACPI_LV_FUNCTIONS,
461 line_number, function_name,
462 module_name, component_id,
463 "%s ****Exception****: %s\n",
464 acpi_gbl_fn_exit_str,
465 acpi_format_exception(status));
466 }
439 } 467 }
440 468
441 acpi_gbl_nesting_level--; 469 acpi_gbl_nesting_level--;
@@ -465,10 +493,15 @@ acpi_ut_value_exit(u32 line_number,
465 const char *module_name, u32 component_id, u64 value) 493 const char *module_name, u32 component_id, u64 value)
466{ 494{
467 495
468 acpi_debug_print(ACPI_LV_FUNCTIONS, 496 /* Check if enabled up-front for performance */
469 line_number, function_name, module_name, component_id, 497
470 "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str, 498 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
471 ACPI_FORMAT_UINT64(value)); 499 acpi_debug_print(ACPI_LV_FUNCTIONS,
500 line_number, function_name, module_name,
501 component_id, "%s %8.8X%8.8X\n",
502 acpi_gbl_fn_exit_str,
503 ACPI_FORMAT_UINT64(value));
504 }
472 505
473 acpi_gbl_nesting_level--; 506 acpi_gbl_nesting_level--;
474} 507}
@@ -497,9 +530,14 @@ acpi_ut_ptr_exit(u32 line_number,
497 const char *module_name, u32 component_id, u8 *ptr) 530 const char *module_name, u32 component_id, u8 *ptr)
498{ 531{
499 532
500 acpi_debug_print(ACPI_LV_FUNCTIONS, 533 /* Check if enabled up-front for performance */
501 line_number, function_name, module_name, component_id, 534
502 "%s %p\n", acpi_gbl_fn_exit_str, ptr); 535 if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
536 acpi_debug_print(ACPI_LV_FUNCTIONS,
537 line_number, function_name, module_name,
538 component_id, "%s %p\n", acpi_gbl_fn_exit_str,
539 ptr);
540 }
503 541
504 acpi_gbl_nesting_level--; 542 acpi_gbl_nesting_level--;
505} 543}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 60a158472d82..11e2e02e1618 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 798105443d0f..2541de420249 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
340{ 340{
341 union acpi_operand_object **internal_obj; 341 union acpi_operand_object **internal_obj;
342 342
343 ACPI_FUNCTION_TRACE(ut_delete_internal_object_list); 343 ACPI_FUNCTION_ENTRY();
344 344
345 /* Walk the null-terminated internal list */ 345 /* Walk the null-terminated internal list */
346 346
@@ -351,7 +351,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
351 /* Free the combined parameter pointer list and object array */ 351 /* Free the combined parameter pointer list and object array */
352 352
353 ACPI_FREE(obj_list); 353 ACPI_FREE(obj_list);
354 return_VOID; 354 return;
355} 355}
356 356
357/******************************************************************************* 357/*******************************************************************************
@@ -484,7 +484,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
484 union acpi_generic_state *state; 484 union acpi_generic_state *state;
485 u32 i; 485 u32 i;
486 486
487 ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object); 487 ACPI_FUNCTION_NAME(ut_update_object_reference);
488 488
489 while (object) { 489 while (object) {
490 490
@@ -493,7 +493,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
493 if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) { 493 if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
494 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 494 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
495 "Object %p is NS handle\n", object)); 495 "Object %p is NS handle\n", object));
496 return_ACPI_STATUS(AE_OK); 496 return (AE_OK);
497 } 497 }
498 498
499 /* 499 /*
@@ -530,18 +530,42 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
530 */ 530 */
531 for (i = 0; i < object->package.count; i++) { 531 for (i = 0; i < object->package.count; i++) {
532 /* 532 /*
533 * Push each element onto the stack for later processing. 533 * Null package elements are legal and can be simply
534 * Note: There can be null elements within the package, 534 * ignored.
535 * these are simply ignored
536 */ 535 */
537 status = 536 next_object = object->package.elements[i];
538 acpi_ut_create_update_state_and_push 537 if (!next_object) {
539 (object->package.elements[i], action, 538 continue;
540 &state_list); 539 }
541 if (ACPI_FAILURE(status)) { 540
542 goto error_exit; 541 switch (next_object->common.type) {
542 case ACPI_TYPE_INTEGER:
543 case ACPI_TYPE_STRING:
544 case ACPI_TYPE_BUFFER:
545 /*
546 * For these very simple sub-objects, we can just
547 * update the reference count here and continue.
548 * Greatly increases performance of this operation.
549 */
550 acpi_ut_update_ref_count(next_object,
551 action);
552 break;
553
554 default:
555 /*
556 * For complex sub-objects, push them onto the stack
557 * for later processing (this eliminates recursion.)
558 */
559 status =
560 acpi_ut_create_update_state_and_push
561 (next_object, action, &state_list);
562 if (ACPI_FAILURE(status)) {
563 goto error_exit;
564 }
565 break;
543 } 566 }
544 } 567 }
568 next_object = NULL;
545 break; 569 break;
546 570
547 case ACPI_TYPE_BUFFER_FIELD: 571 case ACPI_TYPE_BUFFER_FIELD:
@@ -619,7 +643,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
619 } 643 }
620 } 644 }
621 645
622 return_ACPI_STATUS(AE_OK); 646 return (AE_OK);
623 647
624 error_exit: 648 error_exit:
625 649
@@ -633,7 +657,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
633 acpi_ut_delete_generic_state(state); 657 acpi_ut_delete_generic_state(state);
634 } 658 }
635 659
636 return_ACPI_STATUS(status); 660 return (status);
637} 661}
638 662
639/******************************************************************************* 663/*******************************************************************************
@@ -652,12 +676,12 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
652void acpi_ut_add_reference(union acpi_operand_object *object) 676void acpi_ut_add_reference(union acpi_operand_object *object)
653{ 677{
654 678
655 ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object); 679 ACPI_FUNCTION_NAME(ut_add_reference);
656 680
657 /* Ensure that we have a valid object */ 681 /* Ensure that we have a valid object */
658 682
659 if (!acpi_ut_valid_internal_object(object)) { 683 if (!acpi_ut_valid_internal_object(object)) {
660 return_VOID; 684 return;
661 } 685 }
662 686
663 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 687 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -667,7 +691,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
667 /* Increment the reference count */ 691 /* Increment the reference count */
668 692
669 (void)acpi_ut_update_object_reference(object, REF_INCREMENT); 693 (void)acpi_ut_update_object_reference(object, REF_INCREMENT);
670 return_VOID; 694 return;
671} 695}
672 696
673/******************************************************************************* 697/*******************************************************************************
@@ -685,7 +709,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
685void acpi_ut_remove_reference(union acpi_operand_object *object) 709void acpi_ut_remove_reference(union acpi_operand_object *object)
686{ 710{
687 711
688 ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object); 712 ACPI_FUNCTION_NAME(ut_remove_reference);
689 713
690 /* 714 /*
691 * Allow a NULL pointer to be passed in, just ignore it. This saves 715 * Allow a NULL pointer to be passed in, just ignore it. This saves
@@ -694,13 +718,13 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
694 */ 718 */
695 if (!object || 719 if (!object ||
696 (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) { 720 (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
697 return_VOID; 721 return;
698 } 722 }
699 723
700 /* Ensure that we have a valid object */ 724 /* Ensure that we have a valid object */
701 725
702 if (!acpi_ut_valid_internal_object(object)) { 726 if (!acpi_ut_valid_internal_object(object)) {
703 return_VOID; 727 return;
704 } 728 }
705 729
706 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 730 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -713,5 +737,5 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
713 * of all subobjects!) 737 * of all subobjects!)
714 */ 738 */
715 (void)acpi_ut_update_object_reference(object, REF_DECREMENT); 739 (void)acpi_ut_update_object_reference(object, REF_DECREMENT);
716 return_VOID; 740 return;
717} 741}
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index a9c65fbea5f4..c3f3a7e7bdc7 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -68,7 +68,7 @@ ACPI_MODULE_NAME("uteval")
68 ******************************************************************************/ 68 ******************************************************************************/
69 69
70acpi_status 70acpi_status
71acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, 71acpi_ut_evaluate_object(struct acpi_namespace_node * prefix_node,
72 char *path, 72 char *path,
73 u32 expected_return_btypes, 73 u32 expected_return_btypes,
74 union acpi_operand_object **return_desc) 74 union acpi_operand_object **return_desc)
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 23b98945f6b7..a0ab7c02e87c 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ed1893155f8b..ffecf4b4f0dd 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -293,11 +293,11 @@ acpi_status acpi_ut_init_globals(void)
293 293
294 /* GPE support */ 294 /* GPE support */
295 295
296 acpi_gbl_all_gpes_initialized = FALSE;
296 acpi_gbl_gpe_xrupt_list_head = NULL; 297 acpi_gbl_gpe_xrupt_list_head = NULL;
297 acpi_gbl_gpe_fadt_blocks[0] = NULL; 298 acpi_gbl_gpe_fadt_blocks[0] = NULL;
298 acpi_gbl_gpe_fadt_blocks[1] = NULL; 299 acpi_gbl_gpe_fadt_blocks[1] = NULL;
299 acpi_current_gpe_count = 0; 300 acpi_current_gpe_count = 0;
300 acpi_gbl_all_gpes_initialized = FALSE;
301 301
302 acpi_gbl_global_event_handler = NULL; 302 acpi_gbl_global_event_handler = NULL;
303 303
@@ -357,17 +357,24 @@ acpi_status acpi_ut_init_globals(void)
357 acpi_gbl_root_node_struct.peer = NULL; 357 acpi_gbl_root_node_struct.peer = NULL;
358 acpi_gbl_root_node_struct.object = NULL; 358 acpi_gbl_root_node_struct.object = NULL;
359 359
360#ifdef ACPI_DISASSEMBLER
361 acpi_gbl_external_list = NULL;
362#endif
363
360#ifdef ACPI_DEBUG_OUTPUT 364#ifdef ACPI_DEBUG_OUTPUT
361 acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX); 365 acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
362#endif 366#endif
363 367
364#ifdef ACPI_DBG_TRACK_ALLOCATIONS 368#ifdef ACPI_DBG_TRACK_ALLOCATIONS
365 acpi_gbl_display_final_mem_stats = FALSE; 369 acpi_gbl_display_final_mem_stats = FALSE;
370 acpi_gbl_disable_mem_tracking = FALSE;
366#endif 371#endif
367 372
368 return_ACPI_STATUS(AE_OK); 373 return_ACPI_STATUS(AE_OK);
369} 374}
370 375
376/* Public globals */
377
371ACPI_EXPORT_SYMBOL(acpi_gbl_FADT) 378ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
372ACPI_EXPORT_SYMBOL(acpi_dbg_level) 379ACPI_EXPORT_SYMBOL(acpi_dbg_level)
373ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 380ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 774c3aefbf5d..43a170a74a61 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 246798e4c938..c5d1ac44c07d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b1eb7f17e110..5c26ad420344 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -66,11 +66,11 @@ acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
66 lock->num_readers = 0; 66 lock->num_readers = 0;
67 status = acpi_os_create_mutex(&lock->reader_mutex); 67 status = acpi_os_create_mutex(&lock->reader_mutex);
68 if (ACPI_FAILURE(status)) { 68 if (ACPI_FAILURE(status)) {
69 return status; 69 return (status);
70 } 70 }
71 71
72 status = acpi_os_create_mutex(&lock->writer_mutex); 72 status = acpi_os_create_mutex(&lock->writer_mutex);
73 return status; 73 return (status);
74} 74}
75 75
76void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) 76void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
@@ -108,7 +108,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
108 108
109 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); 109 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
110 if (ACPI_FAILURE(status)) { 110 if (ACPI_FAILURE(status)) {
111 return status; 111 return (status);
112 } 112 }
113 113
114 /* Acquire the write lock only for the first reader */ 114 /* Acquire the write lock only for the first reader */
@@ -121,7 +121,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
121 } 121 }
122 122
123 acpi_os_release_mutex(lock->reader_mutex); 123 acpi_os_release_mutex(lock->reader_mutex);
124 return status; 124 return (status);
125} 125}
126 126
127acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock) 127acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
@@ -130,7 +130,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
130 130
131 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); 131 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
132 if (ACPI_FAILURE(status)) { 132 if (ACPI_FAILURE(status)) {
133 return status; 133 return (status);
134 } 134 }
135 135
136 /* Release the write lock only for the very last reader */ 136 /* Release the write lock only for the very last reader */
@@ -141,7 +141,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
141 } 141 }
142 142
143 acpi_os_release_mutex(lock->reader_mutex); 143 acpi_os_release_mutex(lock->reader_mutex);
144 return status; 144 return (status);
145} 145}
146 146
147/******************************************************************************* 147/*******************************************************************************
@@ -165,7 +165,7 @@ acpi_status acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock)
165 acpi_status status; 165 acpi_status status;
166 166
167 status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER); 167 status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER);
168 return status; 168 return (status);
169} 169}
170 170
171void acpi_ut_release_write_lock(struct acpi_rw_lock *lock) 171void acpi_ut_release_write_lock(struct acpi_rw_lock *lock)
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49563674833a..909fe66e1934 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 9286a69eb9aa..785fdd07ae56 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,36 +48,6 @@
48#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utmisc") 49ACPI_MODULE_NAME("utmisc")
50 50
51#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
52/*******************************************************************************
53 *
54 * FUNCTION: ut_convert_backslashes
55 *
56 * PARAMETERS: pathname - File pathname string to be converted
57 *
58 * RETURN: Modifies the input Pathname
59 *
60 * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
61 * the entire input file pathname string.
62 *
63 ******************************************************************************/
64void ut_convert_backslashes(char *pathname)
65{
66
67 if (!pathname) {
68 return;
69 }
70
71 while (*pathname) {
72 if (*pathname == '\\') {
73 *pathname = '/';
74 }
75
76 pathname++;
77 }
78}
79#endif
80
81/******************************************************************************* 51/*******************************************************************************
82 * 52 *
83 * FUNCTION: acpi_ut_is_pci_root_bridge 53 * FUNCTION: acpi_ut_is_pci_root_bridge
@@ -89,7 +59,6 @@ void ut_convert_backslashes(char *pathname)
89 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID. 59 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
90 * 60 *
91 ******************************************************************************/ 61 ******************************************************************************/
92
93u8 acpi_ut_is_pci_root_bridge(char *id) 62u8 acpi_ut_is_pci_root_bridge(char *id)
94{ 63{
95 64
@@ -136,362 +105,6 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
136 105
137/******************************************************************************* 106/*******************************************************************************
138 * 107 *
139 * FUNCTION: acpi_ut_allocate_owner_id
140 *
141 * PARAMETERS: owner_id - Where the new owner ID is returned
142 *
143 * RETURN: Status
144 *
145 * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
146 * track objects created by the table or method, to be deleted
147 * when the method exits or the table is unloaded.
148 *
149 ******************************************************************************/
150
151acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
152{
153 u32 i;
154 u32 j;
155 u32 k;
156 acpi_status status;
157
158 ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
159
160 /* Guard against multiple allocations of ID to the same location */
161
162 if (*owner_id) {
163 ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
164 *owner_id));
165 return_ACPI_STATUS(AE_ALREADY_EXISTS);
166 }
167
168 /* Mutex for the global ID mask */
169
170 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
171 if (ACPI_FAILURE(status)) {
172 return_ACPI_STATUS(status);
173 }
174
175 /*
176 * Find a free owner ID, cycle through all possible IDs on repeated
177 * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
178 * to be scanned twice.
179 */
180 for (i = 0, j = acpi_gbl_last_owner_id_index;
181 i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
182 if (j >= ACPI_NUM_OWNERID_MASKS) {
183 j = 0; /* Wraparound to start of mask array */
184 }
185
186 for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
187 if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
188
189 /* There are no free IDs in this mask */
190
191 break;
192 }
193
194 if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
195 /*
196 * Found a free ID. The actual ID is the bit index plus one,
197 * making zero an invalid Owner ID. Save this as the last ID
198 * allocated and update the global ID mask.
199 */
200 acpi_gbl_owner_id_mask[j] |= (1 << k);
201
202 acpi_gbl_last_owner_id_index = (u8)j;
203 acpi_gbl_next_owner_id_offset = (u8)(k + 1);
204
205 /*
206 * Construct encoded ID from the index and bit position
207 *
208 * Note: Last [j].k (bit 255) is never used and is marked
209 * permanently allocated (prevents +1 overflow)
210 */
211 *owner_id =
212 (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
213
214 ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
215 "Allocated OwnerId: %2.2X\n",
216 (unsigned int)*owner_id));
217 goto exit;
218 }
219 }
220
221 acpi_gbl_next_owner_id_offset = 0;
222 }
223
224 /*
225 * All owner_ids have been allocated. This typically should
226 * not happen since the IDs are reused after deallocation. The IDs are
227 * allocated upon table load (one per table) and method execution, and
228 * they are released when a table is unloaded or a method completes
229 * execution.
230 *
231 * If this error happens, there may be very deep nesting of invoked control
232 * methods, or there may be a bug where the IDs are not released.
233 */
234 status = AE_OWNER_ID_LIMIT;
235 ACPI_ERROR((AE_INFO,
236 "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
237
238 exit:
239 (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
240 return_ACPI_STATUS(status);
241}
242
243/*******************************************************************************
244 *
245 * FUNCTION: acpi_ut_release_owner_id
246 *
247 * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
248 *
249 * RETURN: None. No error is returned because we are either exiting a
250 * control method or unloading a table. Either way, we would
251 * ignore any error anyway.
252 *
253 * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
254 *
255 ******************************************************************************/
256
257void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
258{
259 acpi_owner_id owner_id = *owner_id_ptr;
260 acpi_status status;
261 u32 index;
262 u32 bit;
263
264 ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
265
266 /* Always clear the input owner_id (zero is an invalid ID) */
267
268 *owner_id_ptr = 0;
269
270 /* Zero is not a valid owner_ID */
271
272 if (owner_id == 0) {
273 ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
274 return_VOID;
275 }
276
277 /* Mutex for the global ID mask */
278
279 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
280 if (ACPI_FAILURE(status)) {
281 return_VOID;
282 }
283
284 /* Normalize the ID to zero */
285
286 owner_id--;
287
288 /* Decode ID to index/offset pair */
289
290 index = ACPI_DIV_32(owner_id);
291 bit = 1 << ACPI_MOD_32(owner_id);
292
293 /* Free the owner ID only if it is valid */
294
295 if (acpi_gbl_owner_id_mask[index] & bit) {
296 acpi_gbl_owner_id_mask[index] ^= bit;
297 } else {
298 ACPI_ERROR((AE_INFO,
299 "Release of non-allocated OwnerId: 0x%2.2X",
300 owner_id + 1));
301 }
302
303 (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
304 return_VOID;
305}
306
307/*******************************************************************************
308 *
309 * FUNCTION: acpi_ut_strupr (strupr)
310 *
311 * PARAMETERS: src_string - The source string to convert
312 *
313 * RETURN: None
314 *
315 * DESCRIPTION: Convert string to uppercase
316 *
317 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
318 *
319 ******************************************************************************/
320
321void acpi_ut_strupr(char *src_string)
322{
323 char *string;
324
325 ACPI_FUNCTION_ENTRY();
326
327 if (!src_string) {
328 return;
329 }
330
331 /* Walk entire string, uppercasing the letters */
332
333 for (string = src_string; *string; string++) {
334 *string = (char)ACPI_TOUPPER(*string);
335 }
336
337 return;
338}
339
340#ifdef ACPI_ASL_COMPILER
341/*******************************************************************************
342 *
343 * FUNCTION: acpi_ut_strlwr (strlwr)
344 *
345 * PARAMETERS: src_string - The source string to convert
346 *
347 * RETURN: None
348 *
349 * DESCRIPTION: Convert string to lowercase
350 *
351 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
352 *
353 ******************************************************************************/
354
355void acpi_ut_strlwr(char *src_string)
356{
357 char *string;
358
359 ACPI_FUNCTION_ENTRY();
360
361 if (!src_string) {
362 return;
363 }
364
365 /* Walk entire string, lowercasing the letters */
366
367 for (string = src_string; *string; string++) {
368 *string = (char)ACPI_TOLOWER(*string);
369 }
370
371 return;
372}
373
374/******************************************************************************
375 *
376 * FUNCTION: acpi_ut_stricmp
377 *
378 * PARAMETERS: string1 - first string to compare
379 * string2 - second string to compare
380 *
381 * RETURN: int that signifies string relationship. Zero means strings
382 * are equal.
383 *
384 * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
385 * strings with no case sensitivity)
386 *
387 ******************************************************************************/
388
389int acpi_ut_stricmp(char *string1, char *string2)
390{
391 int c1;
392 int c2;
393
394 do {
395 c1 = tolower((int)*string1);
396 c2 = tolower((int)*string2);
397
398 string1++;
399 string2++;
400 }
401 while ((c1 == c2) && (c1));
402
403 return (c1 - c2);
404}
405#endif
406
407/*******************************************************************************
408 *
409 * FUNCTION: acpi_ut_print_string
410 *
411 * PARAMETERS: string - Null terminated ASCII string
412 * max_length - Maximum output length
413 *
414 * RETURN: None
415 *
416 * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
417 * sequences.
418 *
419 ******************************************************************************/
420
421void acpi_ut_print_string(char *string, u8 max_length)
422{
423 u32 i;
424
425 if (!string) {
426 acpi_os_printf("<\"NULL STRING PTR\">");
427 return;
428 }
429
430 acpi_os_printf("\"");
431 for (i = 0; string[i] && (i < max_length); i++) {
432
433 /* Escape sequences */
434
435 switch (string[i]) {
436 case 0x07:
437 acpi_os_printf("\\a"); /* BELL */
438 break;
439
440 case 0x08:
441 acpi_os_printf("\\b"); /* BACKSPACE */
442 break;
443
444 case 0x0C:
445 acpi_os_printf("\\f"); /* FORMFEED */
446 break;
447
448 case 0x0A:
449 acpi_os_printf("\\n"); /* LINEFEED */
450 break;
451
452 case 0x0D:
453 acpi_os_printf("\\r"); /* CARRIAGE RETURN */
454 break;
455
456 case 0x09:
457 acpi_os_printf("\\t"); /* HORIZONTAL TAB */
458 break;
459
460 case 0x0B:
461 acpi_os_printf("\\v"); /* VERTICAL TAB */
462 break;
463
464 case '\'': /* Single Quote */
465 case '\"': /* Double Quote */
466 case '\\': /* Backslash */
467 acpi_os_printf("\\%c", (int)string[i]);
468 break;
469
470 default:
471
472 /* Check for printable character or hex escape */
473
474 if (ACPI_IS_PRINT(string[i])) {
475 /* This is a normal character */
476
477 acpi_os_printf("%c", (int)string[i]);
478 } else {
479 /* All others will be Hex escapes */
480
481 acpi_os_printf("\\x%2.2X", (s32) string[i]);
482 }
483 break;
484 }
485 }
486 acpi_os_printf("\"");
487
488 if (i == max_length && string[i]) {
489 acpi_os_printf("...");
490 }
491}
492
493/*******************************************************************************
494 *
495 * FUNCTION: acpi_ut_dword_byte_swap 108 * FUNCTION: acpi_ut_dword_byte_swap
496 * 109 *
497 * PARAMETERS: value - Value to be converted 110 * PARAMETERS: value - Value to be converted
@@ -559,379 +172,6 @@ void acpi_ut_set_integer_width(u8 revision)
559 } 172 }
560} 173}
561 174
562#ifdef ACPI_DEBUG_OUTPUT
563/*******************************************************************************
564 *
565 * FUNCTION: acpi_ut_display_init_pathname
566 *
567 * PARAMETERS: type - Object type of the node
568 * obj_handle - Handle whose pathname will be displayed
569 * path - Additional path string to be appended.
570 * (NULL if no extra path)
571 *
572 * RETURN: acpi_status
573 *
574 * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
575 *
576 ******************************************************************************/
577
578void
579acpi_ut_display_init_pathname(u8 type,
580 struct acpi_namespace_node *obj_handle,
581 char *path)
582{
583 acpi_status status;
584 struct acpi_buffer buffer;
585
586 ACPI_FUNCTION_ENTRY();
587
588 /* Only print the path if the appropriate debug level is enabled */
589
590 if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
591 return;
592 }
593
594 /* Get the full pathname to the node */
595
596 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
597 status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
598 if (ACPI_FAILURE(status)) {
599 return;
600 }
601
602 /* Print what we're doing */
603
604 switch (type) {
605 case ACPI_TYPE_METHOD:
606 acpi_os_printf("Executing ");
607 break;
608
609 default:
610 acpi_os_printf("Initializing ");
611 break;
612 }
613
614 /* Print the object type and pathname */
615
616 acpi_os_printf("%-12s %s",
617 acpi_ut_get_type_name(type), (char *)buffer.pointer);
618
619 /* Extra path is used to append names like _STA, _INI, etc. */
620
621 if (path) {
622 acpi_os_printf(".%s", path);
623 }
624 acpi_os_printf("\n");
625
626 ACPI_FREE(buffer.pointer);
627}
628#endif
629
630/*******************************************************************************
631 *
632 * FUNCTION: acpi_ut_valid_acpi_char
633 *
634 * PARAMETERS: char - The character to be examined
635 * position - Byte position (0-3)
636 *
637 * RETURN: TRUE if the character is valid, FALSE otherwise
638 *
639 * DESCRIPTION: Check for a valid ACPI character. Must be one of:
640 * 1) Upper case alpha
641 * 2) numeric
642 * 3) underscore
643 *
644 * We allow a '!' as the last character because of the ASF! table
645 *
646 ******************************************************************************/
647
648u8 acpi_ut_valid_acpi_char(char character, u32 position)
649{
650
651 if (!((character >= 'A' && character <= 'Z') ||
652 (character >= '0' && character <= '9') || (character == '_'))) {
653
654 /* Allow a '!' in the last position */
655
656 if (character == '!' && position == 3) {
657 return (TRUE);
658 }
659
660 return (FALSE);
661 }
662
663 return (TRUE);
664}
665
666/*******************************************************************************
667 *
668 * FUNCTION: acpi_ut_valid_acpi_name
669 *
670 * PARAMETERS: name - The name to be examined
671 *
672 * RETURN: TRUE if the name is valid, FALSE otherwise
673 *
674 * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
675 * 1) Upper case alpha
676 * 2) numeric
677 * 3) underscore
678 *
679 ******************************************************************************/
680
681u8 acpi_ut_valid_acpi_name(u32 name)
682{
683 u32 i;
684
685 ACPI_FUNCTION_ENTRY();
686
687 for (i = 0; i < ACPI_NAME_SIZE; i++) {
688 if (!acpi_ut_valid_acpi_char
689 ((ACPI_CAST_PTR(char, &name))[i], i)) {
690 return (FALSE);
691 }
692 }
693
694 return (TRUE);
695}
696
697/*******************************************************************************
698 *
699 * FUNCTION: acpi_ut_repair_name
700 *
701 * PARAMETERS: name - The ACPI name to be repaired
702 *
703 * RETURN: Repaired version of the name
704 *
705 * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
706 * return the new name. NOTE: the Name parameter must reside in
707 * read/write memory, cannot be a const.
708 *
709 * An ACPI Name must consist of valid ACPI characters. We will repair the name
710 * if necessary because we don't want to abort because of this, but we want
711 * all namespace names to be printable. A warning message is appropriate.
712 *
713 * This issue came up because there are in fact machines that exhibit
714 * this problem, and we want to be able to enable ACPI support for them,
715 * even though there are a few bad names.
716 *
717 ******************************************************************************/
718
719void acpi_ut_repair_name(char *name)
720{
721 u32 i;
722 u8 found_bad_char = FALSE;
723 u32 original_name;
724
725 ACPI_FUNCTION_NAME(ut_repair_name);
726
727 ACPI_MOVE_NAME(&original_name, name);
728
729 /* Check each character in the name */
730
731 for (i = 0; i < ACPI_NAME_SIZE; i++) {
732 if (acpi_ut_valid_acpi_char(name[i], i)) {
733 continue;
734 }
735
736 /*
737 * Replace a bad character with something printable, yet technically
738 * still invalid. This prevents any collisions with existing "good"
739 * names in the namespace.
740 */
741 name[i] = '*';
742 found_bad_char = TRUE;
743 }
744
745 if (found_bad_char) {
746
747 /* Report warning only if in strict mode or debug mode */
748
749 if (!acpi_gbl_enable_interpreter_slack) {
750 ACPI_WARNING((AE_INFO,
751 "Found bad character(s) in name, repaired: [%4.4s]\n",
752 name));
753 } else {
754 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
755 "Found bad character(s) in name, repaired: [%4.4s]\n",
756 name));
757 }
758 }
759}
760
761/*******************************************************************************
762 *
763 * FUNCTION: acpi_ut_strtoul64
764 *
765 * PARAMETERS: string - Null terminated string
766 * base - Radix of the string: 16 or ACPI_ANY_BASE;
767 * ACPI_ANY_BASE means 'in behalf of to_integer'
768 * ret_integer - Where the converted integer is returned
769 *
770 * RETURN: Status and Converted value
771 *
772 * DESCRIPTION: Convert a string into an unsigned value. Performs either a
773 * 32-bit or 64-bit conversion, depending on the current mode
774 * of the interpreter.
775 * NOTE: Does not support Octal strings, not needed.
776 *
777 ******************************************************************************/
778
779acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
780{
781 u32 this_digit = 0;
782 u64 return_value = 0;
783 u64 quotient;
784 u64 dividend;
785 u32 to_integer_op = (base == ACPI_ANY_BASE);
786 u32 mode32 = (acpi_gbl_integer_byte_width == 4);
787 u8 valid_digits = 0;
788 u8 sign_of0x = 0;
789 u8 term = 0;
790
791 ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
792
793 switch (base) {
794 case ACPI_ANY_BASE:
795 case 16:
796 break;
797
798 default:
799 /* Invalid Base */
800 return_ACPI_STATUS(AE_BAD_PARAMETER);
801 }
802
803 if (!string) {
804 goto error_exit;
805 }
806
807 /* Skip over any white space in the buffer */
808
809 while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
810 string++;
811 }
812
813 if (to_integer_op) {
814 /*
815 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
816 * We need to determine if it is decimal or hexadecimal.
817 */
818 if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
819 sign_of0x = 1;
820 base = 16;
821
822 /* Skip over the leading '0x' */
823 string += 2;
824 } else {
825 base = 10;
826 }
827 }
828
829 /* Any string left? Check that '0x' is not followed by white space. */
830
831 if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
832 if (to_integer_op) {
833 goto error_exit;
834 } else {
835 goto all_done;
836 }
837 }
838
839 /*
840 * Perform a 32-bit or 64-bit conversion, depending upon the current
841 * execution mode of the interpreter
842 */
843 dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
844
845 /* Main loop: convert the string to a 32- or 64-bit integer */
846
847 while (*string) {
848 if (ACPI_IS_DIGIT(*string)) {
849
850 /* Convert ASCII 0-9 to Decimal value */
851
852 this_digit = ((u8)*string) - '0';
853 } else if (base == 10) {
854
855 /* Digit is out of range; possible in to_integer case only */
856
857 term = 1;
858 } else {
859 this_digit = (u8)ACPI_TOUPPER(*string);
860 if (ACPI_IS_XDIGIT((char)this_digit)) {
861
862 /* Convert ASCII Hex char to value */
863
864 this_digit = this_digit - 'A' + 10;
865 } else {
866 term = 1;
867 }
868 }
869
870 if (term) {
871 if (to_integer_op) {
872 goto error_exit;
873 } else {
874 break;
875 }
876 } else if ((valid_digits == 0) && (this_digit == 0)
877 && !sign_of0x) {
878
879 /* Skip zeros */
880 string++;
881 continue;
882 }
883
884 valid_digits++;
885
886 if (sign_of0x
887 && ((valid_digits > 16)
888 || ((valid_digits > 8) && mode32))) {
889 /*
890 * This is to_integer operation case.
891 * No any restrictions for string-to-integer conversion,
892 * see ACPI spec.
893 */
894 goto error_exit;
895 }
896
897 /* Divide the digit into the correct position */
898
899 (void)acpi_ut_short_divide((dividend - (u64)this_digit),
900 base, &quotient, NULL);
901
902 if (return_value > quotient) {
903 if (to_integer_op) {
904 goto error_exit;
905 } else {
906 break;
907 }
908 }
909
910 return_value *= base;
911 return_value += this_digit;
912 string++;
913 }
914
915 /* All done, normal exit */
916
917 all_done:
918
919 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
920 ACPI_FORMAT_UINT64(return_value)));
921
922 *ret_integer = return_value;
923 return_ACPI_STATUS(AE_OK);
924
925 error_exit:
926 /* Base was set/validated above */
927
928 if (base == 10) {
929 return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
930 } else {
931 return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
932 }
933}
934
935/******************************************************************************* 175/*******************************************************************************
936 * 176 *
937 * FUNCTION: acpi_ut_create_update_state_and_push 177 * FUNCTION: acpi_ut_create_update_state_and_push
@@ -1097,3 +337,71 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
1097 337
1098 return_ACPI_STATUS(AE_AML_INTERNAL); 338 return_ACPI_STATUS(AE_AML_INTERNAL);
1099} 339}
340
341#ifdef ACPI_DEBUG_OUTPUT
342/*******************************************************************************
343 *
344 * FUNCTION: acpi_ut_display_init_pathname
345 *
346 * PARAMETERS: type - Object type of the node
347 * obj_handle - Handle whose pathname will be displayed
348 * path - Additional path string to be appended.
349 * (NULL if no extra path)
350 *
351 * RETURN: acpi_status
352 *
353 * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
354 *
355 ******************************************************************************/
356
357void
358acpi_ut_display_init_pathname(u8 type,
359 struct acpi_namespace_node *obj_handle,
360 char *path)
361{
362 acpi_status status;
363 struct acpi_buffer buffer;
364
365 ACPI_FUNCTION_ENTRY();
366
367 /* Only print the path if the appropriate debug level is enabled */
368
369 if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
370 return;
371 }
372
373 /* Get the full pathname to the node */
374
375 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
376 status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
377 if (ACPI_FAILURE(status)) {
378 return;
379 }
380
381 /* Print what we're doing */
382
383 switch (type) {
384 case ACPI_TYPE_METHOD:
385 acpi_os_printf("Executing ");
386 break;
387
388 default:
389 acpi_os_printf("Initializing ");
390 break;
391 }
392
393 /* Print the object type and pathname */
394
395 acpi_os_printf("%-12s %s",
396 acpi_ut_get_type_name(type), (char *)buffer.pointer);
397
398 /* Extra path is used to append names like _STA, _INI, etc. */
399
400 if (path) {
401 acpi_os_printf(".%s", path);
402 }
403 acpi_os_printf("\n");
404
405 ACPI_FREE(buffer.pointer);
406}
407#endif
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 5ccf57c0d87e..22feb99b8e35 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 5c52ca78f6fa..1099f5c069f8 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
419{ 419{
420 ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object); 420 ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
421 421
422 /* Object must be a union acpi_operand_object */ 422 /* Object must be of type union acpi_operand_object */
423 423
424 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) { 424 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
425 ACPI_ERROR((AE_INFO, 425 ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 676285d6116d..36a7d361d7cb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
new file mode 100644
index 000000000000..835340b26d37
--- /dev/null
+++ b/drivers/acpi/acpica/utownerid.c
@@ -0,0 +1,218 @@
1/*******************************************************************************
2 *
3 * Module Name: utownerid - Support for Table/Method Owner IDs
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utownerid")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_ut_allocate_owner_id
54 *
55 * PARAMETERS: owner_id - Where the new owner ID is returned
56 *
57 * RETURN: Status
58 *
59 * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
60 * track objects created by the table or method, to be deleted
61 * when the method exits or the table is unloaded.
62 *
63 ******************************************************************************/
64acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
65{
66 u32 i;
67 u32 j;
68 u32 k;
69 acpi_status status;
70
71 ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
72
73 /* Guard against multiple allocations of ID to the same location */
74
75 if (*owner_id) {
76 ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
77 *owner_id));
78 return_ACPI_STATUS(AE_ALREADY_EXISTS);
79 }
80
81 /* Mutex for the global ID mask */
82
83 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
84 if (ACPI_FAILURE(status)) {
85 return_ACPI_STATUS(status);
86 }
87
88 /*
89 * Find a free owner ID, cycle through all possible IDs on repeated
90 * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
91 * to be scanned twice.
92 */
93 for (i = 0, j = acpi_gbl_last_owner_id_index;
94 i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
95 if (j >= ACPI_NUM_OWNERID_MASKS) {
96 j = 0; /* Wraparound to start of mask array */
97 }
98
99 for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
100 if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
101
102 /* There are no free IDs in this mask */
103
104 break;
105 }
106
107 if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
108 /*
109 * Found a free ID. The actual ID is the bit index plus one,
110 * making zero an invalid Owner ID. Save this as the last ID
111 * allocated and update the global ID mask.
112 */
113 acpi_gbl_owner_id_mask[j] |= (1 << k);
114
115 acpi_gbl_last_owner_id_index = (u8)j;
116 acpi_gbl_next_owner_id_offset = (u8)(k + 1);
117
118 /*
119 * Construct encoded ID from the index and bit position
120 *
121 * Note: Last [j].k (bit 255) is never used and is marked
122 * permanently allocated (prevents +1 overflow)
123 */
124 *owner_id =
125 (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
126
127 ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
128 "Allocated OwnerId: %2.2X\n",
129 (unsigned int)*owner_id));
130 goto exit;
131 }
132 }
133
134 acpi_gbl_next_owner_id_offset = 0;
135 }
136
137 /*
138 * All owner_ids have been allocated. This typically should
139 * not happen since the IDs are reused after deallocation. The IDs are
140 * allocated upon table load (one per table) and method execution, and
141 * they are released when a table is unloaded or a method completes
142 * execution.
143 *
144 * If this error happens, there may be very deep nesting of invoked control
145 * methods, or there may be a bug where the IDs are not released.
146 */
147 status = AE_OWNER_ID_LIMIT;
148 ACPI_ERROR((AE_INFO,
149 "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
150
151 exit:
152 (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
153 return_ACPI_STATUS(status);
154}
155
156/*******************************************************************************
157 *
158 * FUNCTION: acpi_ut_release_owner_id
159 *
160 * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
161 *
162 * RETURN: None. No error is returned because we are either exiting a
163 * control method or unloading a table. Either way, we would
164 * ignore any error anyway.
165 *
166 * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
167 *
168 ******************************************************************************/
169
170void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
171{
172 acpi_owner_id owner_id = *owner_id_ptr;
173 acpi_status status;
174 u32 index;
175 u32 bit;
176
177 ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
178
179 /* Always clear the input owner_id (zero is an invalid ID) */
180
181 *owner_id_ptr = 0;
182
183 /* Zero is not a valid owner_ID */
184
185 if (owner_id == 0) {
186 ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
187 return_VOID;
188 }
189
190 /* Mutex for the global ID mask */
191
192 status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
193 if (ACPI_FAILURE(status)) {
194 return_VOID;
195 }
196
197 /* Normalize the ID to zero */
198
199 owner_id--;
200
201 /* Decode ID to index/offset pair */
202
203 index = ACPI_DIV_32(owner_id);
204 bit = 1 << ACPI_MOD_32(owner_id);
205
206 /* Free the owner ID only if it is valid */
207
208 if (acpi_gbl_owner_id_mask[index] & bit) {
209 acpi_gbl_owner_id_mask[index] ^= bit;
210 } else {
211 ACPI_ERROR((AE_INFO,
212 "Release of non-allocated OwnerId: 0x%2.2X",
213 owner_id + 1));
214 }
215
216 (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
217 return_VOID;
218}
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e38bef4980bc..cb7fa491decf 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -127,7 +127,9 @@ const char *acpi_gbl_rw_decode[] = {
127 127
128const char *acpi_gbl_shr_decode[] = { 128const char *acpi_gbl_shr_decode[] = {
129 "Exclusive", 129 "Exclusive",
130 "Shared" 130 "Shared",
131 "ExclusiveAndWake", /* ACPI 5.0 */
132 "SharedAndWake" /* ACPI 5.0 */
131}; 133};
132 134
133const char *acpi_gbl_siz_decode[] = { 135const char *acpi_gbl_siz_decode[] = {
@@ -383,26 +385,16 @@ static const u8 acpi_gbl_resource_types[] = {
383 ACPI_VARIABLE_LENGTH /* 0E *serial_bus */ 385 ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
384}; 386};
385 387
386/*
387 * For the iASL compiler/disassembler, we don't want any error messages
388 * because the disassembler uses the resource validation code to determine
389 * if Buffer objects are actually Resource Templates.
390 */
391#ifdef ACPI_ASL_COMPILER
392#define ACPI_RESOURCE_ERROR(plist)
393#else
394#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
395#endif
396
397/******************************************************************************* 388/*******************************************************************************
398 * 389 *
399 * FUNCTION: acpi_ut_walk_aml_resources 390 * FUNCTION: acpi_ut_walk_aml_resources
400 * 391 *
401 * PARAMETERS: aml - Pointer to the raw AML resource template 392 * PARAMETERS: walk_state - Current walk info
402 * aml_length - Length of the entire template 393 * PARAMETERS: aml - Pointer to the raw AML resource template
403 * user_function - Called once for each descriptor found. If 394 * aml_length - Length of the entire template
404 * NULL, a pointer to the end_tag is returned 395 * user_function - Called once for each descriptor found. If
405 * context - Passed to user_function 396 * NULL, a pointer to the end_tag is returned
397 * context - Passed to user_function
406 * 398 *
407 * RETURN: Status 399 * RETURN: Status
408 * 400 *
@@ -412,7 +404,8 @@ static const u8 acpi_gbl_resource_types[] = {
412 ******************************************************************************/ 404 ******************************************************************************/
413 405
414acpi_status 406acpi_status
415acpi_ut_walk_aml_resources(u8 * aml, 407acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
408 u8 *aml,
416 acpi_size aml_length, 409 acpi_size aml_length,
417 acpi_walk_aml_callback user_function, void **context) 410 acpi_walk_aml_callback user_function, void **context)
418{ 411{
@@ -441,7 +434,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
441 434
442 /* Validate the Resource Type and Resource Length */ 435 /* Validate the Resource Type and Resource Length */
443 436
444 status = acpi_ut_validate_resource(aml, &resource_index); 437 status =
438 acpi_ut_validate_resource(walk_state, aml, &resource_index);
445 if (ACPI_FAILURE(status)) { 439 if (ACPI_FAILURE(status)) {
446 /* 440 /*
447 * Exit on failure. Cannot continue because the descriptor length 441 * Exit on failure. Cannot continue because the descriptor length
@@ -498,7 +492,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
498 492
499 /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */ 493 /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
500 494
501 (void)acpi_ut_validate_resource(end_tag, &resource_index); 495 (void)acpi_ut_validate_resource(walk_state, end_tag,
496 &resource_index);
502 status = 497 status =
503 user_function(end_tag, 2, offset, resource_index, context); 498 user_function(end_tag, 2, offset, resource_index, context);
504 if (ACPI_FAILURE(status)) { 499 if (ACPI_FAILURE(status)) {
@@ -513,9 +508,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
513 * 508 *
514 * FUNCTION: acpi_ut_validate_resource 509 * FUNCTION: acpi_ut_validate_resource
515 * 510 *
516 * PARAMETERS: aml - Pointer to the raw AML resource descriptor 511 * PARAMETERS: walk_state - Current walk info
517 * return_index - Where the resource index is returned. NULL 512 * aml - Pointer to the raw AML resource descriptor
518 * if the index is not required. 513 * return_index - Where the resource index is returned. NULL
514 * if the index is not required.
519 * 515 *
520 * RETURN: Status, and optionally the Index into the global resource tables 516 * RETURN: Status, and optionally the Index into the global resource tables
521 * 517 *
@@ -525,7 +521,9 @@ acpi_ut_walk_aml_resources(u8 * aml,
525 * 521 *
526 ******************************************************************************/ 522 ******************************************************************************/
527 523
528acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) 524acpi_status
525acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
526 void *aml, u8 *return_index)
529{ 527{
530 union aml_resource *aml_resource; 528 union aml_resource *aml_resource;
531 u8 resource_type; 529 u8 resource_type;
@@ -627,10 +625,12 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
627 if ((aml_resource->common_serial_bus.type == 0) || 625 if ((aml_resource->common_serial_bus.type == 0) ||
628 (aml_resource->common_serial_bus.type > 626 (aml_resource->common_serial_bus.type >
629 AML_RESOURCE_MAX_SERIALBUSTYPE)) { 627 AML_RESOURCE_MAX_SERIALBUSTYPE)) {
630 ACPI_RESOURCE_ERROR((AE_INFO, 628 if (walk_state) {
631 "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X", 629 ACPI_ERROR((AE_INFO,
632 aml_resource->common_serial_bus. 630 "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
633 type)); 631 aml_resource->common_serial_bus.
632 type));
633 }
634 return (AE_AML_INVALID_RESOURCE_TYPE); 634 return (AE_AML_INVALID_RESOURCE_TYPE);
635 } 635 }
636 } 636 }
@@ -645,18 +645,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
645 645
646 invalid_resource: 646 invalid_resource:
647 647
648 ACPI_RESOURCE_ERROR((AE_INFO, 648 if (walk_state) {
649 "Invalid/unsupported resource descriptor: Type 0x%2.2X", 649 ACPI_ERROR((AE_INFO,
650 resource_type)); 650 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
651 resource_type));
652 }
651 return (AE_AML_INVALID_RESOURCE_TYPE); 653 return (AE_AML_INVALID_RESOURCE_TYPE);
652 654
653 bad_resource_length: 655 bad_resource_length:
654 656
655 ACPI_RESOURCE_ERROR((AE_INFO, 657 if (walk_state) {
656 "Invalid resource descriptor length: Type " 658 ACPI_ERROR((AE_INFO,
657 "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X", 659 "Invalid resource descriptor length: Type "
658 resource_type, resource_length, 660 "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
659 minimum_resource_length)); 661 resource_type, resource_length,
662 minimum_resource_length));
663 }
660 return (AE_AML_BAD_RESOURCE_LENGTH); 664 return (AE_AML_BAD_RESOURCE_LENGTH);
661} 665}
662 666
@@ -800,8 +804,7 @@ u32 acpi_ut_get_descriptor_length(void *aml)
800 ******************************************************************************/ 804 ******************************************************************************/
801 805
802acpi_status 806acpi_status
803acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc, 807acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag)
804 u8 ** end_tag)
805{ 808{
806 acpi_status status; 809 acpi_status status;
807 810
@@ -816,7 +819,7 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
816 819
817 /* Validate the template and get a pointer to the end_tag */ 820 /* Validate the template and get a pointer to the end_tag */
818 821
819 status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer, 822 status = acpi_ut_walk_aml_resources(NULL, obj_desc->buffer.pointer,
820 obj_desc->buffer.length, NULL, 823 obj_desc->buffer.length, NULL,
821 (void **)end_tag); 824 (void **)end_tag);
822 825
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index cee0473ba813..a6b729d4c1dc 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -97,14 +97,13 @@ void
97acpi_ut_push_generic_state(union acpi_generic_state **list_head, 97acpi_ut_push_generic_state(union acpi_generic_state **list_head,
98 union acpi_generic_state *state) 98 union acpi_generic_state *state)
99{ 99{
100 ACPI_FUNCTION_TRACE(ut_push_generic_state); 100 ACPI_FUNCTION_ENTRY();
101 101
102 /* Push the state object onto the front of the list (stack) */ 102 /* Push the state object onto the front of the list (stack) */
103 103
104 state->common.next = *list_head; 104 state->common.next = *list_head;
105 *list_head = state; 105 *list_head = state;
106 106 return;
107 return_VOID;
108} 107}
109 108
110/******************************************************************************* 109/*******************************************************************************
@@ -124,7 +123,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
124{ 123{
125 union acpi_generic_state *state; 124 union acpi_generic_state *state;
126 125
127 ACPI_FUNCTION_TRACE(ut_pop_generic_state); 126 ACPI_FUNCTION_ENTRY();
128 127
129 /* Remove the state object at the head of the list (stack) */ 128 /* Remove the state object at the head of the list (stack) */
130 129
@@ -136,7 +135,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
136 *list_head = state->common.next; 135 *list_head = state->common.next;
137 } 136 }
138 137
139 return_PTR(state); 138 return (state);
140} 139}
141 140
142/******************************************************************************* 141/*******************************************************************************
@@ -186,13 +185,13 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
186{ 185{
187 union acpi_generic_state *state; 186 union acpi_generic_state *state;
188 187
189 ACPI_FUNCTION_TRACE(ut_create_thread_state); 188 ACPI_FUNCTION_ENTRY();
190 189
191 /* Create the generic state object */ 190 /* Create the generic state object */
192 191
193 state = acpi_ut_create_generic_state(); 192 state = acpi_ut_create_generic_state();
194 if (!state) { 193 if (!state) {
195 return_PTR(NULL); 194 return (NULL);
196 } 195 }
197 196
198 /* Init fields specific to the update struct */ 197 /* Init fields specific to the update struct */
@@ -207,7 +206,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
207 state->thread.thread_id = (acpi_thread_id) 1; 206 state->thread.thread_id = (acpi_thread_id) 1;
208 } 207 }
209 208
210 return_PTR((struct acpi_thread_state *)state); 209 return ((struct acpi_thread_state *)state);
211} 210}
212 211
213/******************************************************************************* 212/*******************************************************************************
@@ -230,13 +229,13 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
230{ 229{
231 union acpi_generic_state *state; 230 union acpi_generic_state *state;
232 231
233 ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object); 232 ACPI_FUNCTION_ENTRY();
234 233
235 /* Create the generic state object */ 234 /* Create the generic state object */
236 235
237 state = acpi_ut_create_generic_state(); 236 state = acpi_ut_create_generic_state();
238 if (!state) { 237 if (!state) {
239 return_PTR(NULL); 238 return (NULL);
240 } 239 }
241 240
242 /* Init fields specific to the update struct */ 241 /* Init fields specific to the update struct */
@@ -244,8 +243,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
244 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE; 243 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
245 state->update.object = object; 244 state->update.object = object;
246 state->update.value = action; 245 state->update.value = action;
247 246 return (state);
248 return_PTR(state);
249} 247}
250 248
251/******************************************************************************* 249/*******************************************************************************
@@ -267,13 +265,13 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
267{ 265{
268 union acpi_generic_state *state; 266 union acpi_generic_state *state;
269 267
270 ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object); 268 ACPI_FUNCTION_ENTRY();
271 269
272 /* Create the generic state object */ 270 /* Create the generic state object */
273 271
274 state = acpi_ut_create_generic_state(); 272 state = acpi_ut_create_generic_state();
275 if (!state) { 273 if (!state) {
276 return_PTR(NULL); 274 return (NULL);
277 } 275 }
278 276
279 /* Init fields specific to the update struct */ 277 /* Init fields specific to the update struct */
@@ -283,8 +281,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
283 state->pkg.dest_object = external_object; 281 state->pkg.dest_object = external_object;
284 state->pkg.index = index; 282 state->pkg.index = index;
285 state->pkg.num_packages = 1; 283 state->pkg.num_packages = 1;
286 284 return (state);
287 return_PTR(state);
288} 285}
289 286
290/******************************************************************************* 287/*******************************************************************************
@@ -304,21 +301,20 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
304{ 301{
305 union acpi_generic_state *state; 302 union acpi_generic_state *state;
306 303
307 ACPI_FUNCTION_TRACE(ut_create_control_state); 304 ACPI_FUNCTION_ENTRY();
308 305
309 /* Create the generic state object */ 306 /* Create the generic state object */
310 307
311 state = acpi_ut_create_generic_state(); 308 state = acpi_ut_create_generic_state();
312 if (!state) { 309 if (!state) {
313 return_PTR(NULL); 310 return (NULL);
314 } 311 }
315 312
316 /* Init fields specific to the control struct */ 313 /* Init fields specific to the control struct */
317 314
318 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL; 315 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
319 state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; 316 state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
320 317 return (state);
321 return_PTR(state);
322} 318}
323 319
324/******************************************************************************* 320/*******************************************************************************
@@ -336,12 +332,12 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
336 332
337void acpi_ut_delete_generic_state(union acpi_generic_state *state) 333void acpi_ut_delete_generic_state(union acpi_generic_state *state)
338{ 334{
339 ACPI_FUNCTION_TRACE(ut_delete_generic_state); 335 ACPI_FUNCTION_ENTRY();
340 336
341 /* Ignore null state */ 337 /* Ignore null state */
342 338
343 if (state) { 339 if (state) {
344 (void)acpi_os_release_object(acpi_gbl_state_cache, state); 340 (void)acpi_os_release_object(acpi_gbl_state_cache, state);
345 } 341 }
346 return_VOID; 342 return;
347} 343}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
new file mode 100644
index 000000000000..b3e36a81aa4d
--- /dev/null
+++ b/drivers/acpi/acpica/utstring.c
@@ -0,0 +1,574 @@
1/*******************************************************************************
2 *
3 * Module Name: utstring - Common functions for strings and characters
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utstring")
50
51/*
52 * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
53 * version of strtoul.
54 */
55#ifdef ACPI_ASL_COMPILER
56/*******************************************************************************
57 *
58 * FUNCTION: acpi_ut_strlwr (strlwr)
59 *
60 * PARAMETERS: src_string - The source string to convert
61 *
62 * RETURN: None
63 *
64 * DESCRIPTION: Convert string to lowercase
65 *
66 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
67 *
68 ******************************************************************************/
69void acpi_ut_strlwr(char *src_string)
70{
71 char *string;
72
73 ACPI_FUNCTION_ENTRY();
74
75 if (!src_string) {
76 return;
77 }
78
79 /* Walk entire string, lowercasing the letters */
80
81 for (string = src_string; *string; string++) {
82 *string = (char)ACPI_TOLOWER(*string);
83 }
84
85 return;
86}
87
88/******************************************************************************
89 *
90 * FUNCTION: acpi_ut_stricmp (stricmp)
91 *
92 * PARAMETERS: string1 - first string to compare
93 * string2 - second string to compare
94 *
95 * RETURN: int that signifies string relationship. Zero means strings
96 * are equal.
97 *
98 * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
99 * strings with no case sensitivity)
100 *
101 ******************************************************************************/
102
103int acpi_ut_stricmp(char *string1, char *string2)
104{
105 int c1;
106 int c2;
107
108 do {
109 c1 = tolower((int)*string1);
110 c2 = tolower((int)*string2);
111
112 string1++;
113 string2++;
114 }
115 while ((c1 == c2) && (c1));
116
117 return (c1 - c2);
118}
119#endif
120
121/*******************************************************************************
122 *
123 * FUNCTION: acpi_ut_strupr (strupr)
124 *
125 * PARAMETERS: src_string - The source string to convert
126 *
127 * RETURN: None
128 *
129 * DESCRIPTION: Convert string to uppercase
130 *
131 * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
132 *
133 ******************************************************************************/
134
135void acpi_ut_strupr(char *src_string)
136{
137 char *string;
138
139 ACPI_FUNCTION_ENTRY();
140
141 if (!src_string) {
142 return;
143 }
144
145 /* Walk entire string, uppercasing the letters */
146
147 for (string = src_string; *string; string++) {
148 *string = (char)ACPI_TOUPPER(*string);
149 }
150
151 return;
152}
153
154/*******************************************************************************
155 *
156 * FUNCTION: acpi_ut_strtoul64
157 *
158 * PARAMETERS: string - Null terminated string
159 * base - Radix of the string: 16 or ACPI_ANY_BASE;
160 * ACPI_ANY_BASE means 'in behalf of to_integer'
161 * ret_integer - Where the converted integer is returned
162 *
163 * RETURN: Status and Converted value
164 *
165 * DESCRIPTION: Convert a string into an unsigned value. Performs either a
166 * 32-bit or 64-bit conversion, depending on the current mode
167 * of the interpreter.
168 * NOTE: Does not support Octal strings, not needed.
169 *
170 ******************************************************************************/
171
172acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
173{
174 u32 this_digit = 0;
175 u64 return_value = 0;
176 u64 quotient;
177 u64 dividend;
178 u32 to_integer_op = (base == ACPI_ANY_BASE);
179 u32 mode32 = (acpi_gbl_integer_byte_width == 4);
180 u8 valid_digits = 0;
181 u8 sign_of0x = 0;
182 u8 term = 0;
183
184 ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
185
186 switch (base) {
187 case ACPI_ANY_BASE:
188 case 16:
189 break;
190
191 default:
192 /* Invalid Base */
193 return_ACPI_STATUS(AE_BAD_PARAMETER);
194 }
195
196 if (!string) {
197 goto error_exit;
198 }
199
200 /* Skip over any white space in the buffer */
201
202 while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
203 string++;
204 }
205
206 if (to_integer_op) {
207 /*
208 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
209 * We need to determine if it is decimal or hexadecimal.
210 */
211 if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
212 sign_of0x = 1;
213 base = 16;
214
215 /* Skip over the leading '0x' */
216 string += 2;
217 } else {
218 base = 10;
219 }
220 }
221
222 /* Any string left? Check that '0x' is not followed by white space. */
223
224 if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
225 if (to_integer_op) {
226 goto error_exit;
227 } else {
228 goto all_done;
229 }
230 }
231
232 /*
233 * Perform a 32-bit or 64-bit conversion, depending upon the current
234 * execution mode of the interpreter
235 */
236 dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
237
238 /* Main loop: convert the string to a 32- or 64-bit integer */
239
240 while (*string) {
241 if (ACPI_IS_DIGIT(*string)) {
242
243 /* Convert ASCII 0-9 to Decimal value */
244
245 this_digit = ((u8)*string) - '0';
246 } else if (base == 10) {
247
248 /* Digit is out of range; possible in to_integer case only */
249
250 term = 1;
251 } else {
252 this_digit = (u8)ACPI_TOUPPER(*string);
253 if (ACPI_IS_XDIGIT((char)this_digit)) {
254
255 /* Convert ASCII Hex char to value */
256
257 this_digit = this_digit - 'A' + 10;
258 } else {
259 term = 1;
260 }
261 }
262
263 if (term) {
264 if (to_integer_op) {
265 goto error_exit;
266 } else {
267 break;
268 }
269 } else if ((valid_digits == 0) && (this_digit == 0)
270 && !sign_of0x) {
271
272 /* Skip zeros */
273 string++;
274 continue;
275 }
276
277 valid_digits++;
278
279 if (sign_of0x
280 && ((valid_digits > 16)
281 || ((valid_digits > 8) && mode32))) {
282 /*
283 * This is to_integer operation case.
284 * No any restrictions for string-to-integer conversion,
285 * see ACPI spec.
286 */
287 goto error_exit;
288 }
289
290 /* Divide the digit into the correct position */
291
292 (void)acpi_ut_short_divide((dividend - (u64)this_digit),
293 base, &quotient, NULL);
294
295 if (return_value > quotient) {
296 if (to_integer_op) {
297 goto error_exit;
298 } else {
299 break;
300 }
301 }
302
303 return_value *= base;
304 return_value += this_digit;
305 string++;
306 }
307
308 /* All done, normal exit */
309
310 all_done:
311
312 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
313 ACPI_FORMAT_UINT64(return_value)));
314
315 *ret_integer = return_value;
316 return_ACPI_STATUS(AE_OK);
317
318 error_exit:
319 /* Base was set/validated above */
320
321 if (base == 10) {
322 return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
323 } else {
324 return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
325 }
326}
327
328/*******************************************************************************
329 *
330 * FUNCTION: acpi_ut_print_string
331 *
332 * PARAMETERS: string - Null terminated ASCII string
333 * max_length - Maximum output length
334 *
335 * RETURN: None
336 *
337 * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
338 * sequences.
339 *
340 ******************************************************************************/
341
342void acpi_ut_print_string(char *string, u8 max_length)
343{
344 u32 i;
345
346 if (!string) {
347 acpi_os_printf("<\"NULL STRING PTR\">");
348 return;
349 }
350
351 acpi_os_printf("\"");
352 for (i = 0; string[i] && (i < max_length); i++) {
353
354 /* Escape sequences */
355
356 switch (string[i]) {
357 case 0x07:
358 acpi_os_printf("\\a"); /* BELL */
359 break;
360
361 case 0x08:
362 acpi_os_printf("\\b"); /* BACKSPACE */
363 break;
364
365 case 0x0C:
366 acpi_os_printf("\\f"); /* FORMFEED */
367 break;
368
369 case 0x0A:
370 acpi_os_printf("\\n"); /* LINEFEED */
371 break;
372
373 case 0x0D:
374 acpi_os_printf("\\r"); /* CARRIAGE RETURN */
375 break;
376
377 case 0x09:
378 acpi_os_printf("\\t"); /* HORIZONTAL TAB */
379 break;
380
381 case 0x0B:
382 acpi_os_printf("\\v"); /* VERTICAL TAB */
383 break;
384
385 case '\'': /* Single Quote */
386 case '\"': /* Double Quote */
387 case '\\': /* Backslash */
388 acpi_os_printf("\\%c", (int)string[i]);
389 break;
390
391 default:
392
393 /* Check for printable character or hex escape */
394
395 if (ACPI_IS_PRINT(string[i])) {
396 /* This is a normal character */
397
398 acpi_os_printf("%c", (int)string[i]);
399 } else {
400 /* All others will be Hex escapes */
401
402 acpi_os_printf("\\x%2.2X", (s32) string[i]);
403 }
404 break;
405 }
406 }
407 acpi_os_printf("\"");
408
409 if (i == max_length && string[i]) {
410 acpi_os_printf("...");
411 }
412}
413
414/*******************************************************************************
415 *
416 * FUNCTION: acpi_ut_valid_acpi_char
417 *
418 * PARAMETERS: char - The character to be examined
419 * position - Byte position (0-3)
420 *
421 * RETURN: TRUE if the character is valid, FALSE otherwise
422 *
423 * DESCRIPTION: Check for a valid ACPI character. Must be one of:
424 * 1) Upper case alpha
425 * 2) numeric
426 * 3) underscore
427 *
428 * We allow a '!' as the last character because of the ASF! table
429 *
430 ******************************************************************************/
431
432u8 acpi_ut_valid_acpi_char(char character, u32 position)
433{
434
435 if (!((character >= 'A' && character <= 'Z') ||
436 (character >= '0' && character <= '9') || (character == '_'))) {
437
438 /* Allow a '!' in the last position */
439
440 if (character == '!' && position == 3) {
441 return (TRUE);
442 }
443
444 return (FALSE);
445 }
446
447 return (TRUE);
448}
449
450/*******************************************************************************
451 *
452 * FUNCTION: acpi_ut_valid_acpi_name
453 *
454 * PARAMETERS: name - The name to be examined
455 *
456 * RETURN: TRUE if the name is valid, FALSE otherwise
457 *
458 * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
459 * 1) Upper case alpha
460 * 2) numeric
461 * 3) underscore
462 *
463 ******************************************************************************/
464
465u8 acpi_ut_valid_acpi_name(u32 name)
466{
467 u32 i;
468
469 ACPI_FUNCTION_ENTRY();
470
471 for (i = 0; i < ACPI_NAME_SIZE; i++) {
472 if (!acpi_ut_valid_acpi_char
473 ((ACPI_CAST_PTR(char, &name))[i], i)) {
474 return (FALSE);
475 }
476 }
477
478 return (TRUE);
479}
480
481/*******************************************************************************
482 *
483 * FUNCTION: acpi_ut_repair_name
484 *
485 * PARAMETERS: name - The ACPI name to be repaired
486 *
487 * RETURN: Repaired version of the name
488 *
489 * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
490 * return the new name. NOTE: the Name parameter must reside in
491 * read/write memory, cannot be a const.
492 *
493 * An ACPI Name must consist of valid ACPI characters. We will repair the name
494 * if necessary because we don't want to abort because of this, but we want
495 * all namespace names to be printable. A warning message is appropriate.
496 *
497 * This issue came up because there are in fact machines that exhibit
498 * this problem, and we want to be able to enable ACPI support for them,
499 * even though there are a few bad names.
500 *
501 ******************************************************************************/
502
503void acpi_ut_repair_name(char *name)
504{
505 u32 i;
506 u8 found_bad_char = FALSE;
507 u32 original_name;
508
509 ACPI_FUNCTION_NAME(ut_repair_name);
510
511 ACPI_MOVE_NAME(&original_name, name);
512
513 /* Check each character in the name */
514
515 for (i = 0; i < ACPI_NAME_SIZE; i++) {
516 if (acpi_ut_valid_acpi_char(name[i], i)) {
517 continue;
518 }
519
520 /*
521 * Replace a bad character with something printable, yet technically
522 * still invalid. This prevents any collisions with existing "good"
523 * names in the namespace.
524 */
525 name[i] = '*';
526 found_bad_char = TRUE;
527 }
528
529 if (found_bad_char) {
530
531 /* Report warning only if in strict mode or debug mode */
532
533 if (!acpi_gbl_enable_interpreter_slack) {
534 ACPI_WARNING((AE_INFO,
535 "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
536 original_name, name));
537 } else {
538 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
539 "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
540 original_name, name));
541 }
542 }
543}
544
545#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
546/*******************************************************************************
547 *
548 * FUNCTION: ut_convert_backslashes
549 *
550 * PARAMETERS: pathname - File pathname string to be converted
551 *
552 * RETURN: Modifies the input Pathname
553 *
554 * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
555 * the entire input file pathname string.
556 *
557 ******************************************************************************/
558
559void ut_convert_backslashes(char *pathname)
560{
561
562 if (!pathname) {
563 return;
564 }
565
566 while (*pathname) {
567 if (*pathname == '\\') {
568 *pathname = '/';
569 }
570
571 pathname++;
572 }
573}
574#endif
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index a424a9e3fea4..62774c7b76a8 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -436,10 +436,10 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
436 struct acpi_memory_list *mem_list; 436 struct acpi_memory_list *mem_list;
437 acpi_status status; 437 acpi_status status;
438 438
439 ACPI_FUNCTION_TRACE(ut_remove_allocation); 439 ACPI_FUNCTION_NAME(ut_remove_allocation);
440 440
441 if (acpi_gbl_disable_mem_tracking) { 441 if (acpi_gbl_disable_mem_tracking) {
442 return_ACPI_STATUS(AE_OK); 442 return (AE_OK);
443 } 443 }
444 444
445 mem_list = acpi_gbl_global_list; 445 mem_list = acpi_gbl_global_list;
@@ -450,12 +450,12 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
450 ACPI_ERROR((module, line, 450 ACPI_ERROR((module, line,
451 "Empty allocation list, nothing to free!")); 451 "Empty allocation list, nothing to free!"));
452 452
453 return_ACPI_STATUS(AE_OK); 453 return (AE_OK);
454 } 454 }
455 455
456 status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); 456 status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
457 if (ACPI_FAILURE(status)) { 457 if (ACPI_FAILURE(status)) {
458 return_ACPI_STATUS(status); 458 return (status);
459 } 459 }
460 460
461 /* Unlink */ 461 /* Unlink */
@@ -470,15 +470,15 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
470 (allocation->next)->previous = allocation->previous; 470 (allocation->next)->previous = allocation->previous;
471 } 471 }
472 472
473 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n",
474 &allocation->user_space, allocation->size));
475
473 /* Mark the segment as deleted */ 476 /* Mark the segment as deleted */
474 477
475 ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size); 478 ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
476 479
477 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
478 allocation->size));
479
480 status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); 480 status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
481 return_ACPI_STATUS(status); 481 return (status);
482} 482}
483 483
484/******************************************************************************* 484/*******************************************************************************
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 390db0ca5e2e..48efb446258c 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,11 +44,7 @@
44#include <linux/export.h> 44#include <linux/export.h>
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include "accommon.h" 46#include "accommon.h"
47#include "acevents.h"
48#include "acnamesp.h"
49#include "acdebug.h" 47#include "acdebug.h"
50#include "actables.h"
51#include "acinterp.h"
52 48
53#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
54ACPI_MODULE_NAME("utxface") 50ACPI_MODULE_NAME("utxface")
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index d4d3826140d8..976b6c734fce 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ ACPI_EXPORT_SYMBOL(acpi_bios_warning)
297 * 297 *
298 * PARAMETERS: module_name - Caller's module name (for error output) 298 * PARAMETERS: module_name - Caller's module name (for error output)
299 * line_number - Caller's line number (for error output) 299 * line_number - Caller's line number (for error output)
300 * Pathname - Full pathname to the node 300 * pathname - Full pathname to the node
301 * node_flags - From Namespace node for the method/object 301 * node_flags - From Namespace node for the method/object
302 * Format - Printf format string + additional args 302 * format - Printf format string + additional args
303 * 303 *
304 * RETURN: None 304 * RETURN: None
305 * 305 *
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 14f523627a5e..41ebaaf8bb1a 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 0a40a851b354..312299721ba1 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 00a783661d0b..46f80e2c92f7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
590 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 && 590 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
591 *access_bit_width < 32) 591 *access_bit_width < 32)
592 *access_bit_width = 32; 592 *access_bit_width = 32;
593 else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
594 *access_bit_width < 64)
595 *access_bit_width = 64;
593 596
594 if ((bit_width + bit_offset) > *access_bit_width) { 597 if ((bit_width + bit_offset) > *access_bit_width) {
595 pr_warning(FW_BUG APEI_PFX 598 pr_warning(FW_BUG APEI_PFX
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index e6defd86b424..1e5d8a40101e 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
29#include <linux/time.h> 29#include <linux/time.h>
30#include <linux/cper.h> 30#include <linux/cper.h>
31#include <linux/acpi.h> 31#include <linux/acpi.h>
32#include <linux/pci.h>
32#include <linux/aer.h> 33#include <linux/aer.h>
33 34
34/* 35/*
@@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = {
249static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, 250static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
250 const struct acpi_hest_generic_data *gdata) 251 const struct acpi_hest_generic_data *gdata)
251{ 252{
253#ifdef CONFIG_ACPI_APEI_PCIEAER
254 struct pci_dev *dev;
255#endif
256
252 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) 257 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
253 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, 258 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
254 pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? 259 pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
281 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", 286 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
282 pfx, pcie->bridge.secondary_status, pcie->bridge.control); 287 pfx, pcie->bridge.secondary_status, pcie->bridge.control);
283#ifdef CONFIG_ACPI_APEI_PCIEAER 288#ifdef CONFIG_ACPI_APEI_PCIEAER
284 if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) { 289 dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
285 struct aer_capability_regs *aer_regs = (void *)pcie->aer_info; 290 pcie->device_id.bus, pcie->device_id.function);
286 cper_print_aer(pfx, gdata->error_severity, aer_regs); 291 if (!dev) {
292 pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
293 pcie->device_id.segment, pcie->device_id.bus,
294 pcie->device_id.slot, pcie->device_id.function);
295 return;
287 } 296 }
297 if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
298 cper_print_aer(pfx, dev, gdata->error_severity,
299 (struct aer_capability_regs *) pcie->aer_info);
300 pci_dev_put(dev);
288#endif 301#endif
289} 302}
290 303
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7efaeaa53b88..c5cd5b5513e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1111,7 +1111,7 @@ fail:
1111 return result; 1111 return result;
1112} 1112}
1113 1113
1114static int acpi_battery_remove(struct acpi_device *device, int type) 1114static int acpi_battery_remove(struct acpi_device *device)
1115{ 1115{
1116 struct acpi_battery *battery = NULL; 1116 struct acpi_battery *battery = NULL;
1117 1117
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1f0d457ecbcf..01708a165368 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -178,276 +178,6 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
178} 178}
179EXPORT_SYMBOL(acpi_bus_get_private_data); 179EXPORT_SYMBOL(acpi_bus_get_private_data);
180 180
181/* --------------------------------------------------------------------------
182 Power Management
183 -------------------------------------------------------------------------- */
184
185static const char *state_string(int state)
186{
187 switch (state) {
188 case ACPI_STATE_D0:
189 return "D0";
190 case ACPI_STATE_D1:
191 return "D1";
192 case ACPI_STATE_D2:
193 return "D2";
194 case ACPI_STATE_D3_HOT:
195 return "D3hot";
196 case ACPI_STATE_D3_COLD:
197 return "D3";
198 default:
199 return "(unknown)";
200 }
201}
202
203static int __acpi_bus_get_power(struct acpi_device *device, int *state)
204{
205 int result = ACPI_STATE_UNKNOWN;
206
207 if (!device || !state)
208 return -EINVAL;
209
210 if (!device->flags.power_manageable) {
211 /* TBD: Non-recursive algorithm for walking up hierarchy. */
212 *state = device->parent ?
213 device->parent->power.state : ACPI_STATE_D0;
214 goto out;
215 }
216
217 /*
218 * Get the device's power state either directly (via _PSC) or
219 * indirectly (via power resources).
220 */
221 if (device->power.flags.explicit_get) {
222 unsigned long long psc;
223 acpi_status status = acpi_evaluate_integer(device->handle,
224 "_PSC", NULL, &psc);
225 if (ACPI_FAILURE(status))
226 return -ENODEV;
227
228 result = psc;
229 }
230 /* The test below covers ACPI_STATE_UNKNOWN too. */
231 if (result <= ACPI_STATE_D2) {
232 ; /* Do nothing. */
233 } else if (device->power.flags.power_resources) {
234 int error = acpi_power_get_inferred_state(device, &result);
235 if (error)
236 return error;
237 } else if (result == ACPI_STATE_D3_HOT) {
238 result = ACPI_STATE_D3;
239 }
240
241 /*
242 * If we were unsure about the device parent's power state up to this
243 * point, the fact that the device is in D0 implies that the parent has
244 * to be in D0 too.
245 */
246 if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
247 && result == ACPI_STATE_D0)
248 device->parent->power.state = ACPI_STATE_D0;
249
250 *state = result;
251
252 out:
253 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
254 device->pnp.bus_id, state_string(*state)));
255
256 return 0;
257}
258
259
260/**
261 * acpi_device_set_power - Set power state of an ACPI device.
262 * @device: Device to set the power state of.
263 * @state: New power state to set.
264 *
265 * Callers must ensure that the device is power manageable before using this
266 * function.
267 */
268int acpi_device_set_power(struct acpi_device *device, int state)
269{
270 int result = 0;
271 acpi_status status = AE_OK;
272 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
273
274 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
275 return -EINVAL;
276
277 /* Make sure this is a valid target state */
278
279 if (state == device->power.state) {
280 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
281 state_string(state)));
282 return 0;
283 }
284
285 if (!device->power.states[state].flags.valid) {
286 printk(KERN_WARNING PREFIX "Device does not support %s\n",
287 state_string(state));
288 return -ENODEV;
289 }
290 if (device->parent && (state < device->parent->power.state)) {
291 printk(KERN_WARNING PREFIX
292 "Cannot set device to a higher-powered"
293 " state than parent\n");
294 return -ENODEV;
295 }
296
297 /* For D3cold we should execute _PS3, not _PS4. */
298 if (state == ACPI_STATE_D3_COLD)
299 object_name[3] = '3';
300
301 /*
302 * Transition Power
303 * ----------------
304 * On transitions to a high-powered state we first apply power (via
305 * power resources) then evalute _PSx. Conversly for transitions to
306 * a lower-powered state.
307 */
308 if (state < device->power.state) {
309 if (device->power.state >= ACPI_STATE_D3_HOT &&
310 state != ACPI_STATE_D0) {
311 printk(KERN_WARNING PREFIX
312 "Cannot transition to non-D0 state from D3\n");
313 return -ENODEV;
314 }
315 if (device->power.flags.power_resources) {
316 result = acpi_power_transition(device, state);
317 if (result)
318 goto end;
319 }
320 if (device->power.states[state].flags.explicit_set) {
321 status = acpi_evaluate_object(device->handle,
322 object_name, NULL, NULL);
323 if (ACPI_FAILURE(status)) {
324 result = -ENODEV;
325 goto end;
326 }
327 }
328 } else {
329 if (device->power.states[state].flags.explicit_set) {
330 status = acpi_evaluate_object(device->handle,
331 object_name, NULL, NULL);
332 if (ACPI_FAILURE(status)) {
333 result = -ENODEV;
334 goto end;
335 }
336 }
337 if (device->power.flags.power_resources) {
338 result = acpi_power_transition(device, state);
339 if (result)
340 goto end;
341 }
342 }
343
344 end:
345 if (result)
346 printk(KERN_WARNING PREFIX
347 "Device [%s] failed to transition to %s\n",
348 device->pnp.bus_id, state_string(state));
349 else {
350 device->power.state = state;
351 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
352 "Device [%s] transitioned to %s\n",
353 device->pnp.bus_id, state_string(state)));
354 }
355
356 return result;
357}
358EXPORT_SYMBOL(acpi_device_set_power);
359
360
361int acpi_bus_set_power(acpi_handle handle, int state)
362{
363 struct acpi_device *device;
364 int result;
365
366 result = acpi_bus_get_device(handle, &device);
367 if (result)
368 return result;
369
370 if (!device->flags.power_manageable) {
371 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
372 "Device [%s] is not power manageable\n",
373 dev_name(&device->dev)));
374 return -ENODEV;
375 }
376
377 return acpi_device_set_power(device, state);
378}
379EXPORT_SYMBOL(acpi_bus_set_power);
380
381
382int acpi_bus_init_power(struct acpi_device *device)
383{
384 int state;
385 int result;
386
387 if (!device)
388 return -EINVAL;
389
390 device->power.state = ACPI_STATE_UNKNOWN;
391
392 result = __acpi_bus_get_power(device, &state);
393 if (result)
394 return result;
395
396 if (device->power.flags.power_resources)
397 result = acpi_power_on_resources(device, state);
398
399 if (!result)
400 device->power.state = state;
401
402 return result;
403}
404
405
406int acpi_bus_update_power(acpi_handle handle, int *state_p)
407{
408 struct acpi_device *device;
409 int state;
410 int result;
411
412 result = acpi_bus_get_device(handle, &device);
413 if (result)
414 return result;
415
416 result = __acpi_bus_get_power(device, &state);
417 if (result)
418 return result;
419
420 result = acpi_device_set_power(device, state);
421 if (!result && state_p)
422 *state_p = state;
423
424 return result;
425}
426EXPORT_SYMBOL_GPL(acpi_bus_update_power);
427
428
429bool acpi_bus_power_manageable(acpi_handle handle)
430{
431 struct acpi_device *device;
432 int result;
433
434 result = acpi_bus_get_device(handle, &device);
435 return result ? false : device->flags.power_manageable;
436}
437
438EXPORT_SYMBOL(acpi_bus_power_manageable);
439
440bool acpi_bus_can_wakeup(acpi_handle handle)
441{
442 struct acpi_device *device;
443 int result;
444
445 result = acpi_bus_get_device(handle, &device);
446 return result ? false : device->wakeup.flags.valid;
447}
448
449EXPORT_SYMBOL(acpi_bus_can_wakeup);
450
451static void acpi_print_osc_error(acpi_handle handle, 181static void acpi_print_osc_error(acpi_handle handle,
452 struct acpi_osc_context *context, char *error) 182 struct acpi_osc_context *context, char *error)
453{ 183{
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index f0d936b65e37..86c7d5445c38 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static const struct acpi_device_id button_device_ids[] = {
75MODULE_DEVICE_TABLE(acpi, button_device_ids); 75MODULE_DEVICE_TABLE(acpi, button_device_ids);
76 76
77static int acpi_button_add(struct acpi_device *device); 77static int acpi_button_add(struct acpi_device *device);
78static int acpi_button_remove(struct acpi_device *device, int type); 78static int acpi_button_remove(struct acpi_device *device);
79static void acpi_button_notify(struct acpi_device *device, u32 event); 79static void acpi_button_notify(struct acpi_device *device, u32 event);
80 80
81#ifdef CONFIG_PM_SLEEP 81#ifdef CONFIG_PM_SLEEP
@@ -433,7 +433,7 @@ static int acpi_button_add(struct acpi_device *device)
433 return error; 433 return error;
434} 434}
435 435
436static int acpi_button_remove(struct acpi_device *device, int type) 436static int acpi_button_remove(struct acpi_device *device)
437{ 437{
438 struct acpi_button *button = acpi_driver_data(device); 438 struct acpi_button *button = acpi_driver_data(device);
439 439
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 811910b50b75..5523ba7d764d 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -34,46 +34,34 @@
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35#include <acpi/acpi_bus.h> 35#include <acpi/acpi_bus.h>
36#include <acpi/acpi_drivers.h> 36#include <acpi/acpi_drivers.h>
37#include <acpi/container.h>
38 37
39#define PREFIX "ACPI: " 38#define PREFIX "ACPI: "
40 39
41#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
42#define ACPI_CONTAINER_CLASS "container"
43
44#define INSTALL_NOTIFY_HANDLER 1
45#define UNINSTALL_NOTIFY_HANDLER 2
46
47#define _COMPONENT ACPI_CONTAINER_COMPONENT 40#define _COMPONENT ACPI_CONTAINER_COMPONENT
48ACPI_MODULE_NAME("container"); 41ACPI_MODULE_NAME("container");
49 42
50MODULE_AUTHOR("Anil S Keshavamurthy");
51MODULE_DESCRIPTION("ACPI container driver");
52MODULE_LICENSE("GPL");
53
54static int acpi_container_add(struct acpi_device *device);
55static int acpi_container_remove(struct acpi_device *device, int type);
56
57static const struct acpi_device_id container_device_ids[] = { 43static const struct acpi_device_id container_device_ids[] = {
58 {"ACPI0004", 0}, 44 {"ACPI0004", 0},
59 {"PNP0A05", 0}, 45 {"PNP0A05", 0},
60 {"PNP0A06", 0}, 46 {"PNP0A06", 0},
61 {"", 0}, 47 {"", 0},
62}; 48};
63MODULE_DEVICE_TABLE(acpi, container_device_ids);
64 49
65static struct acpi_driver acpi_container_driver = { 50static int container_device_attach(struct acpi_device *device,
66 .name = "container", 51 const struct acpi_device_id *not_used)
67 .class = ACPI_CONTAINER_CLASS, 52{
53 /*
54 * FIXME: This is necessary, so that acpi_eject_store() doesn't return
55 * -ENODEV for containers.
56 */
57 return 1;
58}
59
60static struct acpi_scan_handler container_device_handler = {
68 .ids = container_device_ids, 61 .ids = container_device_ids,
69 .ops = { 62 .attach = container_device_attach,
70 .add = acpi_container_add,
71 .remove = acpi_container_remove,
72 },
73}; 63};
74 64
75/*******************************************************************/
76
77static int is_device_present(acpi_handle handle) 65static int is_device_present(acpi_handle handle)
78{ 66{
79 acpi_handle temp; 67 acpi_handle temp;
@@ -92,73 +80,6 @@ static int is_device_present(acpi_handle handle)
92 return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT); 80 return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
93} 81}
94 82
95static bool is_container_device(const char *hid)
96{
97 const struct acpi_device_id *container_id;
98
99 for (container_id = container_device_ids;
100 container_id->id[0]; container_id++) {
101 if (!strcmp((char *)container_id->id, hid))
102 return true;
103 }
104
105 return false;
106}
107
108/*******************************************************************/
109static int acpi_container_add(struct acpi_device *device)
110{
111 struct acpi_container *container;
112
113 container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
114 if (!container)
115 return -ENOMEM;
116
117 container->handle = device->handle;
118 strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
119 strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
120 device->driver_data = container;
121
122 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
123 acpi_device_name(device), acpi_device_bid(device)));
124
125 return 0;
126}
127
128static int acpi_container_remove(struct acpi_device *device, int type)
129{
130 acpi_status status = AE_OK;
131 struct acpi_container *pc = NULL;
132
133 pc = acpi_driver_data(device);
134 kfree(pc);
135 return status;
136}
137
138static int container_device_add(struct acpi_device **device, acpi_handle handle)
139{
140 acpi_handle phandle;
141 struct acpi_device *pdev;
142 int result;
143
144
145 if (acpi_get_parent(handle, &phandle)) {
146 return -ENODEV;
147 }
148
149 if (acpi_bus_get_device(phandle, &pdev)) {
150 return -ENODEV;
151 }
152
153 if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_DEVICE)) {
154 return -ENODEV;
155 }
156
157 result = acpi_bus_start(*device);
158
159 return result;
160}
161
162static void container_notify_cb(acpi_handle handle, u32 type, void *context) 83static void container_notify_cb(acpi_handle handle, u32 type, void *context)
163{ 84{
164 struct acpi_device *device = NULL; 85 struct acpi_device *device = NULL;
@@ -167,6 +88,8 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
167 acpi_status status; 88 acpi_status status;
168 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ 89 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
169 90
91 acpi_scan_lock_acquire();
92
170 switch (type) { 93 switch (type) {
171 case ACPI_NOTIFY_BUS_CHECK: 94 case ACPI_NOTIFY_BUS_CHECK:
172 /* Fall through */ 95 /* Fall through */
@@ -182,7 +105,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
182 /* device exist and this is a remove request */ 105 /* device exist and this is a remove request */
183 device->flags.eject_pending = 1; 106 device->flags.eject_pending = 1;
184 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); 107 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
185 return; 108 goto out;
186 } 109 }
187 break; 110 break;
188 } 111 }
@@ -190,11 +113,16 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
190 if (!ACPI_FAILURE(status) || device) 113 if (!ACPI_FAILURE(status) || device)
191 break; 114 break;
192 115
193 result = container_device_add(&device, handle); 116 result = acpi_bus_scan(handle);
194 if (result) { 117 if (result) {
195 acpi_handle_warn(handle, "Failed to add container\n"); 118 acpi_handle_warn(handle, "Failed to add container\n");
196 break; 119 break;
197 } 120 }
121 result = acpi_bus_get_device(handle, &device);
122 if (result) {
123 acpi_handle_warn(handle, "Missing device object\n");
124 break;
125 }
198 126
199 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE); 127 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
200 ost_code = ACPI_OST_SC_SUCCESS; 128 ost_code = ACPI_OST_SC_SUCCESS;
@@ -204,98 +132,59 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
204 if (!acpi_bus_get_device(handle, &device) && device) { 132 if (!acpi_bus_get_device(handle, &device) && device) {
205 device->flags.eject_pending = 1; 133 device->flags.eject_pending = 1;
206 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); 134 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
207 return; 135 goto out;
208 } 136 }
209 break; 137 break;
210 138
211 default: 139 default:
212 /* non-hotplug event; possibly handled by other handler */ 140 /* non-hotplug event; possibly handled by other handler */
213 return; 141 goto out;
214 } 142 }
215 143
216 /* Inform firmware that the hotplug operation has completed */ 144 /* Inform firmware that the hotplug operation has completed */
217 (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL); 145 (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
218 return; 146
147 out:
148 acpi_scan_lock_release();
219} 149}
220 150
221static acpi_status 151static bool is_container(acpi_handle handle)
222container_walk_namespace_cb(acpi_handle handle,
223 u32 lvl, void *context, void **rv)
224{ 152{
225 char *hid = NULL;
226 struct acpi_device_info *info; 153 struct acpi_device_info *info;
227 acpi_status status; 154 bool ret = false;
228 int *action = context;
229
230 status = acpi_get_object_info(handle, &info);
231 if (ACPI_FAILURE(status)) {
232 return AE_OK;
233 }
234 155
235 if (info->valid & ACPI_VALID_HID) 156 if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
236 hid = info->hardware_id.string; 157 return false;
237 158
238 if (hid == NULL) { 159 if (info->valid & ACPI_VALID_HID) {
239 goto end; 160 const struct acpi_device_id *id;
240 }
241
242 if (!is_container_device(hid))
243 goto end;
244 161
245 switch (*action) { 162 for (id = container_device_ids; id->id[0]; id++) {
246 case INSTALL_NOTIFY_HANDLER: 163 ret = !strcmp((char *)id->id, info->hardware_id.string);
247 acpi_install_notify_handler(handle, 164 if (ret)
248 ACPI_SYSTEM_NOTIFY, 165 break;
249 container_notify_cb, NULL); 166 }
250 break;
251 case UNINSTALL_NOTIFY_HANDLER:
252 acpi_remove_notify_handler(handle,
253 ACPI_SYSTEM_NOTIFY,
254 container_notify_cb);
255 break;
256 default:
257 break;
258 } 167 }
259
260 end:
261 kfree(info); 168 kfree(info);
262 169 return ret;
263 return AE_OK;
264} 170}
265 171
266static int __init acpi_container_init(void) 172static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
173 u32 lvl, void *ctxt,
174 void **retv)
267{ 175{
268 int result = 0; 176 if (is_container(handle))
269 int action = INSTALL_NOTIFY_HANDLER; 177 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
270 178 container_notify_cb, NULL);
271 result = acpi_bus_register_driver(&acpi_container_driver);
272 if (result < 0) {
273 return (result);
274 }
275
276 /* register notify handler to every container device */
277 acpi_walk_namespace(ACPI_TYPE_DEVICE,
278 ACPI_ROOT_OBJECT,
279 ACPI_UINT32_MAX,
280 container_walk_namespace_cb, NULL, &action, NULL);
281 179
282 return (0); 180 return AE_OK;
283} 181}
284 182
285static void __exit acpi_container_exit(void) 183void __init acpi_container_init(void)
286{ 184{
287 int action = UNINSTALL_NOTIFY_HANDLER; 185 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
186 acpi_container_register_notify_handler, NULL,
187 NULL, NULL);
288 188
289 189 acpi_scan_add_handler(&container_device_handler);
290 acpi_walk_namespace(ACPI_TYPE_DEVICE,
291 ACPI_ROOT_OBJECT,
292 ACPI_UINT32_MAX,
293 container_walk_namespace_cb, NULL, &action, NULL);
294
295 acpi_bus_unregister_driver(&acpi_container_driver);
296
297 return;
298} 190}
299
300module_init(acpi_container_init);
301module_exit(acpi_container_exit);
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
new file mode 100644
index 000000000000..5c15a91faf0b
--- /dev/null
+++ b/drivers/acpi/csrt.c
@@ -0,0 +1,159 @@
1/*
2 * Support for Core System Resources Table (CSRT)
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) "ACPI: CSRT: " fmt
14
15#include <linux/acpi.h>
16#include <linux/device.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/sizes.h>
21
22ACPI_MODULE_NAME("CSRT");
23
24static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
25 const struct acpi_csrt_group *grp)
26{
27 const struct acpi_csrt_shared_info *si;
28 struct resource res[3];
29 size_t nres;
30 int ret;
31
32 memset(res, 0, sizeof(res));
33 nres = 0;
34
35 si = (const struct acpi_csrt_shared_info *)&grp[1];
36 /*
37 * The peripherals that are listed on CSRT typically support only
38 * 32-bit addresses so we only use the low part of MMIO base for
39 * now.
40 */
41 if (!si->mmio_base_high && si->mmio_base_low) {
42 /*
43 * There is no size of the memory resource in shared_info
44 * so we assume that it is 4k here.
45 */
46 res[nres].start = si->mmio_base_low;
47 res[nres].end = res[0].start + SZ_4K - 1;
48 res[nres++].flags = IORESOURCE_MEM;
49 }
50
51 if (si->gsi_interrupt) {
52 int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
53 si->interrupt_mode,
54 si->interrupt_polarity);
55 res[nres].start = irq;
56 res[nres].end = irq;
57 res[nres++].flags = IORESOURCE_IRQ;
58 }
59
60 if (si->base_request_line || si->num_handshake_signals) {
61 /*
62 * We pass the driver a DMA resource describing the range
63 * of request lines the device supports.
64 */
65 res[nres].start = si->base_request_line;
66 res[nres].end = res[nres].start + si->num_handshake_signals - 1;
67 res[nres++].flags = IORESOURCE_DMA;
68 }
69
70 ret = platform_device_add_resources(pdev, res, nres);
71 if (ret) {
72 if (si->gsi_interrupt)
73 acpi_unregister_gsi(si->gsi_interrupt);
74 return ret;
75 }
76
77 return 0;
78}
79
80static int __init
81acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
82{
83 struct platform_device *pdev;
84 char vendor[5], name[16];
85 int ret, i;
86
87 vendor[0] = grp->vendor_id;
88 vendor[1] = grp->vendor_id >> 8;
89 vendor[2] = grp->vendor_id >> 16;
90 vendor[3] = grp->vendor_id >> 24;
91 vendor[4] = '\0';
92
93 if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
94 return -ENODEV;
95
96 snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
97 pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
98 if (!pdev)
99 return -ENOMEM;
100
101 /* Add resources based on the shared info */
102 ret = acpi_csrt_parse_shared_info(pdev, grp);
103 if (ret)
104 goto fail;
105
106 ret = platform_device_add(pdev);
107 if (ret)
108 goto fail;
109
110 for (i = 0; i < pdev->num_resources; i++)
111 dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
112
113 return 0;
114
115fail:
116 platform_device_put(pdev);
117 return ret;
118}
119
120/*
121 * CSRT or Core System Resources Table is a proprietary ACPI table
122 * introduced by Microsoft. This table can contain devices that are not in
123 * the system DSDT table. In particular DMA controllers might be described
124 * here.
125 *
126 * We present these devices as normal platform devices that don't have ACPI
127 * IDs or handle. The platform device name will be something like
128 * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
129 */
130void __init acpi_csrt_init(void)
131{
132 struct acpi_csrt_group *grp, *end;
133 struct acpi_table_csrt *csrt;
134 acpi_status status;
135 int ret;
136
137 status = acpi_get_table(ACPI_SIG_CSRT, 0,
138 (struct acpi_table_header **)&csrt);
139 if (ACPI_FAILURE(status)) {
140 if (status != AE_NOT_FOUND)
141 pr_warn("failed to get the CSRT table\n");
142 return;
143 }
144
145 pr_debug("parsing CSRT table for devices\n");
146
147 grp = (struct acpi_csrt_group *)(csrt + 1);
148 end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
149
150 while (grp < end) {
151 ret = acpi_csrt_parse_resource_group(grp);
152 if (ret) {
153 pr_warn("error in parsing resource group: %d\n", ret);
154 return;
155 }
156
157 grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
158 }
159}
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 5d42c2414ae5..6adfc706a1de 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * debugfs.c - ACPI debugfs interface to userspace. 2 * custom_method.c - debugfs interface for customizing ACPI control method
3 */ 3 */
4 4
5#include <linux/init.h> 5#include <linux/init.h>
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c6ff606c6d5b..dd314ef9bff1 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -30,6 +30,12 @@
30 30
31#include <acpi/acpi.h> 31#include <acpi/acpi.h>
32#include <acpi/acpi_bus.h> 32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35#include "internal.h"
36
37#define _COMPONENT ACPI_POWER_COMPONENT
38ACPI_MODULE_NAME("device_pm");
33 39
34static DEFINE_MUTEX(acpi_pm_notifier_lock); 40static DEFINE_MUTEX(acpi_pm_notifier_lock);
35 41
@@ -94,6 +100,293 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
94} 100}
95 101
96/** 102/**
103 * acpi_power_state_string - String representation of ACPI device power state.
104 * @state: ACPI device power state to return the string representation of.
105 */
106const char *acpi_power_state_string(int state)
107{
108 switch (state) {
109 case ACPI_STATE_D0:
110 return "D0";
111 case ACPI_STATE_D1:
112 return "D1";
113 case ACPI_STATE_D2:
114 return "D2";
115 case ACPI_STATE_D3_HOT:
116 return "D3hot";
117 case ACPI_STATE_D3_COLD:
118 return "D3cold";
119 default:
120 return "(unknown)";
121 }
122}
123
124/**
125 * acpi_device_get_power - Get power state of an ACPI device.
126 * @device: Device to get the power state of.
127 * @state: Place to store the power state of the device.
128 *
129 * This function does not update the device's power.state field, but it may
130 * update its parent's power.state field (when the parent's power state is
131 * unknown and the device's power state turns out to be D0).
132 */
133int acpi_device_get_power(struct acpi_device *device, int *state)
134{
135 int result = ACPI_STATE_UNKNOWN;
136
137 if (!device || !state)
138 return -EINVAL;
139
140 if (!device->flags.power_manageable) {
141 /* TBD: Non-recursive algorithm for walking up hierarchy. */
142 *state = device->parent ?
143 device->parent->power.state : ACPI_STATE_D0;
144 goto out;
145 }
146
147 /*
148 * Get the device's power state either directly (via _PSC) or
149 * indirectly (via power resources).
150 */
151 if (device->power.flags.explicit_get) {
152 unsigned long long psc;
153 acpi_status status = acpi_evaluate_integer(device->handle,
154 "_PSC", NULL, &psc);
155 if (ACPI_FAILURE(status))
156 return -ENODEV;
157
158 result = psc;
159 }
160 /* The test below covers ACPI_STATE_UNKNOWN too. */
161 if (result <= ACPI_STATE_D2) {
162 ; /* Do nothing. */
163 } else if (device->power.flags.power_resources) {
164 int error = acpi_power_get_inferred_state(device, &result);
165 if (error)
166 return error;
167 } else if (result == ACPI_STATE_D3_HOT) {
168 result = ACPI_STATE_D3;
169 }
170
171 /*
172 * If we were unsure about the device parent's power state up to this
173 * point, the fact that the device is in D0 implies that the parent has
174 * to be in D0 too.
175 */
176 if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
177 && result == ACPI_STATE_D0)
178 device->parent->power.state = ACPI_STATE_D0;
179
180 *state = result;
181
182 out:
183 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
184 device->pnp.bus_id, acpi_power_state_string(*state)));
185
186 return 0;
187}
188
189static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
190{
191 if (adev->power.states[state].flags.explicit_set) {
192 char method[5] = { '_', 'P', 'S', '0' + state, '\0' };
193 acpi_status status;
194
195 status = acpi_evaluate_object(adev->handle, method, NULL, NULL);
196 if (ACPI_FAILURE(status))
197 return -ENODEV;
198 }
199 return 0;
200}
201
202/**
203 * acpi_device_set_power - Set power state of an ACPI device.
204 * @device: Device to set the power state of.
205 * @state: New power state to set.
206 *
207 * Callers must ensure that the device is power manageable before using this
208 * function.
209 */
210int acpi_device_set_power(struct acpi_device *device, int state)
211{
212 int result = 0;
213 bool cut_power = false;
214
215 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
216 return -EINVAL;
217
218 /* Make sure this is a valid target state */
219
220 if (state == device->power.state) {
221 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
222 acpi_power_state_string(state)));
223 return 0;
224 }
225
226 if (!device->power.states[state].flags.valid) {
227 printk(KERN_WARNING PREFIX "Device does not support %s\n",
228 acpi_power_state_string(state));
229 return -ENODEV;
230 }
231 if (device->parent && (state < device->parent->power.state)) {
232 printk(KERN_WARNING PREFIX
233 "Cannot set device to a higher-powered"
234 " state than parent\n");
235 return -ENODEV;
236 }
237
238 /* For D3cold we should first transition into D3hot. */
239 if (state == ACPI_STATE_D3_COLD
240 && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) {
241 state = ACPI_STATE_D3_HOT;
242 cut_power = true;
243 }
244
245 if (state < device->power.state && state != ACPI_STATE_D0
246 && device->power.state >= ACPI_STATE_D3_HOT) {
247 printk(KERN_WARNING PREFIX
248 "Cannot transition to non-D0 state from D3\n");
249 return -ENODEV;
250 }
251
252 /*
253 * Transition Power
254 * ----------------
255 * In accordance with the ACPI specification first apply power (via
256 * power resources) and then evalute _PSx.
257 */
258 if (device->power.flags.power_resources) {
259 result = acpi_power_transition(device, state);
260 if (result)
261 goto end;
262 }
263 result = acpi_dev_pm_explicit_set(device, state);
264 if (result)
265 goto end;
266
267 if (cut_power) {
268 device->power.state = state;
269 state = ACPI_STATE_D3_COLD;
270 result = acpi_power_transition(device, state);
271 }
272
273 end:
274 if (result) {
275 printk(KERN_WARNING PREFIX
276 "Device [%s] failed to transition to %s\n",
277 device->pnp.bus_id,
278 acpi_power_state_string(state));
279 } else {
280 device->power.state = state;
281 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
282 "Device [%s] transitioned to %s\n",
283 device->pnp.bus_id,
284 acpi_power_state_string(state)));
285 }
286
287 return result;
288}
289EXPORT_SYMBOL(acpi_device_set_power);
290
291int acpi_bus_set_power(acpi_handle handle, int state)
292{
293 struct acpi_device *device;
294 int result;
295
296 result = acpi_bus_get_device(handle, &device);
297 if (result)
298 return result;
299
300 if (!device->flags.power_manageable) {
301 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
302 "Device [%s] is not power manageable\n",
303 dev_name(&device->dev)));
304 return -ENODEV;
305 }
306
307 return acpi_device_set_power(device, state);
308}
309EXPORT_SYMBOL(acpi_bus_set_power);
310
311int acpi_bus_init_power(struct acpi_device *device)
312{
313 int state;
314 int result;
315
316 if (!device)
317 return -EINVAL;
318
319 device->power.state = ACPI_STATE_UNKNOWN;
320
321 result = acpi_device_get_power(device, &state);
322 if (result)
323 return result;
324
325 if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
326 result = acpi_power_on_resources(device, state);
327 if (result)
328 return result;
329
330 result = acpi_dev_pm_explicit_set(device, state);
331 if (result)
332 return result;
333 } else if (state == ACPI_STATE_UNKNOWN) {
334 /* No power resources and missing _PSC? Try to force D0. */
335 state = ACPI_STATE_D0;
336 result = acpi_dev_pm_explicit_set(device, state);
337 if (result)
338 return result;
339 }
340 device->power.state = state;
341 return 0;
342}
343
344int acpi_bus_update_power(acpi_handle handle, int *state_p)
345{
346 struct acpi_device *device;
347 int state;
348 int result;
349
350 result = acpi_bus_get_device(handle, &device);
351 if (result)
352 return result;
353
354 result = acpi_device_get_power(device, &state);
355 if (result)
356 return result;
357
358 if (state == ACPI_STATE_UNKNOWN)
359 state = ACPI_STATE_D0;
360
361 result = acpi_device_set_power(device, state);
362 if (!result && state_p)
363 *state_p = state;
364
365 return result;
366}
367EXPORT_SYMBOL_GPL(acpi_bus_update_power);
368
369bool acpi_bus_power_manageable(acpi_handle handle)
370{
371 struct acpi_device *device;
372 int result;
373
374 result = acpi_bus_get_device(handle, &device);
375 return result ? false : device->flags.power_manageable;
376}
377EXPORT_SYMBOL(acpi_bus_power_manageable);
378
379bool acpi_bus_can_wakeup(acpi_handle handle)
380{
381 struct acpi_device *device;
382 int result;
383
384 result = acpi_bus_get_device(handle, &device);
385 return result ? false : device->wakeup.flags.valid;
386}
387EXPORT_SYMBOL(acpi_bus_can_wakeup);
388
389/**
97 * acpi_device_power_state - Get preferred power state of ACPI device. 390 * acpi_device_power_state - Get preferred power state of ACPI device.
98 * @dev: Device whose preferred target power state to return. 391 * @dev: Device whose preferred target power state to return.
99 * @adev: ACPI device node corresponding to @dev. 392 * @adev: ACPI device node corresponding to @dev.
@@ -213,7 +506,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
213 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 506 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
214 struct acpi_device *adev; 507 struct acpi_device *adev;
215 508
216 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 509 if (!handle || acpi_bus_get_device(handle, &adev)) {
217 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); 510 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
218 return -ENODEV; 511 return -ENODEV;
219 } 512 }
@@ -290,7 +583,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
290 return -EINVAL; 583 return -EINVAL;
291 584
292 handle = DEVICE_ACPI_HANDLE(phys_dev); 585 handle = DEVICE_ACPI_HANDLE(phys_dev);
293 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 586 if (!handle || acpi_bus_get_device(handle, &adev)) {
294 dev_dbg(phys_dev, "ACPI handle without context in %s!\n", 587 dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
295 __func__); 588 __func__);
296 return -ENODEV; 589 return -ENODEV;
@@ -304,7 +597,7 @@ static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
304 void *context) {} 597 void *context) {}
305#endif /* CONFIG_PM_RUNTIME */ 598#endif /* CONFIG_PM_RUNTIME */
306 599
307 #ifdef CONFIG_PM_SLEEP 600#ifdef CONFIG_PM_SLEEP
308/** 601/**
309 * __acpi_device_sleep_wake - Enable or disable device to wake up the system. 602 * __acpi_device_sleep_wake - Enable or disable device to wake up the system.
310 * @dev: Device to enable/desible to wake up the system. 603 * @dev: Device to enable/desible to wake up the system.
@@ -334,7 +627,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
334 return -EINVAL; 627 return -EINVAL;
335 628
336 handle = DEVICE_ACPI_HANDLE(dev); 629 handle = DEVICE_ACPI_HANDLE(dev);
337 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 630 if (!handle || acpi_bus_get_device(handle, &adev)) {
338 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); 631 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
339 return -ENODEV; 632 return -ENODEV;
340 } 633 }
@@ -353,7 +646,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
353 * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. 646 * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
354 * @dev: Device to get the ACPI node for. 647 * @dev: Device to get the ACPI node for.
355 */ 648 */
356static struct acpi_device *acpi_dev_pm_get_node(struct device *dev) 649struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
357{ 650{
358 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 651 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
359 struct acpi_device *adev; 652 struct acpi_device *adev;
@@ -665,3 +958,59 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
665 } 958 }
666} 959}
667EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); 960EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
961
962/**
963 * acpi_dev_pm_add_dependent - Add physical device depending for PM.
964 * @handle: Handle of ACPI device node.
965 * @depdev: Device depending on that node for PM.
966 */
967void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
968{
969 struct acpi_device_physical_node *dep;
970 struct acpi_device *adev;
971
972 if (!depdev || acpi_bus_get_device(handle, &adev))
973 return;
974
975 mutex_lock(&adev->physical_node_lock);
976
977 list_for_each_entry(dep, &adev->power_dependent, node)
978 if (dep->dev == depdev)
979 goto out;
980
981 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
982 if (dep) {
983 dep->dev = depdev;
984 list_add_tail(&dep->node, &adev->power_dependent);
985 }
986
987 out:
988 mutex_unlock(&adev->physical_node_lock);
989}
990EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
991
992/**
993 * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
994 * @handle: Handle of ACPI device node.
995 * @depdev: Device depending on that node for PM.
996 */
997void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
998{
999 struct acpi_device_physical_node *dep;
1000 struct acpi_device *adev;
1001
1002 if (!depdev || acpi_bus_get_device(handle, &adev))
1003 return;
1004
1005 mutex_lock(&adev->physical_node_lock);
1006
1007 list_for_each_entry(dep, &adev->power_dependent, node)
1008 if (dep->dev == depdev) {
1009 list_del(&dep->node);
1010 kfree(dep);
1011 break;
1012 }
1013
1014 mutex_unlock(&adev->physical_node_lock);
1015}
1016EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index f32bd47b35e0..4fdea381ef21 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -310,8 +310,6 @@ static int dock_present(struct dock_station *ds)
310static struct acpi_device * dock_create_acpi_device(acpi_handle handle) 310static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
311{ 311{
312 struct acpi_device *device; 312 struct acpi_device *device;
313 struct acpi_device *parent_device;
314 acpi_handle parent;
315 int ret; 313 int ret;
316 314
317 if (acpi_bus_get_device(handle, &device)) { 315 if (acpi_bus_get_device(handle, &device)) {
@@ -319,16 +317,11 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
319 * no device created for this object, 317 * no device created for this object,
320 * so we should create one. 318 * so we should create one.
321 */ 319 */
322 acpi_get_parent(handle, &parent); 320 ret = acpi_bus_scan(handle);
323 if (acpi_bus_get_device(parent, &parent_device)) 321 if (ret)
324 parent_device = NULL;
325
326 ret = acpi_bus_add(&device, parent_device, handle,
327 ACPI_BUS_TYPE_DEVICE);
328 if (ret) {
329 pr_debug("error adding bus, %x\n", -ret); 322 pr_debug("error adding bus, %x\n", -ret);
330 return NULL; 323
331 } 324 acpi_bus_get_device(handle, &device);
332 } 325 }
333 return device; 326 return device;
334} 327}
@@ -343,13 +336,9 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
343static void dock_remove_acpi_device(acpi_handle handle) 336static void dock_remove_acpi_device(acpi_handle handle)
344{ 337{
345 struct acpi_device *device; 338 struct acpi_device *device;
346 int ret;
347 339
348 if (!acpi_bus_get_device(handle, &device)) { 340 if (!acpi_bus_get_device(handle, &device))
349 ret = acpi_bus_trim(device, 1); 341 acpi_bus_trim(device);
350 if (ret)
351 pr_debug("error removing bus, %x\n", -ret);
352 }
353} 342}
354 343
355/** 344/**
@@ -755,7 +744,9 @@ static void acpi_dock_deferred_cb(void *context)
755{ 744{
756 struct dock_data *data = context; 745 struct dock_data *data = context;
757 746
747 acpi_scan_lock_acquire();
758 dock_notify(data->handle, data->event, data->ds); 748 dock_notify(data->handle, data->event, data->ds);
749 acpi_scan_lock_release();
759 kfree(data); 750 kfree(data);
760} 751}
761 752
@@ -768,20 +759,31 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
768 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK 759 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
769 && event != ACPI_NOTIFY_EJECT_REQUEST) 760 && event != ACPI_NOTIFY_EJECT_REQUEST)
770 return 0; 761 return 0;
762
763 acpi_scan_lock_acquire();
764
771 list_for_each_entry(dock_station, &dock_stations, sibling) { 765 list_for_each_entry(dock_station, &dock_stations, sibling) {
772 if (dock_station->handle == handle) { 766 if (dock_station->handle == handle) {
773 struct dock_data *dd; 767 struct dock_data *dd;
768 acpi_status status;
774 769
775 dd = kmalloc(sizeof(*dd), GFP_KERNEL); 770 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
776 if (!dd) 771 if (!dd)
777 return 0; 772 break;
773
778 dd->handle = handle; 774 dd->handle = handle;
779 dd->event = event; 775 dd->event = event;
780 dd->ds = dock_station; 776 dd->ds = dock_station;
781 acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd); 777 status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
782 return 0 ; 778 dd);
779 if (ACPI_FAILURE(status))
780 kfree(dd);
781
782 break;
783 } 783 }
784 } 784 }
785
786 acpi_scan_lock_release();
785 return 0; 787 return 0;
786} 788}
787 789
@@ -836,7 +838,7 @@ static ssize_t show_docked(struct device *dev,
836 838
837 struct dock_station *dock_station = dev->platform_data; 839 struct dock_station *dock_station = dev->platform_data;
838 840
839 if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp))) 841 if (!acpi_bus_get_device(dock_station->handle, &tmp))
840 return snprintf(buf, PAGE_SIZE, "1\n"); 842 return snprintf(buf, PAGE_SIZE, "1\n");
841 return snprintf(buf, PAGE_SIZE, "0\n"); 843 return snprintf(buf, PAGE_SIZE, "0\n");
842} 844}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 354007d490d1..d45b2871d33b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -852,7 +852,7 @@ static int acpi_ec_add(struct acpi_device *device)
852 return ret; 852 return ret;
853} 853}
854 854
855static int acpi_ec_remove(struct acpi_device *device, int type) 855static int acpi_ec_remove(struct acpi_device *device)
856{ 856{
857 struct acpi_ec *ec; 857 struct acpi_ec *ec;
858 struct acpi_ec_query_handler *handler, *tmp; 858 struct acpi_ec_query_handler *handler, *tmp;
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 3bd6a54702d6..f815da82c765 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -45,7 +45,7 @@ MODULE_DESCRIPTION("ACPI Fan Driver");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46 46
47static int acpi_fan_add(struct acpi_device *device); 47static int acpi_fan_add(struct acpi_device *device);
48static int acpi_fan_remove(struct acpi_device *device, int type); 48static int acpi_fan_remove(struct acpi_device *device);
49 49
50static const struct acpi_device_id fan_device_ids[] = { 50static const struct acpi_device_id fan_device_ids[] = {
51 {"PNP0C0B", 0}, 51 {"PNP0C0B", 0},
@@ -172,7 +172,7 @@ static int acpi_fan_add(struct acpi_device *device)
172 return result; 172 return result;
173} 173}
174 174
175static int acpi_fan_remove(struct acpi_device *device, int type) 175static int acpi_fan_remove(struct acpi_device *device)
176{ 176{
177 struct thermal_cooling_device *cdev = acpi_driver_data(device); 177 struct thermal_cooling_device *cdev = acpi_driver_data(device);
178 178
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 35da18113216..ef6f155469b5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -68,6 +68,9 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
68{ 68{
69 struct acpi_bus_type *tmp, *ret = NULL; 69 struct acpi_bus_type *tmp, *ret = NULL;
70 70
71 if (!type)
72 return NULL;
73
71 down_read(&bus_type_sem); 74 down_read(&bus_type_sem);
72 list_for_each_entry(tmp, &bus_type_list, list) { 75 list_for_each_entry(tmp, &bus_type_list, list) {
73 if (tmp->bus == type) { 76 if (tmp->bus == type) {
@@ -95,40 +98,31 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
95 return ret; 98 return ret;
96} 99}
97 100
98/* Get device's handler per its address under its parent */ 101static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
99struct acpi_find_child { 102 void *addr_p, void **ret_p)
100 acpi_handle handle;
101 u64 address;
102};
103
104static acpi_status
105do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
106{ 103{
104 unsigned long long addr;
107 acpi_status status; 105 acpi_status status;
108 struct acpi_device_info *info; 106
109 struct acpi_find_child *find = context; 107 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
110 108 if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
111 status = acpi_get_object_info(handle, &info); 109 *ret_p = handle;
112 if (ACPI_SUCCESS(status)) { 110 return AE_CTRL_TERMINATE;
113 if ((info->address == find->address)
114 && (info->valid & ACPI_VALID_ADR))
115 find->handle = handle;
116 kfree(info);
117 } 111 }
118 return AE_OK; 112 return AE_OK;
119} 113}
120 114
121acpi_handle acpi_get_child(acpi_handle parent, u64 address) 115acpi_handle acpi_get_child(acpi_handle parent, u64 address)
122{ 116{
123 struct acpi_find_child find = { NULL, address }; 117 void *ret = NULL;
124 118
125 if (!parent) 119 if (!parent)
126 return NULL; 120 return NULL;
127 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
128 1, do_acpi_find_child, NULL, &find, NULL);
129 return find.handle;
130}
131 121
122 acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
123 do_acpi_find_child, &address, &ret);
124 return (acpi_handle)ret;
125}
132EXPORT_SYMBOL(acpi_get_child); 126EXPORT_SYMBOL(acpi_get_child);
133 127
134static int acpi_bind_one(struct device *dev, acpi_handle handle) 128static int acpi_bind_one(struct device *dev, acpi_handle handle)
@@ -269,28 +263,39 @@ static int acpi_platform_notify(struct device *dev)
269{ 263{
270 struct acpi_bus_type *type; 264 struct acpi_bus_type *type;
271 acpi_handle handle; 265 acpi_handle handle;
272 int ret = -EINVAL; 266 int ret;
273 267
274 ret = acpi_bind_one(dev, NULL); 268 ret = acpi_bind_one(dev, NULL);
275 if (!ret) 269 if (ret && (!dev->bus || !dev->parent)) {
276 goto out;
277
278 if (!dev->bus || !dev->parent) {
279 /* bridge devices genernally haven't bus or parent */ 270 /* bridge devices genernally haven't bus or parent */
280 ret = acpi_find_bridge_device(dev, &handle); 271 ret = acpi_find_bridge_device(dev, &handle);
281 goto end; 272 if (!ret) {
273 ret = acpi_bind_one(dev, handle);
274 if (ret)
275 goto out;
276 }
282 } 277 }
278
283 type = acpi_get_bus_type(dev->bus); 279 type = acpi_get_bus_type(dev->bus);
284 if (!type) { 280 if (ret) {
285 DBG("No ACPI bus support for %s\n", dev_name(dev)); 281 if (!type || !type->find_device) {
286 ret = -EINVAL; 282 DBG("No ACPI bus support for %s\n", dev_name(dev));
287 goto end; 283 ret = -EINVAL;
284 goto out;
285 }
286
287 ret = type->find_device(dev, &handle);
288 if (ret) {
289 DBG("Unable to get handle for %s\n", dev_name(dev));
290 goto out;
291 }
292 ret = acpi_bind_one(dev, handle);
293 if (ret)
294 goto out;
288 } 295 }
289 if ((ret = type->find_device(dev, &handle)) != 0) 296
290 DBG("Can't get handler for %s\n", dev_name(dev)); 297 if (type && type->setup)
291 end: 298 type->setup(dev);
292 if (!ret)
293 acpi_bind_one(dev, handle);
294 299
295 out: 300 out:
296#if ACPI_GLUE_DEBUG 301#if ACPI_GLUE_DEBUG
@@ -309,6 +314,12 @@ static int acpi_platform_notify(struct device *dev)
309 314
310static int acpi_platform_notify_remove(struct device *dev) 315static int acpi_platform_notify_remove(struct device *dev)
311{ 316{
317 struct acpi_bus_type *type;
318
319 type = acpi_get_bus_type(dev->bus);
320 if (type && type->cleanup)
321 type->cleanup(dev);
322
312 acpi_unbind_one(dev); 323 acpi_unbind_one(dev);
313 return 0; 324 return 0;
314} 325}
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a0cc796932f7..13b1d39d7cdf 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -70,7 +70,7 @@ static int acpi_hed_add(struct acpi_device *device)
70 return 0; 70 return 0;
71} 71}
72 72
73static int acpi_hed_remove(struct acpi_device *device, int type) 73static int acpi_hed_remove(struct acpi_device *device)
74{ 74{
75 hed_handle = NULL; 75 hed_handle = NULL;
76 return 0; 76 return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c407cdc1ec1..79092328cf06 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -25,7 +25,16 @@
25 25
26int init_acpi_device_notify(void); 26int init_acpi_device_notify(void);
27int acpi_scan_init(void); 27int acpi_scan_init(void);
28void acpi_pci_root_init(void);
29void acpi_pci_link_init(void);
30void acpi_platform_init(void);
28int acpi_sysfs_init(void); 31int acpi_sysfs_init(void);
32void acpi_csrt_init(void);
33#ifdef CONFIG_ACPI_CONTAINER
34void acpi_container_init(void);
35#else
36static inline void acpi_container_init(void) {}
37#endif
29 38
30#ifdef CONFIG_DEBUG_FS 39#ifdef CONFIG_DEBUG_FS
31extern struct dentry *acpi_debugfs_dir; 40extern struct dentry *acpi_debugfs_dir;
@@ -35,15 +44,33 @@ static inline void acpi_debugfs_init(void) { return; }
35#endif 44#endif
36 45
37/* -------------------------------------------------------------------------- 46/* --------------------------------------------------------------------------
47 Device Node Initialization / Removal
48 -------------------------------------------------------------------------- */
49#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
50 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
51
52int acpi_device_add(struct acpi_device *device,
53 void (*release)(struct device *));
54void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
55 int type, unsigned long long sta);
56void acpi_device_add_finalize(struct acpi_device *device);
57void acpi_free_ids(struct acpi_device *device);
58
59/* --------------------------------------------------------------------------
38 Power Resource 60 Power Resource
39 -------------------------------------------------------------------------- */ 61 -------------------------------------------------------------------------- */
40int acpi_power_init(void); 62int acpi_power_init(void);
63void acpi_power_resources_list_free(struct list_head *list);
64int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
65 struct list_head *list);
66int acpi_add_power_resource(acpi_handle handle);
67void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
68int acpi_power_min_system_level(struct list_head *list);
41int acpi_device_sleep_wake(struct acpi_device *dev, 69int acpi_device_sleep_wake(struct acpi_device *dev,
42 int enable, int sleep_state, int dev_state); 70 int enable, int sleep_state, int dev_state);
43int acpi_power_get_inferred_state(struct acpi_device *device, int *state); 71int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
44int acpi_power_on_resources(struct acpi_device *device, int state); 72int acpi_power_on_resources(struct acpi_device *device, int state);
45int acpi_power_transition(struct acpi_device *device, int state); 73int acpi_power_transition(struct acpi_device *device, int state);
46int acpi_bus_init_power(struct acpi_device *device);
47 74
48int acpi_wakeup_device_init(void); 75int acpi_wakeup_device_init(void);
49void acpi_early_processor_set_pdc(void); 76void acpi_early_processor_set_pdc(void);
@@ -98,6 +125,4 @@ static inline void suspend_nvs_restore(void) {}
98 -------------------------------------------------------------------------- */ 125 -------------------------------------------------------------------------- */
99struct platform_device; 126struct platform_device;
100 127
101struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
102
103#endif /* _ACPI_INTERNAL_H_ */ 128#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index cb31298ca684..33e609f63585 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -116,14 +116,16 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
116 struct acpi_srat_mem_affinity *p = 116 struct acpi_srat_mem_affinity *p =
117 (struct acpi_srat_mem_affinity *)header; 117 (struct acpi_srat_mem_affinity *)header;
118 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 118 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
119 "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n", 119 "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
120 (unsigned long)p->base_address, 120 (unsigned long)p->base_address,
121 (unsigned long)p->length, 121 (unsigned long)p->length,
122 p->proximity_domain, 122 p->proximity_domain,
123 (p->flags & ACPI_SRAT_MEM_ENABLED)? 123 (p->flags & ACPI_SRAT_MEM_ENABLED)?
124 "enabled" : "disabled", 124 "enabled" : "disabled",
125 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? 125 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
126 " hot-pluggable" : "")); 126 " hot-pluggable" : "",
127 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
128 " non-volatile" : ""));
127 } 129 }
128#endif /* ACPI_DEBUG_OUTPUT */ 130#endif /* ACPI_DEBUG_OUTPUT */
129 break; 131 break;
@@ -273,7 +275,7 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
273 275
274static int __init 276static int __init
275acpi_table_parse_srat(enum acpi_srat_type id, 277acpi_table_parse_srat(enum acpi_srat_type id,
276 acpi_table_entry_handler handler, unsigned int max_entries) 278 acpi_tbl_entry_handler handler, unsigned int max_entries)
277{ 279{
278 return acpi_table_parse_entries(ACPI_SIG_SRAT, 280 return acpi_table_parse_entries(ACPI_SIG_SRAT,
279 sizeof(struct acpi_table_srat), id, 281 sizeof(struct acpi_table_srat), id,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3ff267861541..908b02d5da1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
250 return acpi_rsdp; 250 return acpi_rsdp;
251#endif 251#endif
252 252
253 if (efi_enabled) { 253 if (efi_enabled(EFI_CONFIG_TABLES)) {
254 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 254 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
255 return efi.acpi20; 255 return efi.acpi20;
256 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 256 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -787,7 +787,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
787 787
788 acpi_irq_handler = handler; 788 acpi_irq_handler = handler;
789 acpi_irq_context = context; 789 acpi_irq_context = context;
790 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 790 if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
791 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 791 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
792 acpi_irq_handler = NULL; 792 acpi_irq_handler = NULL;
793 return AE_NOT_ACQUIRED; 793 return AE_NOT_ACQUIRED;
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
deleted file mode 100644
index a1dee29beed3..000000000000
--- a/drivers/acpi/pci_bind.c
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * pci_bind.c - ACPI PCI Device Binding ($Revision: 2 $)
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/pci.h>
29#include <linux/pci-acpi.h>
30#include <linux/acpi.h>
31#include <linux/pm_runtime.h>
32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35#define _COMPONENT ACPI_PCI_COMPONENT
36ACPI_MODULE_NAME("pci_bind");
37
38static int acpi_pci_unbind(struct acpi_device *device)
39{
40 struct pci_dev *dev;
41
42 dev = acpi_get_pci_dev(device->handle);
43 if (!dev)
44 goto out;
45
46 device_set_run_wake(&dev->dev, false);
47 pci_acpi_remove_pm_notifier(device);
48 acpi_power_resource_unregister_device(&dev->dev, device->handle);
49
50 if (!dev->subordinate)
51 goto out;
52
53 acpi_pci_irq_del_prt(pci_domain_nr(dev->bus), dev->subordinate->number);
54
55 device->ops.bind = NULL;
56 device->ops.unbind = NULL;
57
58out:
59 pci_dev_put(dev);
60 return 0;
61}
62
63static int acpi_pci_bind(struct acpi_device *device)
64{
65 acpi_status status;
66 acpi_handle handle;
67 unsigned char bus;
68 struct pci_dev *dev;
69
70 dev = acpi_get_pci_dev(device->handle);
71 if (!dev)
72 return 0;
73
74 pci_acpi_add_pm_notifier(device, dev);
75 acpi_power_resource_register_device(&dev->dev, device->handle);
76 if (device->wakeup.flags.run_wake)
77 device_set_run_wake(&dev->dev, true);
78
79 /*
80 * Install the 'bind' function to facilitate callbacks for
81 * children of the P2P bridge.
82 */
83 if (dev->subordinate) {
84 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
85 "Device %04x:%02x:%02x.%d is a PCI bridge\n",
86 pci_domain_nr(dev->bus), dev->bus->number,
87 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
88 device->ops.bind = acpi_pci_bind;
89 device->ops.unbind = acpi_pci_unbind;
90 }
91
92 /*
93 * Evaluate and parse _PRT, if exists. This code allows parsing of
94 * _PRT objects within the scope of non-bridge devices. Note that
95 * _PRTs within the scope of a PCI bridge assume the bridge's
96 * subordinate bus number.
97 *
98 * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
99 */
100 status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
101 if (ACPI_FAILURE(status))
102 goto out;
103
104 if (dev->subordinate)
105 bus = dev->subordinate->number;
106 else
107 bus = dev->bus->number;
108
109 acpi_pci_irq_add_prt(device->handle, pci_domain_nr(dev->bus), bus);
110
111out:
112 pci_dev_put(dev);
113 return 0;
114}
115
116int acpi_pci_bind_root(struct acpi_device *device)
117{
118 device->ops.bind = acpi_pci_bind;
119 device->ops.unbind = acpi_pci_unbind;
120
121 return 0;
122}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index a12808259dfb..ab764ed34a50 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -53,23 +53,19 @@ ACPI_MODULE_NAME("pci_link");
53#define ACPI_PCI_LINK_FILE_STATUS "state" 53#define ACPI_PCI_LINK_FILE_STATUS "state"
54#define ACPI_PCI_LINK_MAX_POSSIBLE 16 54#define ACPI_PCI_LINK_MAX_POSSIBLE 16
55 55
56static int acpi_pci_link_add(struct acpi_device *device); 56static int acpi_pci_link_add(struct acpi_device *device,
57static int acpi_pci_link_remove(struct acpi_device *device, int type); 57 const struct acpi_device_id *not_used);
58static void acpi_pci_link_remove(struct acpi_device *device);
58 59
59static const struct acpi_device_id link_device_ids[] = { 60static const struct acpi_device_id link_device_ids[] = {
60 {"PNP0C0F", 0}, 61 {"PNP0C0F", 0},
61 {"", 0}, 62 {"", 0},
62}; 63};
63MODULE_DEVICE_TABLE(acpi, link_device_ids);
64 64
65static struct acpi_driver acpi_pci_link_driver = { 65static struct acpi_scan_handler pci_link_handler = {
66 .name = "pci_link",
67 .class = ACPI_PCI_LINK_CLASS,
68 .ids = link_device_ids, 66 .ids = link_device_ids,
69 .ops = { 67 .attach = acpi_pci_link_add,
70 .add = acpi_pci_link_add, 68 .detach = acpi_pci_link_remove,
71 .remove = acpi_pci_link_remove,
72 },
73}; 69};
74 70
75/* 71/*
@@ -692,7 +688,8 @@ int acpi_pci_link_free_irq(acpi_handle handle)
692 Driver Interface 688 Driver Interface
693 -------------------------------------------------------------------------- */ 689 -------------------------------------------------------------------------- */
694 690
695static int acpi_pci_link_add(struct acpi_device *device) 691static int acpi_pci_link_add(struct acpi_device *device,
692 const struct acpi_device_id *not_used)
696{ 693{
697 int result; 694 int result;
698 struct acpi_pci_link *link; 695 struct acpi_pci_link *link;
@@ -746,7 +743,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
746 if (result) 743 if (result)
747 kfree(link); 744 kfree(link);
748 745
749 return result; 746 return result < 0 ? result : 1;
750} 747}
751 748
752static int acpi_pci_link_resume(struct acpi_pci_link *link) 749static int acpi_pci_link_resume(struct acpi_pci_link *link)
@@ -766,7 +763,7 @@ static void irqrouter_resume(void)
766 } 763 }
767} 764}
768 765
769static int acpi_pci_link_remove(struct acpi_device *device, int type) 766static void acpi_pci_link_remove(struct acpi_device *device)
770{ 767{
771 struct acpi_pci_link *link; 768 struct acpi_pci_link *link;
772 769
@@ -777,7 +774,6 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
777 mutex_unlock(&acpi_link_lock); 774 mutex_unlock(&acpi_link_lock);
778 775
779 kfree(link); 776 kfree(link);
780 return 0;
781} 777}
782 778
783/* 779/*
@@ -874,20 +870,10 @@ static struct syscore_ops irqrouter_syscore_ops = {
874 .resume = irqrouter_resume, 870 .resume = irqrouter_resume,
875}; 871};
876 872
877static int __init irqrouter_init_ops(void) 873void __init acpi_pci_link_init(void)
878{
879 if (!acpi_disabled && !acpi_noirq)
880 register_syscore_ops(&irqrouter_syscore_ops);
881
882 return 0;
883}
884
885device_initcall(irqrouter_init_ops);
886
887static int __init acpi_pci_link_init(void)
888{ 874{
889 if (acpi_noirq) 875 if (acpi_noirq)
890 return 0; 876 return;
891 877
892 if (acpi_irq_balance == -1) { 878 if (acpi_irq_balance == -1) {
893 /* no command line switch: enable balancing in IOAPIC mode */ 879 /* no command line switch: enable balancing in IOAPIC mode */
@@ -896,11 +882,6 @@ static int __init acpi_pci_link_init(void)
896 else 882 else
897 acpi_irq_balance = 0; 883 acpi_irq_balance = 0;
898 } 884 }
899 885 register_syscore_ops(&irqrouter_syscore_ops);
900 if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0) 886 acpi_scan_add_handler(&pci_link_handler);
901 return -ENODEV;
902
903 return 0;
904} 887}
905
906subsys_initcall(acpi_pci_link_init);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7928d4dc7056..b3cc69c5caf1 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -45,9 +45,9 @@
45ACPI_MODULE_NAME("pci_root"); 45ACPI_MODULE_NAME("pci_root");
46#define ACPI_PCI_ROOT_CLASS "pci_bridge" 46#define ACPI_PCI_ROOT_CLASS "pci_bridge"
47#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge" 47#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
48static int acpi_pci_root_add(struct acpi_device *device); 48static int acpi_pci_root_add(struct acpi_device *device,
49static int acpi_pci_root_remove(struct acpi_device *device, int type); 49 const struct acpi_device_id *not_used);
50static int acpi_pci_root_start(struct acpi_device *device); 50static void acpi_pci_root_remove(struct acpi_device *device);
51 51
52#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \ 52#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
53 | OSC_ACTIVE_STATE_PWR_SUPPORT \ 53 | OSC_ACTIVE_STATE_PWR_SUPPORT \
@@ -58,17 +58,11 @@ static const struct acpi_device_id root_device_ids[] = {
58 {"PNP0A03", 0}, 58 {"PNP0A03", 0},
59 {"", 0}, 59 {"", 0},
60}; 60};
61MODULE_DEVICE_TABLE(acpi, root_device_ids);
62 61
63static struct acpi_driver acpi_pci_root_driver = { 62static struct acpi_scan_handler pci_root_handler = {
64 .name = "pci_root",
65 .class = ACPI_PCI_ROOT_CLASS,
66 .ids = root_device_ids, 63 .ids = root_device_ids,
67 .ops = { 64 .attach = acpi_pci_root_add,
68 .add = acpi_pci_root_add, 65 .detach = acpi_pci_root_remove,
69 .remove = acpi_pci_root_remove,
70 .start = acpi_pci_root_start,
71 },
72}; 66};
73 67
74/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */ 68/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */
@@ -188,21 +182,6 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
188 return AE_OK; 182 return AE_OK;
189} 183}
190 184
191static void acpi_pci_bridge_scan(struct acpi_device *device)
192{
193 int status;
194 struct acpi_device *child = NULL;
195
196 if (device->flags.bus_address)
197 if (device->parent && device->parent->ops.bind) {
198 status = device->parent->ops.bind(device);
199 if (!status) {
200 list_for_each_entry(child, &device->children, node)
201 acpi_pci_bridge_scan(child);
202 }
203 }
204}
205
206static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; 185static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
207 186
208static acpi_status acpi_pci_run_osc(acpi_handle handle, 187static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -445,14 +424,15 @@ out:
445} 424}
446EXPORT_SYMBOL(acpi_pci_osc_control_set); 425EXPORT_SYMBOL(acpi_pci_osc_control_set);
447 426
448static int acpi_pci_root_add(struct acpi_device *device) 427static int acpi_pci_root_add(struct acpi_device *device,
428 const struct acpi_device_id *not_used)
449{ 429{
450 unsigned long long segment, bus; 430 unsigned long long segment, bus;
451 acpi_status status; 431 acpi_status status;
452 int result; 432 int result;
453 struct acpi_pci_root *root; 433 struct acpi_pci_root *root;
454 acpi_handle handle; 434 acpi_handle handle;
455 struct acpi_device *child; 435 struct acpi_pci_driver *driver;
456 u32 flags, base_flags; 436 u32 flags, base_flags;
457 bool is_osc_granted = false; 437 bool is_osc_granted = false;
458 438
@@ -603,21 +583,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
603 goto out_del_root; 583 goto out_del_root;
604 } 584 }
605 585
606 /*
607 * Attach ACPI-PCI Context
608 * -----------------------
609 * Thus binding the ACPI and PCI devices.
610 */
611 result = acpi_pci_bind_root(device);
612 if (result)
613 goto out_del_root;
614
615 /*
616 * Scan and bind all _ADR-Based Devices
617 */
618 list_for_each_entry(child, &device->children, node)
619 acpi_pci_bridge_scan(child);
620
621 /* ASPM setting */ 586 /* ASPM setting */
622 if (is_osc_granted) { 587 if (is_osc_granted) {
623 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) 588 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
@@ -632,24 +597,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
632 if (device->wakeup.flags.run_wake) 597 if (device->wakeup.flags.run_wake)
633 device_set_run_wake(root->bus->bridge, true); 598 device_set_run_wake(root->bus->bridge, true);
634 599
635 return 0;
636
637out_del_root:
638 mutex_lock(&acpi_pci_root_lock);
639 list_del(&root->node);
640 mutex_unlock(&acpi_pci_root_lock);
641
642 acpi_pci_irq_del_prt(root->segment, root->secondary.start);
643end:
644 kfree(root);
645 return result;
646}
647
648static int acpi_pci_root_start(struct acpi_device *device)
649{
650 struct acpi_pci_root *root = acpi_driver_data(device);
651 struct acpi_pci_driver *driver;
652
653 if (system_state != SYSTEM_BOOTING) 600 if (system_state != SYSTEM_BOOTING)
654 pci_assign_unassigned_bus_resources(root->bus); 601 pci_assign_unassigned_bus_resources(root->bus);
655 602
@@ -664,11 +611,20 @@ static int acpi_pci_root_start(struct acpi_device *device)
664 pci_enable_bridges(root->bus); 611 pci_enable_bridges(root->bus);
665 612
666 pci_bus_add_devices(root->bus); 613 pci_bus_add_devices(root->bus);
614 return 1;
667 615
668 return 0; 616out_del_root:
617 mutex_lock(&acpi_pci_root_lock);
618 list_del(&root->node);
619 mutex_unlock(&acpi_pci_root_lock);
620
621 acpi_pci_irq_del_prt(root->segment, root->secondary.start);
622end:
623 kfree(root);
624 return result;
669} 625}
670 626
671static int acpi_pci_root_remove(struct acpi_device *device, int type) 627static void acpi_pci_root_remove(struct acpi_device *device)
672{ 628{
673 acpi_status status; 629 acpi_status status;
674 acpi_handle handle; 630 acpi_handle handle;
@@ -696,21 +652,14 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
696 list_del(&root->node); 652 list_del(&root->node);
697 mutex_unlock(&acpi_pci_root_lock); 653 mutex_unlock(&acpi_pci_root_lock);
698 kfree(root); 654 kfree(root);
699 return 0;
700} 655}
701 656
702static int __init acpi_pci_root_init(void) 657void __init acpi_pci_root_init(void)
703{ 658{
704 acpi_hest_init(); 659 acpi_hest_init();
705 660
706 if (acpi_pci_disabled) 661 if (!acpi_pci_disabled) {
707 return 0; 662 pci_acpi_crs_quirks();
708 663 acpi_scan_add_handler(&pci_root_handler);
709 pci_acpi_crs_quirks(); 664 }
710 if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
711 return -ENODEV;
712
713 return 0;
714} 665}
715
716subsys_initcall(acpi_pci_root_init);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d22585f21aeb..2c630c006c2f 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -50,13 +50,12 @@ module_param(debug, bool, 0644);
50ACPI_MODULE_NAME("pci_slot"); 50ACPI_MODULE_NAME("pci_slot");
51 51
52#define MY_NAME "pci_slot" 52#define MY_NAME "pci_slot"
53#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) 53#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg)
54#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) 54#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg)
55#define dbg(format, arg...) \ 55#define dbg(format, arg...) \
56 do { \ 56 do { \
57 if (debug) \ 57 if (debug) \
58 printk(KERN_DEBUG "%s: " format, \ 58 pr_debug("%s: " format, MY_NAME , ## arg); \
59 MY_NAME , ## arg); \
60 } while (0) 59 } while (0)
61 60
62#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */ 61#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 6e7b9d523812..b820528a5fa3 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -41,6 +41,7 @@
41#include <linux/types.h> 41#include <linux/types.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/pm_runtime.h> 43#include <linux/pm_runtime.h>
44#include <linux/sysfs.h>
44#include <acpi/acpi_bus.h> 45#include <acpi/acpi_bus.h>
45#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
46#include "sleep.h" 47#include "sleep.h"
@@ -58,88 +59,121 @@ ACPI_MODULE_NAME("power");
58#define ACPI_POWER_RESOURCE_STATE_ON 0x01 59#define ACPI_POWER_RESOURCE_STATE_ON 0x01
59#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 60#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
60 61
61static int acpi_power_add(struct acpi_device *device); 62struct acpi_power_dependent_device {
62static int acpi_power_remove(struct acpi_device *device, int type); 63 struct list_head node;
63 64 struct acpi_device *adev;
64static const struct acpi_device_id power_device_ids[] = { 65 struct work_struct work;
65 {ACPI_POWER_HID, 0},
66 {"", 0},
67};
68MODULE_DEVICE_TABLE(acpi, power_device_ids);
69
70#ifdef CONFIG_PM_SLEEP
71static int acpi_power_resume(struct device *dev);
72#endif
73static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
74
75static struct acpi_driver acpi_power_driver = {
76 .name = "power",
77 .class = ACPI_POWER_CLASS,
78 .ids = power_device_ids,
79 .ops = {
80 .add = acpi_power_add,
81 .remove = acpi_power_remove,
82 },
83 .drv.pm = &acpi_power_pm,
84};
85
86/*
87 * A power managed device
88 * A device may rely on multiple power resources.
89 * */
90struct acpi_power_managed_device {
91 struct device *dev; /* The physical device */
92 acpi_handle *handle;
93};
94
95struct acpi_power_resource_device {
96 struct acpi_power_managed_device *device;
97 struct acpi_power_resource_device *next;
98}; 66};
99 67
100struct acpi_power_resource { 68struct acpi_power_resource {
101 struct acpi_device * device; 69 struct acpi_device device;
102 acpi_bus_id name; 70 struct list_head list_node;
71 struct list_head dependent;
72 char *name;
103 u32 system_level; 73 u32 system_level;
104 u32 order; 74 u32 order;
105 unsigned int ref_count; 75 unsigned int ref_count;
106 struct mutex resource_lock; 76 struct mutex resource_lock;
77};
107 78
108 /* List of devices relying on this power resource */ 79struct acpi_power_resource_entry {
109 struct acpi_power_resource_device *devices; 80 struct list_head node;
110 struct mutex devices_lock; 81 struct acpi_power_resource *resource;
111}; 82};
112 83
113static struct list_head acpi_power_resource_list; 84static LIST_HEAD(acpi_power_resource_list);
85static DEFINE_MUTEX(power_resource_list_lock);
114 86
115/* -------------------------------------------------------------------------- 87/* --------------------------------------------------------------------------
116 Power Resource Management 88 Power Resource Management
117 -------------------------------------------------------------------------- */ 89 -------------------------------------------------------------------------- */
118 90
119static int 91static inline
120acpi_power_get_context(acpi_handle handle, 92struct acpi_power_resource *to_power_resource(struct acpi_device *device)
121 struct acpi_power_resource **resource)
122{ 93{
123 int result = 0; 94 return container_of(device, struct acpi_power_resource, device);
124 struct acpi_device *device = NULL; 95}
96
97static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
98{
99 struct acpi_device *device;
125 100
101 if (acpi_bus_get_device(handle, &device))
102 return NULL;
126 103
127 if (!resource) 104 return to_power_resource(device);
128 return -ENODEV; 105}
129 106
130 result = acpi_bus_get_device(handle, &device); 107static int acpi_power_resources_list_add(acpi_handle handle,
131 if (result) { 108 struct list_head *list)
132 printk(KERN_WARNING PREFIX "Getting context [%p]\n", handle); 109{
133 return result; 110 struct acpi_power_resource *resource = acpi_power_get_context(handle);
134 } 111 struct acpi_power_resource_entry *entry;
135 112
136 *resource = acpi_driver_data(device); 113 if (!resource || !list)
137 if (!*resource) 114 return -EINVAL;
138 return -ENODEV; 115
116 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
117 if (!entry)
118 return -ENOMEM;
119
120 entry->resource = resource;
121 if (!list_empty(list)) {
122 struct acpi_power_resource_entry *e;
139 123
124 list_for_each_entry(e, list, node)
125 if (e->resource->order > resource->order) {
126 list_add_tail(&entry->node, &e->node);
127 return 0;
128 }
129 }
130 list_add_tail(&entry->node, list);
140 return 0; 131 return 0;
141} 132}
142 133
134void acpi_power_resources_list_free(struct list_head *list)
135{
136 struct acpi_power_resource_entry *entry, *e;
137
138 list_for_each_entry_safe(entry, e, list, node) {
139 list_del(&entry->node);
140 kfree(entry);
141 }
142}
143
144int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
145 struct list_head *list)
146{
147 unsigned int i;
148 int err = 0;
149
150 for (i = start; i < package->package.count; i++) {
151 union acpi_object *element = &package->package.elements[i];
152 acpi_handle rhandle;
153
154 if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
155 err = -ENODATA;
156 break;
157 }
158 rhandle = element->reference.handle;
159 if (!rhandle) {
160 err = -ENODEV;
161 break;
162 }
163 err = acpi_add_power_resource(rhandle);
164 if (err)
165 break;
166
167 err = acpi_power_resources_list_add(rhandle, list);
168 if (err)
169 break;
170 }
171 if (err)
172 acpi_power_resources_list_free(list);
173
174 return err;
175}
176
143static int acpi_power_get_state(acpi_handle handle, int *state) 177static int acpi_power_get_state(acpi_handle handle, int *state)
144{ 178{
145 acpi_status status = AE_OK; 179 acpi_status status = AE_OK;
@@ -167,31 +201,23 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
167 return 0; 201 return 0;
168} 202}
169 203
170static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) 204static int acpi_power_get_list_state(struct list_head *list, int *state)
171{ 205{
206 struct acpi_power_resource_entry *entry;
172 int cur_state; 207 int cur_state;
173 int i = 0;
174 208
175 if (!list || !state) 209 if (!list || !state)
176 return -EINVAL; 210 return -EINVAL;
177 211
178 /* The state of the list is 'on' IFF all resources are 'on'. */ 212 /* The state of the list is 'on' IFF all resources are 'on'. */
179 213 list_for_each_entry(entry, list, node) {
180 for (i = 0; i < list->count; i++) { 214 struct acpi_power_resource *resource = entry->resource;
181 struct acpi_power_resource *resource; 215 acpi_handle handle = resource->device.handle;
182 acpi_handle handle = list->handles[i];
183 int result; 216 int result;
184 217
185 result = acpi_power_get_context(handle, &resource);
186 if (result)
187 return result;
188
189 mutex_lock(&resource->resource_lock); 218 mutex_lock(&resource->resource_lock);
190
191 result = acpi_power_get_state(handle, &cur_state); 219 result = acpi_power_get_state(handle, &cur_state);
192
193 mutex_unlock(&resource->resource_lock); 220 mutex_unlock(&resource->resource_lock);
194
195 if (result) 221 if (result)
196 return result; 222 return result;
197 223
@@ -203,54 +229,52 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
203 cur_state ? "on" : "off")); 229 cur_state ? "on" : "off"));
204 230
205 *state = cur_state; 231 *state = cur_state;
206
207 return 0; 232 return 0;
208} 233}
209 234
210/* Resume the device when all power resources in _PR0 are on */ 235static void acpi_power_resume_dependent(struct work_struct *work)
211static void acpi_power_on_device(struct acpi_power_managed_device *device)
212{ 236{
213 struct acpi_device *acpi_dev; 237 struct acpi_power_dependent_device *dep;
214 acpi_handle handle = device->handle; 238 struct acpi_device_physical_node *pn;
239 struct acpi_device *adev;
215 int state; 240 int state;
216 241
217 if (acpi_bus_get_device(handle, &acpi_dev)) 242 dep = container_of(work, struct acpi_power_dependent_device, work);
243 adev = dep->adev;
244 if (acpi_power_get_inferred_state(adev, &state))
218 return; 245 return;
219 246
220 if(acpi_power_get_inferred_state(acpi_dev, &state)) 247 if (state > ACPI_STATE_D0)
221 return; 248 return;
222 249
223 if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev)) 250 mutex_lock(&adev->physical_node_lock);
224 pm_request_resume(device->dev); 251
252 list_for_each_entry(pn, &adev->physical_node_list, node)
253 pm_request_resume(pn->dev);
254
255 list_for_each_entry(pn, &adev->power_dependent, node)
256 pm_request_resume(pn->dev);
257
258 mutex_unlock(&adev->physical_node_lock);
225} 259}
226 260
227static int __acpi_power_on(struct acpi_power_resource *resource) 261static int __acpi_power_on(struct acpi_power_resource *resource)
228{ 262{
229 acpi_status status = AE_OK; 263 acpi_status status = AE_OK;
230 264
231 status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); 265 status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
232 if (ACPI_FAILURE(status)) 266 if (ACPI_FAILURE(status))
233 return -ENODEV; 267 return -ENODEV;
234 268
235 /* Update the power resource's _device_ power state */
236 resource->device->power.state = ACPI_STATE_D0;
237
238 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", 269 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
239 resource->name)); 270 resource->name));
240 271
241 return 0; 272 return 0;
242} 273}
243 274
244static int acpi_power_on(acpi_handle handle) 275static int acpi_power_on(struct acpi_power_resource *resource)
245{ 276{
246 int result = 0; 277 int result = 0;;
247 bool resume_device = false;
248 struct acpi_power_resource *resource = NULL;
249 struct acpi_power_resource_device *device_list;
250
251 result = acpi_power_get_context(handle, &resource);
252 if (result)
253 return result;
254 278
255 mutex_lock(&resource->resource_lock); 279 mutex_lock(&resource->resource_lock);
256 280
@@ -260,39 +284,38 @@ static int acpi_power_on(acpi_handle handle)
260 resource->name)); 284 resource->name));
261 } else { 285 } else {
262 result = __acpi_power_on(resource); 286 result = __acpi_power_on(resource);
263 if (result) 287 if (result) {
264 resource->ref_count--; 288 resource->ref_count--;
265 else 289 } else {
266 resume_device = true; 290 struct acpi_power_dependent_device *dep;
291
292 list_for_each_entry(dep, &resource->dependent, node)
293 schedule_work(&dep->work);
294 }
267 } 295 }
268 296
269 mutex_unlock(&resource->resource_lock); 297 mutex_unlock(&resource->resource_lock);
270 298
271 if (!resume_device) 299 return result;
272 return result; 300}
273
274 mutex_lock(&resource->devices_lock);
275 301
276 device_list = resource->devices; 302static int __acpi_power_off(struct acpi_power_resource *resource)
277 while (device_list) { 303{
278 acpi_power_on_device(device_list->device); 304 acpi_status status;
279 device_list = device_list->next;
280 }
281 305
282 mutex_unlock(&resource->devices_lock); 306 status = acpi_evaluate_object(resource->device.handle, "_OFF",
307 NULL, NULL);
308 if (ACPI_FAILURE(status))
309 return -ENODEV;
283 310
284 return result; 311 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n",
312 resource->name));
313 return 0;
285} 314}
286 315
287static int acpi_power_off(acpi_handle handle) 316static int acpi_power_off(struct acpi_power_resource *resource)
288{ 317{
289 int result = 0; 318 int result = 0;
290 acpi_status status = AE_OK;
291 struct acpi_power_resource *resource = NULL;
292
293 result = acpi_power_get_context(handle, &resource);
294 if (result)
295 return result;
296 319
297 mutex_lock(&resource->resource_lock); 320 mutex_lock(&resource->resource_lock);
298 321
@@ -307,19 +330,10 @@ static int acpi_power_off(acpi_handle handle)
307 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 330 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
308 "Power resource [%s] still in use\n", 331 "Power resource [%s] still in use\n",
309 resource->name)); 332 resource->name));
310 goto unlock;
311 }
312
313 status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
314 if (ACPI_FAILURE(status)) {
315 result = -ENODEV;
316 } else { 333 } else {
317 /* Update the power resource's _device_ power state */ 334 result = __acpi_power_off(resource);
318 resource->device->power.state = ACPI_STATE_D3; 335 if (result)
319 336 resource->ref_count++;
320 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
321 "Power resource [%s] turned off\n",
322 resource->name));
323 } 337 }
324 338
325 unlock: 339 unlock:
@@ -328,148 +342,202 @@ static int acpi_power_off(acpi_handle handle)
328 return result; 342 return result;
329} 343}
330 344
331static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) 345static int acpi_power_off_list(struct list_head *list)
332{ 346{
333 int i; 347 struct acpi_power_resource_entry *entry;
348 int result = 0;
334 349
335 for (i = num_res - 1; i >= 0 ; i--) 350 list_for_each_entry_reverse(entry, list, node) {
336 acpi_power_off(list->handles[i]); 351 result = acpi_power_off(entry->resource);
337} 352 if (result)
353 goto err;
354 }
355 return 0;
338 356
339static void acpi_power_off_list(struct acpi_handle_list *list) 357 err:
340{ 358 list_for_each_entry_continue(entry, list, node)
341 __acpi_power_off_list(list, list->count); 359 acpi_power_on(entry->resource);
360
361 return result;
342} 362}
343 363
344static int acpi_power_on_list(struct acpi_handle_list *list) 364static int acpi_power_on_list(struct list_head *list)
345{ 365{
366 struct acpi_power_resource_entry *entry;
346 int result = 0; 367 int result = 0;
347 int i;
348 368
349 for (i = 0; i < list->count; i++) { 369 list_for_each_entry(entry, list, node) {
350 result = acpi_power_on(list->handles[i]); 370 result = acpi_power_on(entry->resource);
351 if (result) { 371 if (result)
352 __acpi_power_off_list(list, i); 372 goto err;
353 break;
354 }
355 } 373 }
374 return 0;
375
376 err:
377 list_for_each_entry_continue_reverse(entry, list, node)
378 acpi_power_off(entry->resource);
356 379
357 return result; 380 return result;
358} 381}
359 382
360static void __acpi_power_resource_unregister_device(struct device *dev, 383static void acpi_power_add_dependent(struct acpi_power_resource *resource,
361 acpi_handle res_handle) 384 struct acpi_device *adev)
362{ 385{
363 struct acpi_power_resource *resource = NULL; 386 struct acpi_power_dependent_device *dep;
364 struct acpi_power_resource_device *prev, *curr;
365 387
366 if (acpi_power_get_context(res_handle, &resource)) 388 mutex_lock(&resource->resource_lock);
367 return; 389
390 list_for_each_entry(dep, &resource->dependent, node)
391 if (dep->adev == adev)
392 goto out;
393
394 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
395 if (!dep)
396 goto out;
397
398 dep->adev = adev;
399 INIT_WORK(&dep->work, acpi_power_resume_dependent);
400 list_add_tail(&dep->node, &resource->dependent);
368 401
369 mutex_lock(&resource->devices_lock); 402 out:
370 prev = NULL; 403 mutex_unlock(&resource->resource_lock);
371 curr = resource->devices; 404}
372 while (curr) { 405
373 if (curr->device->dev == dev) { 406static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
374 if (!prev) 407 struct acpi_device *adev)
375 resource->devices = curr->next; 408{
376 else 409 struct acpi_power_dependent_device *dep;
377 prev->next = curr->next; 410 struct work_struct *work = NULL;
378 411
379 kfree(curr); 412 mutex_lock(&resource->resource_lock);
413
414 list_for_each_entry(dep, &resource->dependent, node)
415 if (dep->adev == adev) {
416 list_del(&dep->node);
417 work = &dep->work;
380 break; 418 break;
381 } 419 }
382 420
383 prev = curr; 421 mutex_unlock(&resource->resource_lock);
384 curr = curr->next; 422
423 if (work) {
424 cancel_work_sync(work);
425 kfree(dep);
385 } 426 }
386 mutex_unlock(&resource->devices_lock);
387} 427}
388 428
389/* Unlink dev from all power resources in _PR0 */ 429static struct attribute *attrs[] = {
390void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle) 430 NULL,
391{ 431};
392 struct acpi_device *acpi_dev;
393 struct acpi_handle_list *list;
394 int i;
395 432
396 if (!dev || !handle) 433static struct attribute_group attr_groups[] = {
397 return; 434 [ACPI_STATE_D0] = {
435 .name = "power_resources_D0",
436 .attrs = attrs,
437 },
438 [ACPI_STATE_D1] = {
439 .name = "power_resources_D1",
440 .attrs = attrs,
441 },
442 [ACPI_STATE_D2] = {
443 .name = "power_resources_D2",
444 .attrs = attrs,
445 },
446 [ACPI_STATE_D3_HOT] = {
447 .name = "power_resources_D3hot",
448 .attrs = attrs,
449 },
450};
398 451
399 if (acpi_bus_get_device(handle, &acpi_dev)) 452static void acpi_power_hide_list(struct acpi_device *adev, int state)
453{
454 struct acpi_device_power_state *ps = &adev->power.states[state];
455 struct acpi_power_resource_entry *entry;
456
457 if (list_empty(&ps->resources))
400 return; 458 return;
401 459
402 list = &acpi_dev->power.states[ACPI_STATE_D0].resources; 460 list_for_each_entry_reverse(entry, &ps->resources, node) {
461 struct acpi_device *res_dev = &entry->resource->device;
403 462
404 for (i = 0; i < list->count; i++) 463 sysfs_remove_link_from_group(&adev->dev.kobj,
405 __acpi_power_resource_unregister_device(dev, 464 attr_groups[state].name,
406 list->handles[i]); 465 dev_name(&res_dev->dev));
466 }
467 sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]);
407} 468}
408EXPORT_SYMBOL_GPL(acpi_power_resource_unregister_device);
409 469
410static int __acpi_power_resource_register_device( 470static void acpi_power_expose_list(struct acpi_device *adev, int state)
411 struct acpi_power_managed_device *powered_device, acpi_handle handle)
412{ 471{
413 struct acpi_power_resource *resource = NULL; 472 struct acpi_device_power_state *ps = &adev->power.states[state];
414 struct acpi_power_resource_device *power_resource_device; 473 struct acpi_power_resource_entry *entry;
415 int result; 474 int ret;
416
417 result = acpi_power_get_context(handle, &resource);
418 if (result)
419 return result;
420 475
421 power_resource_device = kzalloc( 476 if (list_empty(&ps->resources))
422 sizeof(*power_resource_device), GFP_KERNEL); 477 return;
423 if (!power_resource_device)
424 return -ENOMEM;
425 478
426 power_resource_device->device = powered_device; 479 ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]);
480 if (ret)
481 return;
427 482
428 mutex_lock(&resource->devices_lock); 483 list_for_each_entry(entry, &ps->resources, node) {
429 power_resource_device->next = resource->devices; 484 struct acpi_device *res_dev = &entry->resource->device;
430 resource->devices = power_resource_device;
431 mutex_unlock(&resource->devices_lock);
432 485
433 return 0; 486 ret = sysfs_add_link_to_group(&adev->dev.kobj,
487 attr_groups[state].name,
488 &res_dev->dev.kobj,
489 dev_name(&res_dev->dev));
490 if (ret) {
491 acpi_power_hide_list(adev, state);
492 break;
493 }
494 }
434} 495}
435 496
436/* Link dev to all power resources in _PR0 */ 497void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
437int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
438{ 498{
439 struct acpi_device *acpi_dev; 499 struct acpi_device_power_state *ps;
440 struct acpi_handle_list *list; 500 struct acpi_power_resource_entry *entry;
441 struct acpi_power_managed_device *powered_device; 501 int state;
442 int i, ret;
443 502
444 if (!dev || !handle) 503 if (!adev->power.flags.power_resources)
445 return -ENODEV; 504 return;
446 505
447 ret = acpi_bus_get_device(handle, &acpi_dev); 506 ps = &adev->power.states[ACPI_STATE_D0];
448 if (ret || !acpi_dev->power.flags.power_resources) 507 list_for_each_entry(entry, &ps->resources, node) {
449 return -ENODEV; 508 struct acpi_power_resource *resource = entry->resource;
450 509
451 powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL); 510 if (add)
452 if (!powered_device) 511 acpi_power_add_dependent(resource, adev);
453 return -ENOMEM; 512 else
513 acpi_power_remove_dependent(resource, adev);
514 }
454 515
455 powered_device->dev = dev; 516 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
456 powered_device->handle = handle; 517 if (add)
518 acpi_power_expose_list(adev, state);
519 else
520 acpi_power_hide_list(adev, state);
521 }
522}
457 523
458 list = &acpi_dev->power.states[ACPI_STATE_D0].resources; 524int acpi_power_min_system_level(struct list_head *list)
525{
526 struct acpi_power_resource_entry *entry;
527 int system_level = 5;
459 528
460 for (i = 0; i < list->count; i++) { 529 list_for_each_entry(entry, list, node) {
461 ret = __acpi_power_resource_register_device(powered_device, 530 struct acpi_power_resource *resource = entry->resource;
462 list->handles[i]);
463 531
464 if (ret) { 532 if (system_level > resource->system_level)
465 acpi_power_resource_unregister_device(dev, handle); 533 system_level = resource->system_level;
466 break;
467 }
468 } 534 }
469 535 return system_level;
470 return ret;
471} 536}
472EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); 537
538/* --------------------------------------------------------------------------
539 Device Power Management
540 -------------------------------------------------------------------------- */
473 541
474/** 542/**
475 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in 543 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
@@ -542,7 +610,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
542 */ 610 */
543int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) 611int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
544{ 612{
545 int i, err = 0; 613 int err = 0;
546 614
547 if (!dev || !dev->wakeup.flags.valid) 615 if (!dev || !dev->wakeup.flags.valid)
548 return -EINVAL; 616 return -EINVAL;
@@ -552,24 +620,17 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
552 if (dev->wakeup.prepare_count++) 620 if (dev->wakeup.prepare_count++)
553 goto out; 621 goto out;
554 622
555 /* Open power resource */ 623 err = acpi_power_on_list(&dev->wakeup.resources);
556 for (i = 0; i < dev->wakeup.resources.count; i++) { 624 if (err) {
557 int ret = acpi_power_on(dev->wakeup.resources.handles[i]); 625 dev_err(&dev->dev, "Cannot turn wakeup power resources on\n");
558 if (ret) { 626 dev->wakeup.flags.valid = 0;
559 printk(KERN_ERR PREFIX "Transition power state\n"); 627 } else {
560 dev->wakeup.flags.valid = 0; 628 /*
561 err = -ENODEV; 629 * Passing 3 as the third argument below means the device may be
562 goto err_out; 630 * put into arbitrary power state afterward.
563 } 631 */
632 err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
564 } 633 }
565
566 /*
567 * Passing 3 as the third argument below means the device may be placed
568 * in arbitrary power state afterwards.
569 */
570 err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
571
572 err_out:
573 if (err) 634 if (err)
574 dev->wakeup.prepare_count = 0; 635 dev->wakeup.prepare_count = 0;
575 636
@@ -586,7 +647,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
586 */ 647 */
587int acpi_disable_wakeup_device_power(struct acpi_device *dev) 648int acpi_disable_wakeup_device_power(struct acpi_device *dev)
588{ 649{
589 int i, err = 0; 650 int err = 0;
590 651
591 if (!dev || !dev->wakeup.flags.valid) 652 if (!dev || !dev->wakeup.flags.valid)
592 return -EINVAL; 653 return -EINVAL;
@@ -607,15 +668,10 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
607 if (err) 668 if (err)
608 goto out; 669 goto out;
609 670
610 /* Close power resource */ 671 err = acpi_power_off_list(&dev->wakeup.resources);
611 for (i = 0; i < dev->wakeup.resources.count; i++) { 672 if (err) {
612 int ret = acpi_power_off(dev->wakeup.resources.handles[i]); 673 dev_err(&dev->dev, "Cannot turn wakeup power resources off\n");
613 if (ret) { 674 dev->wakeup.flags.valid = 0;
614 printk(KERN_ERR PREFIX "Transition power state\n");
615 dev->wakeup.flags.valid = 0;
616 err = -ENODEV;
617 goto out;
618 }
619 } 675 }
620 676
621 out: 677 out:
@@ -623,14 +679,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
623 return err; 679 return err;
624} 680}
625 681
626/* --------------------------------------------------------------------------
627 Device Power Management
628 -------------------------------------------------------------------------- */
629
630int acpi_power_get_inferred_state(struct acpi_device *device, int *state) 682int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
631{ 683{
632 int result = 0; 684 int result = 0;
633 struct acpi_handle_list *list = NULL;
634 int list_state = 0; 685 int list_state = 0;
635 int i = 0; 686 int i = 0;
636 687
@@ -642,8 +693,9 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
642 * required for a given D-state are 'on'. 693 * required for a given D-state are 'on'.
643 */ 694 */
644 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { 695 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
645 list = &device->power.states[i].resources; 696 struct list_head *list = &device->power.states[i].resources;
646 if (list->count < 1) 697
698 if (list_empty(list))
647 continue; 699 continue;
648 700
649 result = acpi_power_get_list_state(list, &list_state); 701 result = acpi_power_get_list_state(list, &list_state);
@@ -662,7 +714,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
662 714
663int acpi_power_on_resources(struct acpi_device *device, int state) 715int acpi_power_on_resources(struct acpi_device *device, int state)
664{ 716{
665 if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) 717 if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3_HOT)
666 return -EINVAL; 718 return -EINVAL;
667 719
668 return acpi_power_on_list(&device->power.states[state].resources); 720 return acpi_power_on_list(&device->power.states[state].resources);
@@ -675,7 +727,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
675 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) 727 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
676 return -EINVAL; 728 return -EINVAL;
677 729
678 if (device->power.state == state) 730 if (device->power.state == state || !device->flags.power_manageable)
679 return 0; 731 return 0;
680 732
681 if ((device->power.state < ACPI_STATE_D0) 733 if ((device->power.state < ACPI_STATE_D0)
@@ -703,118 +755,126 @@ int acpi_power_transition(struct acpi_device *device, int state)
703 return result; 755 return result;
704} 756}
705 757
706/* -------------------------------------------------------------------------- 758static void acpi_release_power_resource(struct device *dev)
707 Driver Interface 759{
708 -------------------------------------------------------------------------- */ 760 struct acpi_device *device = to_acpi_device(dev);
761 struct acpi_power_resource *resource;
762
763 resource = container_of(device, struct acpi_power_resource, device);
764
765 mutex_lock(&power_resource_list_lock);
766 list_del(&resource->list_node);
767 mutex_unlock(&power_resource_list_lock);
768
769 acpi_free_ids(device);
770 kfree(resource);
771}
709 772
710static int acpi_power_add(struct acpi_device *device) 773static ssize_t acpi_power_in_use_show(struct device *dev,
774 struct device_attribute *attr,
775 char *buf) {
776 struct acpi_power_resource *resource;
777
778 resource = to_power_resource(to_acpi_device(dev));
779 return sprintf(buf, "%u\n", !!resource->ref_count);
780}
781static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
782
783static void acpi_power_sysfs_remove(struct acpi_device *device)
711{ 784{
712 int result = 0, state; 785 device_remove_file(&device->dev, &dev_attr_resource_in_use);
713 acpi_status status = AE_OK; 786}
714 struct acpi_power_resource *resource = NULL; 787
788int acpi_add_power_resource(acpi_handle handle)
789{
790 struct acpi_power_resource *resource;
791 struct acpi_device *device = NULL;
715 union acpi_object acpi_object; 792 union acpi_object acpi_object;
716 struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object }; 793 struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
794 acpi_status status;
795 int state, result = -ENODEV;
717 796
797 acpi_bus_get_device(handle, &device);
798 if (device)
799 return 0;
718 800
719 if (!device) 801 resource = kzalloc(sizeof(*resource), GFP_KERNEL);
720 return -EINVAL;
721
722 resource = kzalloc(sizeof(struct acpi_power_resource), GFP_KERNEL);
723 if (!resource) 802 if (!resource)
724 return -ENOMEM; 803 return -ENOMEM;
725 804
726 resource->device = device; 805 device = &resource->device;
806 acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
807 ACPI_STA_DEFAULT);
727 mutex_init(&resource->resource_lock); 808 mutex_init(&resource->resource_lock);
728 mutex_init(&resource->devices_lock); 809 INIT_LIST_HEAD(&resource->dependent);
729 strcpy(resource->name, device->pnp.bus_id); 810 resource->name = device->pnp.bus_id;
730 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 811 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
731 strcpy(acpi_device_class(device), ACPI_POWER_CLASS); 812 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
732 device->driver_data = resource; 813 device->power.state = ACPI_STATE_UNKNOWN;
733 814
734 /* Evalute the object to get the system level and resource order. */ 815 /* Evalute the object to get the system level and resource order. */
735 status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer); 816 status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
736 if (ACPI_FAILURE(status)) { 817 if (ACPI_FAILURE(status))
737 result = -ENODEV; 818 goto err;
738 goto end; 819
739 }
740 resource->system_level = acpi_object.power_resource.system_level; 820 resource->system_level = acpi_object.power_resource.system_level;
741 resource->order = acpi_object.power_resource.resource_order; 821 resource->order = acpi_object.power_resource.resource_order;
742 822
743 result = acpi_power_get_state(device->handle, &state); 823 result = acpi_power_get_state(handle, &state);
744 if (result) 824 if (result)
745 goto end; 825 goto err;
746
747 switch (state) {
748 case ACPI_POWER_RESOURCE_STATE_ON:
749 device->power.state = ACPI_STATE_D0;
750 break;
751 case ACPI_POWER_RESOURCE_STATE_OFF:
752 device->power.state = ACPI_STATE_D3;
753 break;
754 default:
755 device->power.state = ACPI_STATE_UNKNOWN;
756 break;
757 }
758 826
759 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), 827 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
760 acpi_device_bid(device), state ? "on" : "off"); 828 acpi_device_bid(device), state ? "on" : "off");
761 829
762 end: 830 device->flags.match_driver = true;
831 result = acpi_device_add(device, acpi_release_power_resource);
763 if (result) 832 if (result)
764 kfree(resource); 833 goto err;
765 834
766 return result; 835 if (!device_create_file(&device->dev, &dev_attr_resource_in_use))
767} 836 device->remove = acpi_power_sysfs_remove;
768
769static int acpi_power_remove(struct acpi_device *device, int type)
770{
771 struct acpi_power_resource *resource;
772
773 if (!device)
774 return -EINVAL;
775
776 resource = acpi_driver_data(device);
777 if (!resource)
778 return -EINVAL;
779
780 kfree(resource);
781 837
838 mutex_lock(&power_resource_list_lock);
839 list_add(&resource->list_node, &acpi_power_resource_list);
840 mutex_unlock(&power_resource_list_lock);
841 acpi_device_add_finalize(device);
782 return 0; 842 return 0;
843
844 err:
845 acpi_release_power_resource(&device->dev);
846 return result;
783} 847}
784 848
785#ifdef CONFIG_PM_SLEEP 849#ifdef CONFIG_ACPI_SLEEP
786static int acpi_power_resume(struct device *dev) 850void acpi_resume_power_resources(void)
787{ 851{
788 int result = 0, state;
789 struct acpi_device *device;
790 struct acpi_power_resource *resource; 852 struct acpi_power_resource *resource;
791 853
792 if (!dev) 854 mutex_lock(&power_resource_list_lock);
793 return -EINVAL;
794 855
795 device = to_acpi_device(dev); 856 list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
796 resource = acpi_driver_data(device); 857 int result, state;
797 if (!resource)
798 return -EINVAL;
799 858
800 mutex_lock(&resource->resource_lock); 859 mutex_lock(&resource->resource_lock);
801 860
802 result = acpi_power_get_state(device->handle, &state); 861 result = acpi_power_get_state(resource->device.handle, &state);
803 if (result) 862 if (result)
804 goto unlock; 863 continue;
805 864
806 if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count) 865 if (state == ACPI_POWER_RESOURCE_STATE_OFF
807 result = __acpi_power_on(resource); 866 && resource->ref_count) {
867 dev_info(&resource->device.dev, "Turning ON\n");
868 __acpi_power_on(resource);
869 } else if (state == ACPI_POWER_RESOURCE_STATE_ON
870 && !resource->ref_count) {
871 dev_info(&resource->device.dev, "Turning OFF\n");
872 __acpi_power_off(resource);
873 }
808 874
809 unlock: 875 mutex_unlock(&resource->resource_lock);
810 mutex_unlock(&resource->resource_lock); 876 }
811 877
812 return result; 878 mutex_unlock(&power_resource_list_lock);
813} 879}
814#endif 880#endif
815
816int __init acpi_power_init(void)
817{
818 INIT_LIST_HEAD(&acpi_power_resource_list);
819 return acpi_bus_register_driver(&acpi_power_driver);
820}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index ef98796b3824..52ce76725c20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,11 +311,12 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
311 dev->pnp.bus_id, 311 dev->pnp.bus_id,
312 (u32) dev->wakeup.sleep_state); 312 (u32) dev->wakeup.sleep_state);
313 313
314 if (!dev->physical_node_count) 314 if (!dev->physical_node_count) {
315 seq_printf(seq, "%c%-8s\n", 315 seq_printf(seq, "%c%-8s\n",
316 dev->wakeup.flags.run_wake ? 316 dev->wakeup.flags.run_wake ? '*' : ' ',
317 '*' : ' ', "disabled"); 317 device_may_wakeup(&dev->dev) ?
318 else { 318 "enabled" : "disabled");
319 } else {
319 struct device *ldev; 320 struct device *ldev;
320 list_for_each_entry(entry, &dev->physical_node_list, 321 list_for_each_entry(entry, &dev->physical_node_list,
321 node) { 322 node) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e83311bf1ebd..cbf1f122666b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -81,7 +81,7 @@ MODULE_DESCRIPTION("ACPI Processor Driver");
81MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
82 82
83static int acpi_processor_add(struct acpi_device *device); 83static int acpi_processor_add(struct acpi_device *device);
84static int acpi_processor_remove(struct acpi_device *device, int type); 84static int acpi_processor_remove(struct acpi_device *device);
85static void acpi_processor_notify(struct acpi_device *device, u32 event); 85static void acpi_processor_notify(struct acpi_device *device, u32 event);
86static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); 86static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
87static int acpi_processor_handle_eject(struct acpi_processor *pr); 87static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -610,7 +610,7 @@ err_free_pr:
610 return result; 610 return result;
611} 611}
612 612
613static int acpi_processor_remove(struct acpi_device *device, int type) 613static int acpi_processor_remove(struct acpi_device *device)
614{ 614{
615 struct acpi_processor *pr = NULL; 615 struct acpi_processor *pr = NULL;
616 616
@@ -623,7 +623,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
623 if (pr->id >= nr_cpu_ids) 623 if (pr->id >= nr_cpu_ids)
624 goto free; 624 goto free;
625 625
626 if (type == ACPI_BUS_REMOVAL_EJECT) { 626 if (device->removal_type == ACPI_BUS_REMOVAL_EJECT) {
627 if (acpi_processor_handle_eject(pr)) 627 if (acpi_processor_handle_eject(pr))
628 return -EINVAL; 628 return -EINVAL;
629 } 629 }
@@ -677,36 +677,17 @@ static int is_processor_present(acpi_handle handle)
677 return 0; 677 return 0;
678} 678}
679 679
680static
681int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
682{
683 acpi_handle phandle;
684 struct acpi_device *pdev;
685
686
687 if (acpi_get_parent(handle, &phandle)) {
688 return -ENODEV;
689 }
690
691 if (acpi_bus_get_device(phandle, &pdev)) {
692 return -ENODEV;
693 }
694
695 if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
696 return -ENODEV;
697 }
698
699 return 0;
700}
701
702static void acpi_processor_hotplug_notify(acpi_handle handle, 680static void acpi_processor_hotplug_notify(acpi_handle handle,
703 u32 event, void *data) 681 u32 event, void *data)
704{ 682{
705 struct acpi_device *device = NULL; 683 struct acpi_device *device = NULL;
706 struct acpi_eject_event *ej_event = NULL; 684 struct acpi_eject_event *ej_event = NULL;
707 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ 685 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
686 acpi_status status;
708 int result; 687 int result;
709 688
689 acpi_scan_lock_acquire();
690
710 switch (event) { 691 switch (event) {
711 case ACPI_NOTIFY_BUS_CHECK: 692 case ACPI_NOTIFY_BUS_CHECK:
712 case ACPI_NOTIFY_DEVICE_CHECK: 693 case ACPI_NOTIFY_DEVICE_CHECK:
@@ -721,12 +702,16 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
721 if (!acpi_bus_get_device(handle, &device)) 702 if (!acpi_bus_get_device(handle, &device))
722 break; 703 break;
723 704
724 result = acpi_processor_device_add(handle, &device); 705 result = acpi_bus_scan(handle);
725 if (result) { 706 if (result) {
726 acpi_handle_err(handle, "Unable to add the device\n"); 707 acpi_handle_err(handle, "Unable to add the device\n");
727 break; 708 break;
728 } 709 }
729 710 result = acpi_bus_get_device(handle, &device);
711 if (result) {
712 acpi_handle_err(handle, "Missing device object\n");
713 break;
714 }
730 ost_code = ACPI_OST_SC_SUCCESS; 715 ost_code = ACPI_OST_SC_SUCCESS;
731 break; 716 break;
732 717
@@ -751,25 +736,32 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
751 break; 736 break;
752 } 737 }
753 738
754 ej_event->handle = handle; 739 get_device(&device->dev);
740 ej_event->device = device;
755 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 741 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
756 acpi_os_hotplug_execute(acpi_bus_hot_remove_device, 742 /* The eject is carried out asynchronously. */
757 (void *)ej_event); 743 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
758 744 ej_event);
759 /* eject is performed asynchronously */ 745 if (ACPI_FAILURE(status)) {
760 return; 746 put_device(&device->dev);
747 kfree(ej_event);
748 break;
749 }
750 goto out;
761 751
762 default: 752 default:
763 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 753 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
764 "Unsupported event [0x%x]\n", event)); 754 "Unsupported event [0x%x]\n", event));
765 755
766 /* non-hotplug event; possibly handled by other handler */ 756 /* non-hotplug event; possibly handled by other handler */
767 return; 757 goto out;
768 } 758 }
769 759
770 /* Inform firmware that the hotplug operation has completed */ 760 /* Inform firmware that the hotplug operation has completed */
771 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL); 761 (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
772 return; 762
763 out:
764 acpi_scan_lock_release();
773} 765}
774 766
775static acpi_status is_processor_device(acpi_handle handle) 767static acpi_status is_processor_device(acpi_handle handle)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f1a5da44591d..fc95308e9a11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -28,19 +28,12 @@
28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 */ 29 */
30 30
31#include <linux/kernel.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <linux/init.h>
34#include <linux/cpufreq.h>
35#include <linux/slab.h>
36#include <linux/acpi.h> 32#include <linux/acpi.h>
37#include <linux/dmi.h> 33#include <linux/dmi.h>
38#include <linux/moduleparam.h> 34#include <linux/sched.h> /* need_resched() */
39#include <linux/sched.h> /* need_resched() */
40#include <linux/pm_qos.h>
41#include <linux/clockchips.h> 35#include <linux/clockchips.h>
42#include <linux/cpuidle.h> 36#include <linux/cpuidle.h>
43#include <linux/irqflags.h>
44 37
45/* 38/*
46 * Include the apic definitions for x86 to have the APIC timer related defines 39 * Include the apic definitions for x86 to have the APIC timer related defines
@@ -52,22 +45,14 @@
52#include <asm/apic.h> 45#include <asm/apic.h>
53#endif 46#endif
54 47
55#include <asm/io.h>
56#include <asm/uaccess.h>
57
58#include <acpi/acpi_bus.h> 48#include <acpi/acpi_bus.h>
59#include <acpi/processor.h> 49#include <acpi/processor.h>
60#include <asm/processor.h>
61 50
62#define PREFIX "ACPI: " 51#define PREFIX "ACPI: "
63 52
64#define ACPI_PROCESSOR_CLASS "processor" 53#define ACPI_PROCESSOR_CLASS "processor"
65#define _COMPONENT ACPI_PROCESSOR_COMPONENT 54#define _COMPONENT ACPI_PROCESSOR_COMPONENT
66ACPI_MODULE_NAME("processor_idle"); 55ACPI_MODULE_NAME("processor_idle");
67#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
68#define C2_OVERHEAD 1 /* 1us */
69#define C3_OVERHEAD 1 /* 1us */
70#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
71 56
72static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 57static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
73module_param(max_cstate, uint, 0000); 58module_param(max_cstate, uint, 0000);
@@ -81,10 +66,11 @@ module_param(latency_factor, uint, 0644);
81 66
82static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 67static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
83 68
69static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
70
84static int disabled_by_idle_boot_param(void) 71static int disabled_by_idle_boot_param(void)
85{ 72{
86 return boot_option_idle_override == IDLE_POLL || 73 return boot_option_idle_override == IDLE_POLL ||
87 boot_option_idle_override == IDLE_FORCE_MWAIT ||
88 boot_option_idle_override == IDLE_HALT; 74 boot_option_idle_override == IDLE_HALT;
89} 75}
90 76
@@ -736,8 +722,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
736 struct cpuidle_driver *drv, int index) 722 struct cpuidle_driver *drv, int index)
737{ 723{
738 struct acpi_processor *pr; 724 struct acpi_processor *pr;
739 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 725 struct acpi_processor_cx *cx = acpi_cstate[index];
740 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
741 726
742 pr = __this_cpu_read(processors); 727 pr = __this_cpu_read(processors);
743 728
@@ -760,8 +745,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
760 */ 745 */
761static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 746static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
762{ 747{
763 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 748 struct acpi_processor_cx *cx = acpi_cstate[index];
764 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
765 749
766 ACPI_FLUSH_CPU_CACHE(); 750 ACPI_FLUSH_CPU_CACHE();
767 751
@@ -791,8 +775,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
791 struct cpuidle_driver *drv, int index) 775 struct cpuidle_driver *drv, int index)
792{ 776{
793 struct acpi_processor *pr; 777 struct acpi_processor *pr;
794 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 778 struct acpi_processor_cx *cx = acpi_cstate[index];
795 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
796 779
797 pr = __this_cpu_read(processors); 780 pr = __this_cpu_read(processors);
798 781
@@ -850,8 +833,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
850 struct cpuidle_driver *drv, int index) 833 struct cpuidle_driver *drv, int index)
851{ 834{
852 struct acpi_processor *pr; 835 struct acpi_processor *pr;
853 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 836 struct acpi_processor_cx *cx = acpi_cstate[index];
854 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
855 837
856 pr = __this_cpu_read(processors); 838 pr = __this_cpu_read(processors);
857 839
@@ -943,13 +925,13 @@ struct cpuidle_driver acpi_idle_driver = {
943 * device i.e. per-cpu data 925 * device i.e. per-cpu data
944 * 926 *
945 * @pr: the ACPI processor 927 * @pr: the ACPI processor
928 * @dev : the cpuidle device
946 */ 929 */
947static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) 930static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
931 struct cpuidle_device *dev)
948{ 932{
949 int i, count = CPUIDLE_DRIVER_STATE_START; 933 int i, count = CPUIDLE_DRIVER_STATE_START;
950 struct acpi_processor_cx *cx; 934 struct acpi_processor_cx *cx;
951 struct cpuidle_state_usage *state_usage;
952 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
953 935
954 if (!pr->flags.power_setup_done) 936 if (!pr->flags.power_setup_done)
955 return -EINVAL; 937 return -EINVAL;
@@ -958,6 +940,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
958 return -EINVAL; 940 return -EINVAL;
959 } 941 }
960 942
943 if (!dev)
944 return -EINVAL;
945
961 dev->cpu = pr->id; 946 dev->cpu = pr->id;
962 947
963 if (max_cstate == 0) 948 if (max_cstate == 0)
@@ -965,7 +950,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
965 950
966 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 951 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
967 cx = &pr->power.states[i]; 952 cx = &pr->power.states[i];
968 state_usage = &dev->states_usage[count];
969 953
970 if (!cx->valid) 954 if (!cx->valid)
971 continue; 955 continue;
@@ -976,8 +960,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
976 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 960 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
977 continue; 961 continue;
978#endif 962#endif
979 963 acpi_cstate[count] = cx;
980 cpuidle_set_statedata(state_usage, cx);
981 964
982 count++; 965 count++;
983 if (count == CPUIDLE_STATE_MAX) 966 if (count == CPUIDLE_STATE_MAX)
@@ -1101,7 +1084,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
1101 cpuidle_disable_device(dev); 1084 cpuidle_disable_device(dev);
1102 acpi_processor_get_power_info(pr); 1085 acpi_processor_get_power_info(pr);
1103 if (pr->flags.power) { 1086 if (pr->flags.power) {
1104 acpi_processor_setup_cpuidle_cx(pr); 1087 acpi_processor_setup_cpuidle_cx(pr, dev);
1105 ret = cpuidle_enable_device(dev); 1088 ret = cpuidle_enable_device(dev);
1106 } 1089 }
1107 cpuidle_resume_and_unlock(); 1090 cpuidle_resume_and_unlock();
@@ -1149,6 +1132,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1149 } 1132 }
1150 1133
1151 /* Populate Updated C-state information */ 1134 /* Populate Updated C-state information */
1135 acpi_processor_get_power_info(pr);
1152 acpi_processor_setup_cpuidle_states(pr); 1136 acpi_processor_setup_cpuidle_states(pr);
1153 1137
1154 /* Enable all cpuidle devices */ 1138 /* Enable all cpuidle devices */
@@ -1158,8 +1142,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1158 continue; 1142 continue;
1159 acpi_processor_get_power_info(_pr); 1143 acpi_processor_get_power_info(_pr);
1160 if (_pr->flags.power) { 1144 if (_pr->flags.power) {
1161 acpi_processor_setup_cpuidle_cx(_pr);
1162 dev = per_cpu(acpi_cpuidle_device, cpu); 1145 dev = per_cpu(acpi_cpuidle_device, cpu);
1146 acpi_processor_setup_cpuidle_cx(_pr, dev);
1163 cpuidle_enable_device(dev); 1147 cpuidle_enable_device(dev);
1164 } 1148 }
1165 } 1149 }
@@ -1228,7 +1212,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
1228 return -ENOMEM; 1212 return -ENOMEM;
1229 per_cpu(acpi_cpuidle_device, pr->id) = dev; 1213 per_cpu(acpi_cpuidle_device, pr->id) = dev;
1230 1214
1231 acpi_processor_setup_cpuidle_cx(pr); 1215 acpi_processor_setup_cpuidle_cx(pr, dev);
1232 1216
1233 /* Register per-cpu cpuidle_device. Cpuidle driver 1217 /* Register per-cpu cpuidle_device. Cpuidle driver
1234 * must already be registered before registering device 1218 * must already be registered before registering device
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 836bfe069042..53e7ac9403a7 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
340 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) 340 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
341 || boot_cpu_data.x86 == 0x11) { 341 || boot_cpu_data.x86 == 0x11) {
342 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); 342 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
343 /*
344 * MSR C001_0064+:
345 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
346 */
347 if (!(hi & BIT(31)))
348 return;
349
343 fid = lo & 0x3f; 350 fid = lo & 0x3f;
344 did = (lo >> 6) & 7; 351 did = (lo >> 6) & 7;
345 if (boot_cpu_data.x86 == 0x10) 352 if (boot_cpu_data.x86 == 0x10)
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ff0740e0a9c2..e523245643ac 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,7 +130,7 @@ struct acpi_sbs {
130 130
131#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) 131#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
132 132
133static int acpi_sbs_remove(struct acpi_device *device, int type); 133static int acpi_sbs_remove(struct acpi_device *device);
134static int acpi_battery_get_state(struct acpi_battery *battery); 134static int acpi_battery_get_state(struct acpi_battery *battery);
135 135
136static inline int battery_scale(int log) 136static inline int battery_scale(int log)
@@ -949,11 +949,11 @@ static int acpi_sbs_add(struct acpi_device *device)
949 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); 949 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
950 end: 950 end:
951 if (result) 951 if (result)
952 acpi_sbs_remove(device, 0); 952 acpi_sbs_remove(device);
953 return result; 953 return result;
954} 954}
955 955
956static int acpi_sbs_remove(struct acpi_device *device, int type) 956static int acpi_sbs_remove(struct acpi_device *device)
957{ 957{
958 struct acpi_sbs *sbs; 958 struct acpi_sbs *sbs;
959 int id; 959 int id;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index cf6129a8af7c..b78bc605837e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -33,7 +33,7 @@ struct acpi_smb_hc {
33}; 33};
34 34
35static int acpi_smbus_hc_add(struct acpi_device *device); 35static int acpi_smbus_hc_add(struct acpi_device *device);
36static int acpi_smbus_hc_remove(struct acpi_device *device, int type); 36static int acpi_smbus_hc_remove(struct acpi_device *device);
37 37
38static const struct acpi_device_id sbs_device_ids[] = { 38static const struct acpi_device_id sbs_device_ids[] = {
39 {"ACPI0001", 0}, 39 {"ACPI0001", 0},
@@ -296,7 +296,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
296 296
297extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); 297extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
298 298
299static int acpi_smbus_hc_remove(struct acpi_device *device, int type) 299static int acpi_smbus_hc_remove(struct acpi_device *device)
300{ 300{
301 struct acpi_smb_hc *hc; 301 struct acpi_smb_hc *hc;
302 302
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c88be6c37c30..daee7497efd3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -29,29 +29,10 @@ extern struct acpi_device *acpi_root;
29 29
30static const char *dummy_hid = "device"; 30static const char *dummy_hid = "device";
31 31
32/*
33 * The following ACPI IDs are known to be suitable for representing as
34 * platform devices.
35 */
36static const struct acpi_device_id acpi_platform_device_ids[] = {
37
38 { "PNP0D40" },
39
40 /* Haswell LPSS devices */
41 { "INT33C0", 0 },
42 { "INT33C1", 0 },
43 { "INT33C2", 0 },
44 { "INT33C3", 0 },
45 { "INT33C4", 0 },
46 { "INT33C5", 0 },
47 { "INT33C6", 0 },
48 { "INT33C7", 0 },
49
50 { }
51};
52
53static LIST_HEAD(acpi_device_list); 32static LIST_HEAD(acpi_device_list);
54static LIST_HEAD(acpi_bus_id_list); 33static LIST_HEAD(acpi_bus_id_list);
34static DEFINE_MUTEX(acpi_scan_lock);
35static LIST_HEAD(acpi_scan_handlers_list);
55DEFINE_MUTEX(acpi_device_lock); 36DEFINE_MUTEX(acpi_device_lock);
56LIST_HEAD(acpi_wakeup_device_list); 37LIST_HEAD(acpi_wakeup_device_list);
57 38
@@ -61,6 +42,27 @@ struct acpi_device_bus_id{
61 struct list_head node; 42 struct list_head node;
62}; 43};
63 44
45void acpi_scan_lock_acquire(void)
46{
47 mutex_lock(&acpi_scan_lock);
48}
49EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
50
51void acpi_scan_lock_release(void)
52{
53 mutex_unlock(&acpi_scan_lock);
54}
55EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
56
57int acpi_scan_add_handler(struct acpi_scan_handler *handler)
58{
59 if (!handler || !handler->attach)
60 return -EINVAL;
61
62 list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
63 return 0;
64}
65
64/* 66/*
65 * Creates hid/cid(s) string needed for modalias and uevent 67 * Creates hid/cid(s) string needed for modalias and uevent
66 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: 68 * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -115,39 +117,32 @@ static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
115 */ 117 */
116void acpi_bus_hot_remove_device(void *context) 118void acpi_bus_hot_remove_device(void *context)
117{ 119{
118 struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context; 120 struct acpi_eject_event *ej_event = context;
119 struct acpi_device *device; 121 struct acpi_device *device = ej_event->device;
120 acpi_handle handle = ej_event->handle; 122 acpi_handle handle = device->handle;
121 acpi_handle temp; 123 acpi_handle temp;
122 struct acpi_object_list arg_list; 124 struct acpi_object_list arg_list;
123 union acpi_object arg; 125 union acpi_object arg;
124 acpi_status status = AE_OK; 126 acpi_status status = AE_OK;
125 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ 127 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
126 128
127 if (acpi_bus_get_device(handle, &device)) 129 mutex_lock(&acpi_scan_lock);
128 goto err_out;
129 130
130 if (!device) 131 /* If there is no handle, the device node has been unregistered. */
131 goto err_out; 132 if (!device->handle) {
133 dev_dbg(&device->dev, "ACPI handle missing\n");
134 put_device(&device->dev);
135 goto out;
136 }
132 137
133 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 138 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
134 "Hot-removing device %s...\n", dev_name(&device->dev))); 139 "Hot-removing device %s...\n", dev_name(&device->dev)));
135 140
136 if (acpi_bus_trim(device, 1)) { 141 acpi_bus_trim(device);
137 printk(KERN_ERR PREFIX 142 /* Device node has been unregistered. */
138 "Removing device failed\n"); 143 put_device(&device->dev);
139 goto err_out;
140 }
141
142 /* device has been freed */
143 device = NULL; 144 device = NULL;
144 145
145 /* power off device */
146 status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
147 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
148 printk(KERN_WARNING PREFIX
149 "Power-off device failed\n");
150
151 if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) { 146 if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
152 arg_list.count = 1; 147 arg_list.count = 1;
153 arg_list.pointer = &arg; 148 arg_list.pointer = &arg;
@@ -167,23 +162,46 @@ void acpi_bus_hot_remove_device(void *context)
167 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 162 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
168 if (ACPI_FAILURE(status)) { 163 if (ACPI_FAILURE(status)) {
169 if (status != AE_NOT_FOUND) 164 if (status != AE_NOT_FOUND)
170 printk(KERN_WARNING PREFIX 165 acpi_handle_warn(handle, "Eject failed\n");
171 "Eject device failed\n");
172 goto err_out;
173 }
174 166
175 kfree(context); 167 /* Tell the firmware the hot-remove operation has failed. */
176 return; 168 acpi_evaluate_hotplug_ost(handle, ej_event->event,
169 ost_code, NULL);
170 }
177 171
178err_out: 172 out:
179 /* Inform firmware the hot-remove operation has completed w/ error */ 173 mutex_unlock(&acpi_scan_lock);
180 (void) acpi_evaluate_hotplug_ost(handle,
181 ej_event->event, ost_code, NULL);
182 kfree(context); 174 kfree(context);
183 return; 175 return;
184} 176}
185EXPORT_SYMBOL(acpi_bus_hot_remove_device); 177EXPORT_SYMBOL(acpi_bus_hot_remove_device);
186 178
179static ssize_t real_power_state_show(struct device *dev,
180 struct device_attribute *attr, char *buf)
181{
182 struct acpi_device *adev = to_acpi_device(dev);
183 int state;
184 int ret;
185
186 ret = acpi_device_get_power(adev, &state);
187 if (ret)
188 return ret;
189
190 return sprintf(buf, "%s\n", acpi_power_state_string(state));
191}
192
193static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
194
195static ssize_t power_state_show(struct device *dev,
196 struct device_attribute *attr, char *buf)
197{
198 struct acpi_device *adev = to_acpi_device(dev);
199
200 return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
201}
202
203static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
204
187static ssize_t 205static ssize_t
188acpi_eject_store(struct device *d, struct device_attribute *attr, 206acpi_eject_store(struct device *d, struct device_attribute *attr,
189 const char *buf, size_t count) 207 const char *buf, size_t count)
@@ -197,12 +215,10 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
197 if ((!count) || (buf[0] != '1')) { 215 if ((!count) || (buf[0] != '1')) {
198 return -EINVAL; 216 return -EINVAL;
199 } 217 }
200#ifndef FORCE_EJECT 218 if (!acpi_device->driver && !acpi_device->handler) {
201 if (acpi_device->driver == NULL) {
202 ret = -ENODEV; 219 ret = -ENODEV;
203 goto err; 220 goto err;
204 } 221 }
205#endif
206 status = acpi_get_type(acpi_device->handle, &type); 222 status = acpi_get_type(acpi_device->handle, &type);
207 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) { 223 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
208 ret = -ENODEV; 224 ret = -ENODEV;
@@ -215,7 +231,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
215 goto err; 231 goto err;
216 } 232 }
217 233
218 ej_event->handle = acpi_device->handle; 234 get_device(&acpi_device->dev);
235 ej_event->device = acpi_device;
219 if (acpi_device->flags.eject_pending) { 236 if (acpi_device->flags.eject_pending) {
220 /* event originated from ACPI eject notification */ 237 /* event originated from ACPI eject notification */
221 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; 238 ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
@@ -223,11 +240,15 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
223 } else { 240 } else {
224 /* event originated from user */ 241 /* event originated from user */
225 ej_event->event = ACPI_OST_EC_OSPM_EJECT; 242 ej_event->event = ACPI_OST_EC_OSPM_EJECT;
226 (void) acpi_evaluate_hotplug_ost(ej_event->handle, 243 (void) acpi_evaluate_hotplug_ost(acpi_device->handle,
227 ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); 244 ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
228 } 245 }
229 246
230 acpi_os_hotplug_execute(acpi_bus_hot_remove_device, (void *)ej_event); 247 status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
248 if (ACPI_FAILURE(status)) {
249 put_device(&acpi_device->dev);
250 kfree(ej_event);
251 }
231err: 252err:
232 return ret; 253 return ret;
233} 254}
@@ -375,8 +396,22 @@ static int acpi_device_setup_files(struct acpi_device *dev)
375 * hot-removal function from userland. 396 * hot-removal function from userland.
376 */ 397 */
377 status = acpi_get_handle(dev->handle, "_EJ0", &temp); 398 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
378 if (ACPI_SUCCESS(status)) 399 if (ACPI_SUCCESS(status)) {
379 result = device_create_file(&dev->dev, &dev_attr_eject); 400 result = device_create_file(&dev->dev, &dev_attr_eject);
401 if (result)
402 return result;
403 }
404
405 if (dev->flags.power_manageable) {
406 result = device_create_file(&dev->dev, &dev_attr_power_state);
407 if (result)
408 return result;
409
410 if (dev->power.flags.power_resources)
411 result = device_create_file(&dev->dev,
412 &dev_attr_real_power_state);
413 }
414
380end: 415end:
381 return result; 416 return result;
382} 417}
@@ -386,6 +421,13 @@ static void acpi_device_remove_files(struct acpi_device *dev)
386 acpi_status status; 421 acpi_status status;
387 acpi_handle temp; 422 acpi_handle temp;
388 423
424 if (dev->flags.power_manageable) {
425 device_remove_file(&dev->dev, &dev_attr_power_state);
426 if (dev->power.flags.power_resources)
427 device_remove_file(&dev->dev,
428 &dev_attr_real_power_state);
429 }
430
389 /* 431 /*
390 * If device has _STR, remove 'description' file 432 * If device has _STR, remove 'description' file
391 */ 433 */
@@ -454,9 +496,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
454 const struct device *dev) 496 const struct device *dev)
455{ 497{
456 struct acpi_device *adev; 498 struct acpi_device *adev;
499 acpi_handle handle = ACPI_HANDLE(dev);
457 500
458 if (!ids || !ACPI_HANDLE(dev) 501 if (!ids || !handle || acpi_bus_get_device(handle, &adev))
459 || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev)))
460 return NULL; 502 return NULL;
461 503
462 return __acpi_match_device(adev, ids); 504 return __acpi_match_device(adev, ids);
@@ -470,7 +512,7 @@ int acpi_match_device_ids(struct acpi_device *device,
470} 512}
471EXPORT_SYMBOL(acpi_match_device_ids); 513EXPORT_SYMBOL(acpi_match_device_ids);
472 514
473static void acpi_free_ids(struct acpi_device *device) 515void acpi_free_ids(struct acpi_device *device)
474{ 516{
475 struct acpi_hardware_id *id, *tmp; 517 struct acpi_hardware_id *id, *tmp;
476 518
@@ -478,6 +520,23 @@ static void acpi_free_ids(struct acpi_device *device)
478 kfree(id->id); 520 kfree(id->id);
479 kfree(id); 521 kfree(id);
480 } 522 }
523 kfree(device->pnp.unique_id);
524}
525
526static void acpi_free_power_resources_lists(struct acpi_device *device)
527{
528 int i;
529
530 if (device->wakeup.flags.valid)
531 acpi_power_resources_list_free(&device->wakeup.resources);
532
533 if (!device->flags.power_manageable)
534 return;
535
536 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
537 struct acpi_device_power_state *ps = &device->power.states[i];
538 acpi_power_resources_list_free(&ps->resources);
539 }
481} 540}
482 541
483static void acpi_device_release(struct device *dev) 542static void acpi_device_release(struct device *dev)
@@ -485,7 +544,7 @@ static void acpi_device_release(struct device *dev)
485 struct acpi_device *acpi_dev = to_acpi_device(dev); 544 struct acpi_device *acpi_dev = to_acpi_device(dev);
486 545
487 acpi_free_ids(acpi_dev); 546 acpi_free_ids(acpi_dev);
488 kfree(acpi_dev->pnp.unique_id); 547 acpi_free_power_resources_lists(acpi_dev);
489 kfree(acpi_dev); 548 kfree(acpi_dev);
490} 549}
491 550
@@ -494,7 +553,8 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
494 struct acpi_device *acpi_dev = to_acpi_device(dev); 553 struct acpi_device *acpi_dev = to_acpi_device(dev);
495 struct acpi_driver *acpi_drv = to_acpi_driver(drv); 554 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
496 555
497 return !acpi_match_device_ids(acpi_dev, acpi_drv->ids); 556 return acpi_dev->flags.match_driver
557 && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
498} 558}
499 559
500static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) 560static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -570,7 +630,6 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
570} 630}
571 631
572static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *); 632static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
573static int acpi_start_single_object(struct acpi_device *);
574static int acpi_device_probe(struct device * dev) 633static int acpi_device_probe(struct device * dev)
575{ 634{
576 struct acpi_device *acpi_dev = to_acpi_device(dev); 635 struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -579,15 +638,13 @@ static int acpi_device_probe(struct device * dev)
579 638
580 ret = acpi_bus_driver_init(acpi_dev, acpi_drv); 639 ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
581 if (!ret) { 640 if (!ret) {
582 if (acpi_dev->bus_ops.acpi_op_start)
583 acpi_start_single_object(acpi_dev);
584
585 if (acpi_drv->ops.notify) { 641 if (acpi_drv->ops.notify) {
586 ret = acpi_device_install_notify_handler(acpi_dev); 642 ret = acpi_device_install_notify_handler(acpi_dev);
587 if (ret) { 643 if (ret) {
588 if (acpi_drv->ops.remove) 644 if (acpi_drv->ops.remove)
589 acpi_drv->ops.remove(acpi_dev, 645 acpi_drv->ops.remove(acpi_dev);
590 acpi_dev->removal_type); 646 acpi_dev->driver = NULL;
647 acpi_dev->driver_data = NULL;
591 return ret; 648 return ret;
592 } 649 }
593 } 650 }
@@ -609,7 +666,7 @@ static int acpi_device_remove(struct device * dev)
609 if (acpi_drv->ops.notify) 666 if (acpi_drv->ops.notify)
610 acpi_device_remove_notify_handler(acpi_dev); 667 acpi_device_remove_notify_handler(acpi_dev);
611 if (acpi_drv->ops.remove) 668 if (acpi_drv->ops.remove)
612 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type); 669 acpi_drv->ops.remove(acpi_dev);
613 } 670 }
614 acpi_dev->driver = NULL; 671 acpi_dev->driver = NULL;
615 acpi_dev->driver_data = NULL; 672 acpi_dev->driver_data = NULL;
@@ -626,12 +683,25 @@ struct bus_type acpi_bus_type = {
626 .uevent = acpi_device_uevent, 683 .uevent = acpi_device_uevent,
627}; 684};
628 685
629static int acpi_device_register(struct acpi_device *device) 686int acpi_device_add(struct acpi_device *device,
687 void (*release)(struct device *))
630{ 688{
631 int result; 689 int result;
632 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; 690 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
633 int found = 0; 691 int found = 0;
634 692
693 if (device->handle) {
694 acpi_status status;
695
696 status = acpi_attach_data(device->handle, acpi_bus_data_handler,
697 device);
698 if (ACPI_FAILURE(status)) {
699 acpi_handle_err(device->handle,
700 "Unable to attach device data\n");
701 return -ENODEV;
702 }
703 }
704
635 /* 705 /*
636 * Linkage 706 * Linkage
637 * ------- 707 * -------
@@ -642,11 +712,13 @@ static int acpi_device_register(struct acpi_device *device)
642 INIT_LIST_HEAD(&device->wakeup_list); 712 INIT_LIST_HEAD(&device->wakeup_list);
643 INIT_LIST_HEAD(&device->physical_node_list); 713 INIT_LIST_HEAD(&device->physical_node_list);
644 mutex_init(&device->physical_node_lock); 714 mutex_init(&device->physical_node_lock);
715 INIT_LIST_HEAD(&device->power_dependent);
645 716
646 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); 717 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
647 if (!new_bus_id) { 718 if (!new_bus_id) {
648 printk(KERN_ERR PREFIX "Memory allocation error\n"); 719 pr_err(PREFIX "Memory allocation error\n");
649 return -ENOMEM; 720 result = -ENOMEM;
721 goto err_detach;
650 } 722 }
651 723
652 mutex_lock(&acpi_device_lock); 724 mutex_lock(&acpi_device_lock);
@@ -681,11 +753,11 @@ static int acpi_device_register(struct acpi_device *device)
681 if (device->parent) 753 if (device->parent)
682 device->dev.parent = &device->parent->dev; 754 device->dev.parent = &device->parent->dev;
683 device->dev.bus = &acpi_bus_type; 755 device->dev.bus = &acpi_bus_type;
684 device->dev.release = &acpi_device_release; 756 device->dev.release = release;
685 result = device_register(&device->dev); 757 result = device_add(&device->dev);
686 if (result) { 758 if (result) {
687 dev_err(&device->dev, "Error registering device\n"); 759 dev_err(&device->dev, "Error registering device\n");
688 goto end; 760 goto err;
689 } 761 }
690 762
691 result = acpi_device_setup_files(device); 763 result = acpi_device_setup_files(device);
@@ -695,16 +767,20 @@ static int acpi_device_register(struct acpi_device *device)
695 767
696 device->removal_type = ACPI_BUS_REMOVAL_NORMAL; 768 device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
697 return 0; 769 return 0;
698end: 770
771 err:
699 mutex_lock(&acpi_device_lock); 772 mutex_lock(&acpi_device_lock);
700 if (device->parent) 773 if (device->parent)
701 list_del(&device->node); 774 list_del(&device->node);
702 list_del(&device->wakeup_list); 775 list_del(&device->wakeup_list);
703 mutex_unlock(&acpi_device_lock); 776 mutex_unlock(&acpi_device_lock);
777
778 err_detach:
779 acpi_detach_data(device->handle, acpi_bus_data_handler);
704 return result; 780 return result;
705} 781}
706 782
707static void acpi_device_unregister(struct acpi_device *device, int type) 783static void acpi_device_unregister(struct acpi_device *device)
708{ 784{
709 mutex_lock(&acpi_device_lock); 785 mutex_lock(&acpi_device_lock);
710 if (device->parent) 786 if (device->parent)
@@ -715,8 +791,20 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
715 791
716 acpi_detach_data(device->handle, acpi_bus_data_handler); 792 acpi_detach_data(device->handle, acpi_bus_data_handler);
717 793
794 acpi_power_add_remove_device(device, false);
718 acpi_device_remove_files(device); 795 acpi_device_remove_files(device);
719 device_unregister(&device->dev); 796 if (device->remove)
797 device->remove(device);
798
799 device_del(&device->dev);
800 /*
801 * Transition the device to D3cold to drop the reference counts of all
802 * power resources the device depends on and turn off the ones that have
803 * no more references.
804 */
805 acpi_device_set_power(device, ACPI_STATE_D3_COLD);
806 device->handle = NULL;
807 put_device(&device->dev);
720} 808}
721 809
722/* -------------------------------------------------------------------------- 810/* --------------------------------------------------------------------------
@@ -760,24 +848,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
760 return 0; 848 return 0;
761} 849}
762 850
763static int acpi_start_single_object(struct acpi_device *device)
764{
765 int result = 0;
766 struct acpi_driver *driver;
767
768
769 if (!(driver = device->driver))
770 return 0;
771
772 if (driver->ops.start) {
773 result = driver->ops.start(device);
774 if (result && driver->ops.remove)
775 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
776 }
777
778 return result;
779}
780
781/** 851/**
782 * acpi_bus_register_driver - register a driver with the ACPI bus 852 * acpi_bus_register_driver - register a driver with the ACPI bus
783 * @driver: driver being registered 853 * @driver: driver being registered
@@ -821,29 +891,23 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
821 -------------------------------------------------------------------------- */ 891 -------------------------------------------------------------------------- */
822static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) 892static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
823{ 893{
894 struct acpi_device *device = NULL;
824 acpi_status status; 895 acpi_status status;
825 int ret;
826 struct acpi_device *device;
827 896
828 /* 897 /*
829 * Fixed hardware devices do not appear in the namespace and do not 898 * Fixed hardware devices do not appear in the namespace and do not
830 * have handles, but we fabricate acpi_devices for them, so we have 899 * have handles, but we fabricate acpi_devices for them, so we have
831 * to deal with them specially. 900 * to deal with them specially.
832 */ 901 */
833 if (handle == NULL) 902 if (!handle)
834 return acpi_root; 903 return acpi_root;
835 904
836 do { 905 do {
837 status = acpi_get_parent(handle, &handle); 906 status = acpi_get_parent(handle, &handle);
838 if (status == AE_NULL_ENTRY)
839 return NULL;
840 if (ACPI_FAILURE(status)) 907 if (ACPI_FAILURE(status))
841 return acpi_root; 908 return status == AE_NULL_ENTRY ? NULL : acpi_root;
842 909 } while (acpi_bus_get_device(handle, &device));
843 ret = acpi_bus_get_device(handle, &device); 910 return device;
844 if (ret == 0)
845 return device;
846 } while (1);
847} 911}
848 912
849acpi_status 913acpi_status
@@ -877,52 +941,43 @@ void acpi_bus_data_handler(acpi_handle handle, void *context)
877 return; 941 return;
878} 942}
879 943
880static int acpi_bus_get_perf_flags(struct acpi_device *device) 944static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
881{ 945 struct acpi_device_wakeup *wakeup)
882 device->performance.state = ACPI_STATE_UNKNOWN;
883 return 0;
884}
885
886static acpi_status
887acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
888 struct acpi_device_wakeup *wakeup)
889{ 946{
890 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 947 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
891 union acpi_object *package = NULL; 948 union acpi_object *package = NULL;
892 union acpi_object *element = NULL; 949 union acpi_object *element = NULL;
893 acpi_status status; 950 acpi_status status;
894 int i = 0; 951 int err = -ENODATA;
895 952
896 if (!wakeup) 953 if (!wakeup)
897 return AE_BAD_PARAMETER; 954 return -EINVAL;
955
956 INIT_LIST_HEAD(&wakeup->resources);
898 957
899 /* _PRW */ 958 /* _PRW */
900 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); 959 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
901 if (ACPI_FAILURE(status)) { 960 if (ACPI_FAILURE(status)) {
902 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW")); 961 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
903 return status; 962 return err;
904 } 963 }
905 964
906 package = (union acpi_object *)buffer.pointer; 965 package = (union acpi_object *)buffer.pointer;
907 966
908 if (!package || (package->package.count < 2)) { 967 if (!package || package->package.count < 2)
909 status = AE_BAD_DATA;
910 goto out; 968 goto out;
911 }
912 969
913 element = &(package->package.elements[0]); 970 element = &(package->package.elements[0]);
914 if (!element) { 971 if (!element)
915 status = AE_BAD_DATA;
916 goto out; 972 goto out;
917 } 973
918 if (element->type == ACPI_TYPE_PACKAGE) { 974 if (element->type == ACPI_TYPE_PACKAGE) {
919 if ((element->package.count < 2) || 975 if ((element->package.count < 2) ||
920 (element->package.elements[0].type != 976 (element->package.elements[0].type !=
921 ACPI_TYPE_LOCAL_REFERENCE) 977 ACPI_TYPE_LOCAL_REFERENCE)
922 || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) { 978 || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
923 status = AE_BAD_DATA;
924 goto out; 979 goto out;
925 } 980
926 wakeup->gpe_device = 981 wakeup->gpe_device =
927 element->package.elements[0].reference.handle; 982 element->package.elements[0].reference.handle;
928 wakeup->gpe_number = 983 wakeup->gpe_number =
@@ -931,38 +986,35 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
931 wakeup->gpe_device = NULL; 986 wakeup->gpe_device = NULL;
932 wakeup->gpe_number = element->integer.value; 987 wakeup->gpe_number = element->integer.value;
933 } else { 988 } else {
934 status = AE_BAD_DATA;
935 goto out; 989 goto out;
936 } 990 }
937 991
938 element = &(package->package.elements[1]); 992 element = &(package->package.elements[1]);
939 if (element->type != ACPI_TYPE_INTEGER) { 993 if (element->type != ACPI_TYPE_INTEGER)
940 status = AE_BAD_DATA;
941 goto out; 994 goto out;
942 } 995
943 wakeup->sleep_state = element->integer.value; 996 wakeup->sleep_state = element->integer.value;
944 997
945 if ((package->package.count - 2) > ACPI_MAX_HANDLES) { 998 err = acpi_extract_power_resources(package, 2, &wakeup->resources);
946 status = AE_NO_MEMORY; 999 if (err)
947 goto out; 1000 goto out;
948 }
949 wakeup->resources.count = package->package.count - 2;
950 for (i = 0; i < wakeup->resources.count; i++) {
951 element = &(package->package.elements[i + 2]);
952 if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
953 status = AE_BAD_DATA;
954 goto out;
955 }
956 1001
957 wakeup->resources.handles[i] = element->reference.handle; 1002 if (!list_empty(&wakeup->resources)) {
958 } 1003 int sleep_state;
959 1004
1005 sleep_state = acpi_power_min_system_level(&wakeup->resources);
1006 if (sleep_state < wakeup->sleep_state) {
1007 acpi_handle_warn(handle, "Overriding _PRW sleep state "
1008 "(S%d) by S%d from power resources\n",
1009 (int)wakeup->sleep_state, sleep_state);
1010 wakeup->sleep_state = sleep_state;
1011 }
1012 }
960 acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number); 1013 acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
961 1014
962 out: 1015 out:
963 kfree(buffer.pointer); 1016 kfree(buffer.pointer);
964 1017 return err;
965 return status;
966} 1018}
967 1019
968static void acpi_bus_set_run_wake_flags(struct acpi_device *device) 1020static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
@@ -1002,17 +1054,17 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
1002{ 1054{
1003 acpi_handle temp; 1055 acpi_handle temp;
1004 acpi_status status = 0; 1056 acpi_status status = 0;
1005 int psw_error; 1057 int err;
1006 1058
1007 /* Presence of _PRW indicates wake capable */ 1059 /* Presence of _PRW indicates wake capable */
1008 status = acpi_get_handle(device->handle, "_PRW", &temp); 1060 status = acpi_get_handle(device->handle, "_PRW", &temp);
1009 if (ACPI_FAILURE(status)) 1061 if (ACPI_FAILURE(status))
1010 return; 1062 return;
1011 1063
1012 status = acpi_bus_extract_wakeup_device_power_package(device->handle, 1064 err = acpi_bus_extract_wakeup_device_power_package(device->handle,
1013 &device->wakeup); 1065 &device->wakeup);
1014 if (ACPI_FAILURE(status)) { 1066 if (err) {
1015 ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); 1067 dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
1016 return; 1068 return;
1017 } 1069 }
1018 1070
@@ -1025,20 +1077,73 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
1025 * So it is necessary to call _DSW object first. Only when it is not 1077 * So it is necessary to call _DSW object first. Only when it is not
1026 * present will the _PSW object used. 1078 * present will the _PSW object used.
1027 */ 1079 */
1028 psw_error = acpi_device_sleep_wake(device, 0, 0, 0); 1080 err = acpi_device_sleep_wake(device, 0, 0, 0);
1029 if (psw_error) 1081 if (err)
1030 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1082 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1031 "error in _DSW or _PSW evaluation\n")); 1083 "error in _DSW or _PSW evaluation\n"));
1032} 1084}
1033 1085
1034static void acpi_bus_add_power_resource(acpi_handle handle); 1086static void acpi_bus_init_power_state(struct acpi_device *device, int state)
1087{
1088 struct acpi_device_power_state *ps = &device->power.states[state];
1089 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
1090 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1091 acpi_handle handle;
1092 acpi_status status;
1093
1094 INIT_LIST_HEAD(&ps->resources);
1095
1096 /* Evaluate "_PRx" to get referenced power resources */
1097 status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
1098 if (ACPI_SUCCESS(status)) {
1099 union acpi_object *package = buffer.pointer;
1100
1101 if (buffer.length && package
1102 && package->type == ACPI_TYPE_PACKAGE
1103 && package->package.count) {
1104 int err = acpi_extract_power_resources(package, 0,
1105 &ps->resources);
1106 if (!err)
1107 device->power.flags.power_resources = 1;
1108 }
1109 ACPI_FREE(buffer.pointer);
1110 }
1111
1112 /* Evaluate "_PSx" to see if we can do explicit sets */
1113 pathname[2] = 'S';
1114 status = acpi_get_handle(device->handle, pathname, &handle);
1115 if (ACPI_SUCCESS(status))
1116 ps->flags.explicit_set = 1;
1117
1118 /*
1119 * State is valid if there are means to put the device into it.
1120 * D3hot is only valid if _PR3 present.
1121 */
1122 if (!list_empty(&ps->resources)
1123 || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) {
1124 ps->flags.valid = 1;
1125 ps->flags.os_accessible = 1;
1126 }
1127
1128 ps->power = -1; /* Unknown - driver assigned */
1129 ps->latency = -1; /* Unknown - driver assigned */
1130}
1035 1131
1036static int acpi_bus_get_power_flags(struct acpi_device *device) 1132static void acpi_bus_get_power_flags(struct acpi_device *device)
1037{ 1133{
1038 acpi_status status = 0; 1134 acpi_status status;
1039 acpi_handle handle = NULL; 1135 acpi_handle handle;
1040 u32 i = 0; 1136 u32 i;
1137
1138 /* Presence of _PS0|_PR0 indicates 'power manageable' */
1139 status = acpi_get_handle(device->handle, "_PS0", &handle);
1140 if (ACPI_FAILURE(status)) {
1141 status = acpi_get_handle(device->handle, "_PR0", &handle);
1142 if (ACPI_FAILURE(status))
1143 return;
1144 }
1041 1145
1146 device->flags.power_manageable = 1;
1042 1147
1043 /* 1148 /*
1044 * Power Management Flags 1149 * Power Management Flags
@@ -1053,40 +1158,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
1053 /* 1158 /*
1054 * Enumerate supported power management states 1159 * Enumerate supported power management states
1055 */ 1160 */
1056 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { 1161 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
1057 struct acpi_device_power_state *ps = &device->power.states[i]; 1162 acpi_bus_init_power_state(device, i);
1058 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
1059
1060 /* Evaluate "_PRx" to se if power resources are referenced */
1061 acpi_evaluate_reference(device->handle, object_name, NULL,
1062 &ps->resources);
1063 if (ps->resources.count) {
1064 int j;
1065
1066 device->power.flags.power_resources = 1;
1067 for (j = 0; j < ps->resources.count; j++)
1068 acpi_bus_add_power_resource(ps->resources.handles[j]);
1069 }
1070 1163
1071 /* Evaluate "_PSx" to see if we can do explicit sets */ 1164 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
1072 object_name[2] = 'S';
1073 status = acpi_get_handle(device->handle, object_name, &handle);
1074 if (ACPI_SUCCESS(status))
1075 ps->flags.explicit_set = 1;
1076
1077 /*
1078 * State is valid if there are means to put the device into it.
1079 * D3hot is only valid if _PR3 present.
1080 */
1081 if (ps->resources.count ||
1082 (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
1083 ps->flags.valid = 1;
1084 ps->flags.os_accessible = 1;
1085 }
1086
1087 ps->power = -1; /* Unknown - driver assigned */
1088 ps->latency = -1; /* Unknown - driver assigned */
1089 }
1090 1165
1091 /* Set defaults for D0 and D3 states (always valid) */ 1166 /* Set defaults for D0 and D3 states (always valid) */
1092 device->power.states[ACPI_STATE_D0].flags.valid = 1; 1167 device->power.states[ACPI_STATE_D0].flags.valid = 1;
@@ -1103,17 +1178,17 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
1103 device->power.flags.power_resources) 1178 device->power.flags.power_resources)
1104 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; 1179 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
1105 1180
1106 acpi_bus_init_power(device); 1181 if (acpi_bus_init_power(device)) {
1107 1182 acpi_free_power_resources_lists(device);
1108 return 0; 1183 device->flags.power_manageable = 0;
1184 }
1109} 1185}
1110 1186
1111static int acpi_bus_get_flags(struct acpi_device *device) 1187static void acpi_bus_get_flags(struct acpi_device *device)
1112{ 1188{
1113 acpi_status status = AE_OK; 1189 acpi_status status = AE_OK;
1114 acpi_handle temp = NULL; 1190 acpi_handle temp = NULL;
1115 1191
1116
1117 /* Presence of _STA indicates 'dynamic_status' */ 1192 /* Presence of _STA indicates 'dynamic_status' */
1118 status = acpi_get_handle(device->handle, "_STA", &temp); 1193 status = acpi_get_handle(device->handle, "_STA", &temp);
1119 if (ACPI_SUCCESS(status)) 1194 if (ACPI_SUCCESS(status))
@@ -1133,21 +1208,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
1133 if (ACPI_SUCCESS(status)) 1208 if (ACPI_SUCCESS(status))
1134 device->flags.ejectable = 1; 1209 device->flags.ejectable = 1;
1135 } 1210 }
1136
1137 /* Power resources cannot be power manageable. */
1138 if (device->device_type == ACPI_BUS_TYPE_POWER)
1139 return 0;
1140
1141 /* Presence of _PS0|_PR0 indicates 'power manageable' */
1142 status = acpi_get_handle(device->handle, "_PS0", &temp);
1143 if (ACPI_FAILURE(status))
1144 status = acpi_get_handle(device->handle, "_PR0", &temp);
1145 if (ACPI_SUCCESS(status))
1146 device->flags.power_manageable = 1;
1147
1148 /* TBD: Performance management */
1149
1150 return 0;
1151} 1211}
1152 1212
1153static void acpi_device_get_busid(struct acpi_device *device) 1213static void acpi_device_get_busid(struct acpi_device *device)
@@ -1372,56 +1432,32 @@ static void acpi_device_set_id(struct acpi_device *device)
1372 } 1432 }
1373} 1433}
1374 1434
1375static int acpi_device_set_context(struct acpi_device *device) 1435void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1436 int type, unsigned long long sta)
1376{ 1437{
1377 acpi_status status; 1438 INIT_LIST_HEAD(&device->pnp.ids);
1378 1439 device->device_type = type;
1379 /* 1440 device->handle = handle;
1380 * Context 1441 device->parent = acpi_bus_get_parent(handle);
1381 * ------- 1442 STRUCT_TO_INT(device->status) = sta;
1382 * Attach this 'struct acpi_device' to the ACPI object. This makes 1443 acpi_device_get_busid(device);
1383 * resolutions from handle->device very efficient. Fixed hardware 1444 acpi_device_set_id(device);
1384 * devices have no handles, so we skip them. 1445 acpi_bus_get_flags(device);
1385 */ 1446 device->flags.match_driver = false;
1386 if (!device->handle) 1447 device_initialize(&device->dev);
1387 return 0; 1448 dev_set_uevent_suppress(&device->dev, true);
1388
1389 status = acpi_attach_data(device->handle,
1390 acpi_bus_data_handler, device);
1391 if (ACPI_SUCCESS(status))
1392 return 0;
1393
1394 printk(KERN_ERR PREFIX "Error attaching device data\n");
1395 return -ENODEV;
1396} 1449}
1397 1450
1398static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 1451void acpi_device_add_finalize(struct acpi_device *device)
1399{ 1452{
1400 if (!dev) 1453 device->flags.match_driver = true;
1401 return -EINVAL; 1454 dev_set_uevent_suppress(&device->dev, false);
1402 1455 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1403 dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
1404 device_release_driver(&dev->dev);
1405
1406 if (!rmdevice)
1407 return 0;
1408
1409 /*
1410 * unbind _ADR-Based Devices when hot removal
1411 */
1412 if (dev->flags.bus_address) {
1413 if ((dev->parent) && (dev->parent->ops.unbind))
1414 dev->parent->ops.unbind(dev);
1415 }
1416 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
1417
1418 return 0;
1419} 1456}
1420 1457
1421static int acpi_add_single_object(struct acpi_device **child, 1458static int acpi_add_single_object(struct acpi_device **child,
1422 acpi_handle handle, int type, 1459 acpi_handle handle, int type,
1423 unsigned long long sta, 1460 unsigned long long sta)
1424 struct acpi_bus_ops *ops)
1425{ 1461{
1426 int result; 1462 int result;
1427 struct acpi_device *device; 1463 struct acpi_device *device;
@@ -1433,102 +1469,25 @@ static int acpi_add_single_object(struct acpi_device **child,
1433 return -ENOMEM; 1469 return -ENOMEM;
1434 } 1470 }
1435 1471
1436 INIT_LIST_HEAD(&device->pnp.ids); 1472 acpi_init_device_object(device, handle, type, sta);
1437 device->device_type = type; 1473 acpi_bus_get_power_flags(device);
1438 device->handle = handle;
1439 device->parent = acpi_bus_get_parent(handle);
1440 device->bus_ops = *ops; /* workround for not call .start */
1441 STRUCT_TO_INT(device->status) = sta;
1442
1443 acpi_device_get_busid(device);
1444
1445 /*
1446 * Flags
1447 * -----
1448 * Note that we only look for object handles -- cannot evaluate objects
1449 * until we know the device is present and properly initialized.
1450 */
1451 result = acpi_bus_get_flags(device);
1452 if (result)
1453 goto end;
1454
1455 /*
1456 * Initialize Device
1457 * -----------------
1458 * TBD: Synch with Core's enumeration/initialization process.
1459 */
1460 acpi_device_set_id(device);
1461
1462 /*
1463 * Power Management
1464 * ----------------
1465 */
1466 if (device->flags.power_manageable) {
1467 result = acpi_bus_get_power_flags(device);
1468 if (result)
1469 goto end;
1470 }
1471
1472 /*
1473 * Wakeup device management
1474 *-----------------------
1475 */
1476 acpi_bus_get_wakeup_device_flags(device); 1474 acpi_bus_get_wakeup_device_flags(device);
1477 1475
1478 /* 1476 result = acpi_device_add(device, acpi_device_release);
1479 * Performance Management 1477 if (result) {
1480 * ----------------------
1481 */
1482 if (device->flags.performance_manageable) {
1483 result = acpi_bus_get_perf_flags(device);
1484 if (result)
1485 goto end;
1486 }
1487
1488 if ((result = acpi_device_set_context(device)))
1489 goto end;
1490
1491 result = acpi_device_register(device);
1492
1493 /*
1494 * Bind _ADR-Based Devices when hot add
1495 */
1496 if (device->flags.bus_address) {
1497 if (device->parent && device->parent->ops.bind)
1498 device->parent->ops.bind(device);
1499 }
1500
1501end:
1502 if (!result) {
1503 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1504 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1505 "Adding %s [%s] parent %s\n", dev_name(&device->dev),
1506 (char *) buffer.pointer,
1507 device->parent ? dev_name(&device->parent->dev) :
1508 "(null)"));
1509 kfree(buffer.pointer);
1510 *child = device;
1511 } else
1512 acpi_device_release(&device->dev); 1478 acpi_device_release(&device->dev);
1479 return result;
1480 }
1513 1481
1514 return result; 1482 acpi_power_add_remove_device(device, true);
1515} 1483 acpi_device_add_finalize(device);
1516 1484 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1517#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ 1485 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
1518 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) 1486 dev_name(&device->dev), (char *) buffer.pointer,
1519 1487 device->parent ? dev_name(&device->parent->dev) : "(null)"));
1520static void acpi_bus_add_power_resource(acpi_handle handle) 1488 kfree(buffer.pointer);
1521{ 1489 *child = device;
1522 struct acpi_bus_ops ops = { 1490 return 0;
1523 .acpi_op_add = 1,
1524 .acpi_op_start = 1,
1525 };
1526 struct acpi_device *device = NULL;
1527
1528 acpi_bus_get_device(handle, &device);
1529 if (!device)
1530 acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
1531 ACPI_STA_DEFAULT, &ops);
1532} 1491}
1533 1492
1534static int acpi_bus_type_and_status(acpi_handle handle, int *type, 1493static int acpi_bus_type_and_status(acpi_handle handle, int *type,
@@ -1570,218 +1529,248 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1570 return 0; 1529 return 0;
1571} 1530}
1572 1531
1573static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl, 1532static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1574 void *context, void **return_value) 1533 void *not_used, void **return_value)
1575{ 1534{
1576 struct acpi_bus_ops *ops = context; 1535 struct acpi_device *device = NULL;
1577 int type; 1536 int type;
1578 unsigned long long sta; 1537 unsigned long long sta;
1579 struct acpi_device *device;
1580 acpi_status status; 1538 acpi_status status;
1581 int result; 1539 int result;
1582 1540
1541 acpi_bus_get_device(handle, &device);
1542 if (device)
1543 goto out;
1544
1583 result = acpi_bus_type_and_status(handle, &type, &sta); 1545 result = acpi_bus_type_and_status(handle, &type, &sta);
1584 if (result) 1546 if (result)
1585 return AE_OK; 1547 return AE_OK;
1586 1548
1549 if (type == ACPI_BUS_TYPE_POWER) {
1550 acpi_add_power_resource(handle);
1551 return AE_OK;
1552 }
1553
1587 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1554 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1588 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1555 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
1589 struct acpi_device_wakeup wakeup; 1556 struct acpi_device_wakeup wakeup;
1590 acpi_handle temp; 1557 acpi_handle temp;
1591 1558
1592 status = acpi_get_handle(handle, "_PRW", &temp); 1559 status = acpi_get_handle(handle, "_PRW", &temp);
1593 if (ACPI_SUCCESS(status)) 1560 if (ACPI_SUCCESS(status)) {
1594 acpi_bus_extract_wakeup_device_power_package(handle, 1561 acpi_bus_extract_wakeup_device_power_package(handle,
1595 &wakeup); 1562 &wakeup);
1563 acpi_power_resources_list_free(&wakeup.resources);
1564 }
1596 return AE_CTRL_DEPTH; 1565 return AE_CTRL_DEPTH;
1597 } 1566 }
1598 1567
1599 /* 1568 acpi_add_single_object(&device, handle, type, sta);
1600 * We may already have an acpi_device from a previous enumeration. If
1601 * so, we needn't add it again, but we may still have to start it.
1602 */
1603 device = NULL;
1604 acpi_bus_get_device(handle, &device);
1605 if (ops->acpi_op_add && !device) {
1606 acpi_add_single_object(&device, handle, type, sta, ops);
1607 /* Is the device a known good platform device? */
1608 if (device
1609 && !acpi_match_device_ids(device, acpi_platform_device_ids))
1610 acpi_create_platform_device(device);
1611 }
1612
1613 if (!device) 1569 if (!device)
1614 return AE_CTRL_DEPTH; 1570 return AE_CTRL_DEPTH;
1615 1571
1616 if (ops->acpi_op_start && !(ops->acpi_op_add)) { 1572 out:
1617 status = acpi_start_single_object(device);
1618 if (ACPI_FAILURE(status))
1619 return AE_CTRL_DEPTH;
1620 }
1621
1622 if (!*return_value) 1573 if (!*return_value)
1623 *return_value = device; 1574 *return_value = device;
1575
1624 return AE_OK; 1576 return AE_OK;
1625} 1577}
1626 1578
1627static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, 1579static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id)
1628 struct acpi_device **child)
1629{ 1580{
1630 acpi_status status; 1581 struct acpi_scan_handler *handler;
1631 void *device = NULL;
1632 1582
1633 status = acpi_bus_check_add(handle, 0, ops, &device); 1583 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) {
1634 if (ACPI_SUCCESS(status)) 1584 const struct acpi_device_id *devid;
1635 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1636 acpi_bus_check_add, NULL, ops, &device);
1637 1585
1638 if (child) 1586 for (devid = handler->ids; devid->id[0]; devid++) {
1639 *child = device; 1587 int ret;
1640 1588
1641 if (device) 1589 if (strcmp((char *)devid->id, id))
1642 return 0; 1590 continue;
1643 else
1644 return -ENODEV;
1645}
1646 1591
1647/* 1592 ret = handler->attach(device, devid);
1648 * acpi_bus_add and acpi_bus_start 1593 if (ret > 0) {
1649 * 1594 device->handler = handler;
1650 * scan a given ACPI tree and (probably recently hot-plugged) 1595 return ret;
1651 * create and add or starts found devices. 1596 } else if (ret < 0) {
1652 * 1597 return ret;
1653 * If no devices were found -ENODEV is returned which does not 1598 }
1654 * mean that this is a real error, there just have been no suitable 1599 }
1655 * ACPI objects in the table trunk from which the kernel could create 1600 }
1656 * a device and add/start an appropriate driver. 1601 return 0;
1657 */ 1602}
1658 1603
1659int 1604static int acpi_scan_attach_handler(struct acpi_device *device)
1660acpi_bus_add(struct acpi_device **child,
1661 struct acpi_device *parent, acpi_handle handle, int type)
1662{ 1605{
1663 struct acpi_bus_ops ops; 1606 struct acpi_hardware_id *hwid;
1607 int ret = 0;
1664 1608
1665 memset(&ops, 0, sizeof(ops)); 1609 list_for_each_entry(hwid, &device->pnp.ids, list) {
1666 ops.acpi_op_add = 1; 1610 ret = acpi_scan_do_attach_handler(device, hwid->id);
1611 if (ret)
1612 break;
1667 1613
1668 return acpi_bus_scan(handle, &ops, child); 1614 }
1615 return ret;
1669} 1616}
1670EXPORT_SYMBOL(acpi_bus_add);
1671 1617
1672int acpi_bus_start(struct acpi_device *device) 1618static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
1619 void *not_used, void **ret_not_used)
1673{ 1620{
1674 struct acpi_bus_ops ops; 1621 struct acpi_device *device;
1675 int result; 1622 unsigned long long sta_not_used;
1676 1623 int ret;
1677 if (!device)
1678 return -EINVAL;
1679 1624
1680 memset(&ops, 0, sizeof(ops)); 1625 /*
1681 ops.acpi_op_start = 1; 1626 * Ignore errors ignored by acpi_bus_check_add() to avoid terminating
1627 * namespace walks prematurely.
1628 */
1629 if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
1630 return AE_OK;
1682 1631
1683 result = acpi_bus_scan(device->handle, &ops, NULL); 1632 if (acpi_bus_get_device(handle, &device))
1633 return AE_CTRL_DEPTH;
1684 1634
1685 acpi_update_all_gpes(); 1635 ret = acpi_scan_attach_handler(device);
1636 if (ret)
1637 return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
1686 1638
1687 return result; 1639 ret = device_attach(&device->dev);
1640 return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
1688} 1641}
1689EXPORT_SYMBOL(acpi_bus_start);
1690 1642
1691int acpi_bus_trim(struct acpi_device *start, int rmdevice) 1643/**
1644 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
1645 * @handle: Root of the namespace scope to scan.
1646 *
1647 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
1648 * found devices.
1649 *
1650 * If no devices were found, -ENODEV is returned, but it does not mean that
1651 * there has been a real error. There just have been no suitable ACPI objects
1652 * in the table trunk from which the kernel could create a device and add an
1653 * appropriate driver.
1654 *
1655 * Must be called under acpi_scan_lock.
1656 */
1657int acpi_bus_scan(acpi_handle handle)
1692{ 1658{
1693 acpi_status status; 1659 void *device = NULL;
1694 struct acpi_device *parent, *child; 1660 int error = 0;
1695 acpi_handle phandle, chandle;
1696 acpi_object_type type;
1697 u32 level = 1;
1698 int err = 0;
1699 1661
1700 parent = start; 1662 if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
1701 phandle = start->handle; 1663 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1702 child = chandle = NULL; 1664 acpi_bus_check_add, NULL, NULL, &device);
1703 1665
1704 while ((level > 0) && parent && (!err)) { 1666 if (!device)
1705 status = acpi_get_next_object(ACPI_TYPE_ANY, phandle, 1667 error = -ENODEV;
1706 chandle, &chandle); 1668 else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL)))
1669 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1670 acpi_bus_device_attach, NULL, NULL, NULL);
1707 1671
1708 /* 1672 return error;
1709 * If this scope is exhausted then move our way back up. 1673}
1710 */ 1674EXPORT_SYMBOL(acpi_bus_scan);
1711 if (ACPI_FAILURE(status)) {
1712 level--;
1713 chandle = phandle;
1714 acpi_get_parent(phandle, &phandle);
1715 child = parent;
1716 parent = parent->parent;
1717
1718 if (level == 0)
1719 err = acpi_bus_remove(child, rmdevice);
1720 else
1721 err = acpi_bus_remove(child, 1);
1722 1675
1723 continue; 1676static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used,
1724 } 1677 void *not_used, void **ret_not_used)
1678{
1679 struct acpi_device *device = NULL;
1725 1680
1726 status = acpi_get_type(chandle, &type); 1681 if (!acpi_bus_get_device(handle, &device)) {
1727 if (ACPI_FAILURE(status)) { 1682 struct acpi_scan_handler *dev_handler = device->handler;
1728 continue; 1683
1729 } 1684 device->removal_type = ACPI_BUS_REMOVAL_EJECT;
1730 /* 1685 if (dev_handler) {
1731 * If there is a device corresponding to chandle then 1686 if (dev_handler->detach)
1732 * parse it (depth-first). 1687 dev_handler->detach(device);
1733 */ 1688
1734 if (acpi_bus_get_device(chandle, &child) == 0) { 1689 device->handler = NULL;
1735 level++; 1690 } else {
1736 phandle = chandle; 1691 device_release_driver(&device->dev);
1737 chandle = NULL;
1738 parent = child;
1739 } 1692 }
1740 continue;
1741 } 1693 }
1742 return err; 1694 return AE_OK;
1695}
1696
1697static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used,
1698 void *not_used, void **ret_not_used)
1699{
1700 struct acpi_device *device = NULL;
1701
1702 if (!acpi_bus_get_device(handle, &device))
1703 acpi_device_unregister(device);
1704
1705 return AE_OK;
1706}
1707
1708/**
1709 * acpi_bus_trim - Remove ACPI device node and all of its descendants
1710 * @start: Root of the ACPI device nodes subtree to remove.
1711 *
1712 * Must be called under acpi_scan_lock.
1713 */
1714void acpi_bus_trim(struct acpi_device *start)
1715{
1716 /*
1717 * Execute acpi_bus_device_detach() as a post-order callback to detach
1718 * all ACPI drivers from the device nodes being removed.
1719 */
1720 acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
1721 acpi_bus_device_detach, NULL, NULL);
1722 acpi_bus_device_detach(start->handle, 0, NULL, NULL);
1723 /*
1724 * Execute acpi_bus_remove() as a post-order callback to remove device
1725 * nodes in the given namespace scope.
1726 */
1727 acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
1728 acpi_bus_remove, NULL, NULL);
1729 acpi_bus_remove(start->handle, 0, NULL, NULL);
1743} 1730}
1744EXPORT_SYMBOL_GPL(acpi_bus_trim); 1731EXPORT_SYMBOL_GPL(acpi_bus_trim);
1745 1732
1746static int acpi_bus_scan_fixed(void) 1733static int acpi_bus_scan_fixed(void)
1747{ 1734{
1748 int result = 0; 1735 int result = 0;
1749 struct acpi_device *device = NULL;
1750 struct acpi_bus_ops ops;
1751
1752 memset(&ops, 0, sizeof(ops));
1753 ops.acpi_op_add = 1;
1754 ops.acpi_op_start = 1;
1755 1736
1756 /* 1737 /*
1757 * Enumerate all fixed-feature devices. 1738 * Enumerate all fixed-feature devices.
1758 */ 1739 */
1759 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) { 1740 if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
1741 struct acpi_device *device = NULL;
1742
1760 result = acpi_add_single_object(&device, NULL, 1743 result = acpi_add_single_object(&device, NULL,
1761 ACPI_BUS_TYPE_POWER_BUTTON, 1744 ACPI_BUS_TYPE_POWER_BUTTON,
1762 ACPI_STA_DEFAULT, 1745 ACPI_STA_DEFAULT);
1763 &ops); 1746 if (result)
1747 return result;
1748
1749 result = device_attach(&device->dev);
1750 if (result < 0)
1751 return result;
1752
1764 device_init_wakeup(&device->dev, true); 1753 device_init_wakeup(&device->dev, true);
1765 } 1754 }
1766 1755
1767 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1756 if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
1757 struct acpi_device *device = NULL;
1758
1768 result = acpi_add_single_object(&device, NULL, 1759 result = acpi_add_single_object(&device, NULL,
1769 ACPI_BUS_TYPE_SLEEP_BUTTON, 1760 ACPI_BUS_TYPE_SLEEP_BUTTON,
1770 ACPI_STA_DEFAULT, 1761 ACPI_STA_DEFAULT);
1771 &ops); 1762 if (result)
1763 return result;
1764
1765 result = device_attach(&device->dev);
1772 } 1766 }
1773 1767
1774 return result; 1768 return result < 0 ? result : 0;
1775} 1769}
1776 1770
1777int __init acpi_scan_init(void) 1771int __init acpi_scan_init(void)
1778{ 1772{
1779 int result; 1773 int result;
1780 struct acpi_bus_ops ops;
1781
1782 memset(&ops, 0, sizeof(ops));
1783 ops.acpi_op_add = 1;
1784 ops.acpi_op_start = 1;
1785 1774
1786 result = bus_register(&acpi_bus_type); 1775 result = bus_register(&acpi_bus_type);
1787 if (result) { 1776 if (result) {
@@ -1789,20 +1778,33 @@ int __init acpi_scan_init(void)
1789 printk(KERN_ERR PREFIX "Could not register bus type\n"); 1778 printk(KERN_ERR PREFIX "Could not register bus type\n");
1790 } 1779 }
1791 1780
1792 acpi_power_init(); 1781 acpi_pci_root_init();
1782 acpi_pci_link_init();
1783 acpi_platform_init();
1784 acpi_csrt_init();
1785 acpi_container_init();
1793 1786
1787 mutex_lock(&acpi_scan_lock);
1794 /* 1788 /*
1795 * Enumerate devices in the ACPI namespace. 1789 * Enumerate devices in the ACPI namespace.
1796 */ 1790 */
1797 result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root); 1791 result = acpi_bus_scan(ACPI_ROOT_OBJECT);
1798 1792 if (result)
1799 if (!result) 1793 goto out;
1800 result = acpi_bus_scan_fixed();
1801 1794
1795 result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
1802 if (result) 1796 if (result)
1803 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1797 goto out;
1804 else 1798
1805 acpi_update_all_gpes(); 1799 result = acpi_bus_scan_fixed();
1800 if (result) {
1801 acpi_device_unregister(acpi_root);
1802 goto out;
1803 }
1806 1804
1805 acpi_update_all_gpes();
1806
1807 out:
1808 mutex_unlock(&acpi_scan_lock);
1807 return result; 1809 return result;
1808} 1810}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2fcc67d34b11..6d3a06a629a1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
177 }, 177 },
178 { 178 {
179 .callback = init_nvs_nosave, 179 .callback = init_nvs_nosave,
180 .ident = "Sony Vaio VGN-FW41E_H",
181 .matches = {
182 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
183 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
184 },
185 },
186 {
187 .callback = init_nvs_nosave,
180 .ident = "Sony Vaio VGN-FW21E", 188 .ident = "Sony Vaio VGN-FW21E",
181 .matches = { 189 .matches = {
182 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 190 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -386,6 +394,8 @@ static void acpi_pm_finish(void)
386 394
387 acpi_target_sleep_state = ACPI_STATE_S0; 395 acpi_target_sleep_state = ACPI_STATE_S0;
388 396
397 acpi_resume_power_resources();
398
389 /* If we were woken with the fixed power button, provide a small 399 /* If we were woken with the fixed power button, provide a small
390 * hint to userspace in the form of a wakeup event on the fixed power 400 * hint to userspace in the form of a wakeup event on the fixed power
391 * button device (if it can be found). 401 * button device (if it can be found).
@@ -577,7 +587,28 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
577 .end = acpi_pm_end, 587 .end = acpi_pm_end,
578 .recover = acpi_pm_finish, 588 .recover = acpi_pm_finish,
579}; 589};
580#endif /* CONFIG_SUSPEND */ 590
591static void acpi_sleep_suspend_setup(void)
592{
593 int i;
594
595 for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
596 acpi_status status;
597 u8 type_a, type_b;
598
599 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
600 if (ACPI_SUCCESS(status)) {
601 sleep_states[i] = 1;
602 pr_cont(" S%d", i);
603 }
604 }
605
606 suspend_set_ops(old_suspend_ordering ?
607 &acpi_suspend_ops_old : &acpi_suspend_ops);
608}
609#else /* !CONFIG_SUSPEND */
610static inline void acpi_sleep_suspend_setup(void) {}
611#endif /* !CONFIG_SUSPEND */
581 612
582#ifdef CONFIG_HIBERNATION 613#ifdef CONFIG_HIBERNATION
583static unsigned long s4_hardware_signature; 614static unsigned long s4_hardware_signature;
@@ -698,7 +729,30 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
698 .restore_cleanup = acpi_pm_thaw, 729 .restore_cleanup = acpi_pm_thaw,
699 .recover = acpi_pm_finish, 730 .recover = acpi_pm_finish,
700}; 731};
701#endif /* CONFIG_HIBERNATION */ 732
733static void acpi_sleep_hibernate_setup(void)
734{
735 acpi_status status;
736 u8 type_a, type_b;
737
738 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
739 if (ACPI_FAILURE(status))
740 return;
741
742 hibernation_set_ops(old_suspend_ordering ?
743 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
744 sleep_states[ACPI_STATE_S4] = 1;
745 pr_cont(KERN_CONT " S4");
746 if (nosigcheck)
747 return;
748
749 acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
750 if (facs)
751 s4_hardware_signature = facs->hardware_signature;
752}
753#else /* !CONFIG_HIBERNATION */
754static inline void acpi_sleep_hibernate_setup(void) {}
755#endif /* !CONFIG_HIBERNATION */
702 756
703int acpi_suspend(u32 acpi_state) 757int acpi_suspend(u32 acpi_state)
704{ 758{
@@ -734,9 +788,6 @@ int __init acpi_sleep_init(void)
734{ 788{
735 acpi_status status; 789 acpi_status status;
736 u8 type_a, type_b; 790 u8 type_a, type_b;
737#ifdef CONFIG_SUSPEND
738 int i = 0;
739#endif
740 791
741 if (acpi_disabled) 792 if (acpi_disabled)
742 return 0; 793 return 0;
@@ -744,45 +795,19 @@ int __init acpi_sleep_init(void)
744 acpi_sleep_dmi_check(); 795 acpi_sleep_dmi_check();
745 796
746 sleep_states[ACPI_STATE_S0] = 1; 797 sleep_states[ACPI_STATE_S0] = 1;
747 printk(KERN_INFO PREFIX "(supports S0"); 798 pr_info(PREFIX "(supports S0");
748
749#ifdef CONFIG_SUSPEND
750 for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
751 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
752 if (ACPI_SUCCESS(status)) {
753 sleep_states[i] = 1;
754 printk(KERN_CONT " S%d", i);
755 }
756 }
757 799
758 suspend_set_ops(old_suspend_ordering ? 800 acpi_sleep_suspend_setup();
759 &acpi_suspend_ops_old : &acpi_suspend_ops); 801 acpi_sleep_hibernate_setup();
760#endif
761 802
762#ifdef CONFIG_HIBERNATION
763 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
764 if (ACPI_SUCCESS(status)) {
765 hibernation_set_ops(old_suspend_ordering ?
766 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
767 sleep_states[ACPI_STATE_S4] = 1;
768 printk(KERN_CONT " S4");
769 if (!nosigcheck) {
770 acpi_get_table(ACPI_SIG_FACS, 1,
771 (struct acpi_table_header **)&facs);
772 if (facs)
773 s4_hardware_signature =
774 facs->hardware_signature;
775 }
776 }
777#endif
778 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
779 if (ACPI_SUCCESS(status)) { 804 if (ACPI_SUCCESS(status)) {
780 sleep_states[ACPI_STATE_S5] = 1; 805 sleep_states[ACPI_STATE_S5] = 1;
781 printk(KERN_CONT " S5"); 806 pr_cont(" S5");
782 pm_power_off_prepare = acpi_power_off_prepare; 807 pm_power_off_prepare = acpi_power_off_prepare;
783 pm_power_off = acpi_power_off; 808 pm_power_off = acpi_power_off;
784 } 809 }
785 printk(KERN_CONT ")\n"); 810 pr_cont(")\n");
786 /* 811 /*
787 * Register the tts_notifier to reboot notifier list so that the _TTS 812 * Register the tts_notifier to reboot notifier list so that the _TTS
788 * object can also be evaluated when the system enters S5. 813 * object can also be evaluated when the system enters S5.
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 74d59c8f4678..0143540a2519 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -6,3 +6,5 @@ extern void acpi_disable_wakeup_devices(u8 sleep_state);
6 6
7extern struct list_head acpi_wakeup_device_list; 7extern struct list_head acpi_wakeup_device_list;
8extern struct mutex acpi_device_lock; 8extern struct mutex acpi_device_lock;
9
10extern void acpi_resume_power_resources(void);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index ea61ca9129cd..41c0504470db 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -498,7 +498,7 @@ static int get_status(u32 index, acpi_event_status *status,
498 result = acpi_get_gpe_device(index, handle); 498 result = acpi_get_gpe_device(index, handle);
499 if (result) { 499 if (result) {
500 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, 500 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
501 "Invalid GPE 0x%x\n", index)); 501 "Invalid GPE 0x%x", index));
502 goto end; 502 goto end;
503 } 503 }
504 result = acpi_get_gpe_status(*handle, index, status); 504 result = acpi_get_gpe_status(*handle, index, status);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2572d9715bda..d67a1fe07f0e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -204,7 +204,7 @@ int __init
204acpi_table_parse_entries(char *id, 204acpi_table_parse_entries(char *id,
205 unsigned long table_size, 205 unsigned long table_size,
206 int entry_id, 206 int entry_id,
207 acpi_table_entry_handler handler, 207 acpi_tbl_entry_handler handler,
208 unsigned int max_entries) 208 unsigned int max_entries)
209{ 209{
210 struct acpi_table_header *table_header = NULL; 210 struct acpi_table_header *table_header = NULL;
@@ -269,7 +269,7 @@ err:
269 269
270int __init 270int __init
271acpi_table_parse_madt(enum acpi_madt_type id, 271acpi_table_parse_madt(enum acpi_madt_type id,
272 acpi_table_entry_handler handler, unsigned int max_entries) 272 acpi_tbl_entry_handler handler, unsigned int max_entries)
273{ 273{
274 return acpi_table_parse_entries(ACPI_SIG_MADT, 274 return acpi_table_parse_entries(ACPI_SIG_MADT,
275 sizeof(struct acpi_table_madt), id, 275 sizeof(struct acpi_table_madt), id,
@@ -285,7 +285,7 @@ acpi_table_parse_madt(enum acpi_madt_type id,
285 * Scan the ACPI System Descriptor Table (STD) for a table matching @id, 285 * Scan the ACPI System Descriptor Table (STD) for a table matching @id,
286 * run @handler on it. Return 0 if table found, return on if not. 286 * run @handler on it. Return 0 if table found, return on if not.
287 */ 287 */
288int __init acpi_table_parse(char *id, acpi_table_handler handler) 288int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
289{ 289{
290 struct acpi_table_header *table = NULL; 290 struct acpi_table_header *table = NULL;
291 acpi_size tbl_size; 291 acpi_size tbl_size;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 506fbd4b5733..8470771e5eae 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -97,7 +97,7 @@ module_param(psv, int, 0644);
97MODULE_PARM_DESC(psv, "Disable or override all passive trip points."); 97MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
98 98
99static int acpi_thermal_add(struct acpi_device *device); 99static int acpi_thermal_add(struct acpi_device *device);
100static int acpi_thermal_remove(struct acpi_device *device, int type); 100static int acpi_thermal_remove(struct acpi_device *device);
101static void acpi_thermal_notify(struct acpi_device *device, u32 event); 101static void acpi_thermal_notify(struct acpi_device *device, u32 event);
102 102
103static const struct acpi_device_id thermal_device_ids[] = { 103static const struct acpi_device_id thermal_device_ids[] = {
@@ -288,7 +288,7 @@ do { \
288 if (flags != ACPI_TRIPS_INIT) \ 288 if (flags != ACPI_TRIPS_INIT) \
289 ACPI_EXCEPTION((AE_INFO, AE_ERROR, \ 289 ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
290 "ACPI thermal trip point %s changed\n" \ 290 "ACPI thermal trip point %s changed\n" \
291 "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \ 291 "Please send acpidump to linux-acpi@vger.kernel.org", str)); \
292} while (0) 292} while (0)
293 293
294static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) 294static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
@@ -531,6 +531,10 @@ static void acpi_thermal_check(void *data)
531{ 531{
532 struct acpi_thermal *tz = data; 532 struct acpi_thermal *tz = data;
533 533
534 if (!tz->tz_enabled) {
535 pr_warn("thermal zone is disabled \n");
536 return;
537 }
534 thermal_zone_device_update(tz->thermal_zone); 538 thermal_zone_device_update(tz->thermal_zone);
535} 539}
536 540
@@ -1111,7 +1115,7 @@ end:
1111 return result; 1115 return result;
1112} 1116}
1113 1117
1114static int acpi_thermal_remove(struct acpi_device *device, int type) 1118static int acpi_thermal_remove(struct acpi_device *device)
1115{ 1119{
1116 struct acpi_thermal *tz = NULL; 1120 struct acpi_thermal *tz = NULL;
1117 1121
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ac9a69cd45f5..313f959413dc 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,7 @@ module_param(use_bios_initial_backlight, bool, 0644);
88 88
89static int register_count = 0; 89static int register_count = 0;
90static int acpi_video_bus_add(struct acpi_device *device); 90static int acpi_video_bus_add(struct acpi_device *device);
91static int acpi_video_bus_remove(struct acpi_device *device, int type); 91static int acpi_video_bus_remove(struct acpi_device *device);
92static void acpi_video_bus_notify(struct acpi_device *device, u32 event); 92static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
93 93
94static const struct acpi_device_id video_device_ids[] = { 94static const struct acpi_device_id video_device_ids[] = {
@@ -673,7 +673,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
673 br->levels[i] = br->levels[i - level_ac_battery]; 673 br->levels[i] = br->levels[i - level_ac_battery];
674 count += level_ac_battery; 674 count += level_ac_battery;
675 } else if (level_ac_battery > 2) 675 } else if (level_ac_battery > 2)
676 ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package\n")); 676 ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package"));
677 677
678 /* Check if the _BCL package is in a reversed order */ 678 /* Check if the _BCL package is in a reversed order */
679 if (max_level == br->levels[2]) { 679 if (max_level == br->levels[2]) {
@@ -682,7 +682,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
682 acpi_video_cmp_level, NULL); 682 acpi_video_cmp_level, NULL);
683 } else if (max_level != br->levels[count - 1]) 683 } else if (max_level != br->levels[count - 1])
684 ACPI_ERROR((AE_INFO, 684 ACPI_ERROR((AE_INFO,
685 "Found unordered _BCL package\n")); 685 "Found unordered _BCL package"));
686 686
687 br->count = count; 687 br->count = count;
688 device->brightness = br; 688 device->brightness = br;
@@ -1740,7 +1740,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1740 return error; 1740 return error;
1741} 1741}
1742 1742
1743static int acpi_video_bus_remove(struct acpi_device *device, int type) 1743static int acpi_video_bus_remove(struct acpi_device *device)
1744{ 1744{
1745 struct acpi_video_bus *video = NULL; 1745 struct acpi_video_bus *video = NULL;
1746 1746
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7862d17976b7..495aeed26779 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
53 53
54enum { 54enum {
55 AHCI_PCI_BAR_STA2X11 = 0, 55 AHCI_PCI_BAR_STA2X11 = 0,
56 AHCI_PCI_BAR_ENMOTUS = 2,
56 AHCI_PCI_BAR_STANDARD = 5, 57 AHCI_PCI_BAR_STANDARD = 5,
57}; 58};
58 59
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
410 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ 411 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
411 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 412 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
412 413
414 /* Enmotus */
415 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
416
413 /* Generic, PCI class code for AHCI */ 417 /* Generic, PCI class code for AHCI */
414 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 418 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
415 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 419 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1057,6 +1061,86 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1057{} 1061{}
1058#endif 1062#endif
1059 1063
1064int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
1065{
1066 int rc;
1067 unsigned int maxvec;
1068
1069 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
1070 rc = pci_enable_msi_block_auto(pdev, &maxvec);
1071 if (rc > 0) {
1072 if ((rc == maxvec) || (rc == 1))
1073 return rc;
1074 /*
1075 * Assume that advantage of multipe MSIs is negated,
1076 * so fallback to single MSI mode to save resources
1077 */
1078 pci_disable_msi(pdev);
1079 if (!pci_enable_msi(pdev))
1080 return 1;
1081 }
1082 }
1083
1084 pci_intx(pdev, 1);
1085 return 0;
1086}
1087
1088/**
1089 * ahci_host_activate - start AHCI host, request IRQs and register it
1090 * @host: target ATA host
1091 * @irq: base IRQ number to request
1092 * @n_msis: number of MSIs allocated for this host
1093 * @irq_handler: irq_handler used when requesting IRQs
1094 * @irq_flags: irq_flags used when requesting IRQs
1095 *
1096 * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
1097 * when multiple MSIs were allocated. That is one MSI per port, starting
1098 * from @irq.
1099 *
1100 * LOCKING:
1101 * Inherited from calling layer (may sleep).
1102 *
1103 * RETURNS:
1104 * 0 on success, -errno otherwise.
1105 */
1106int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
1107{
1108 int i, rc;
1109
1110 /* Sharing Last Message among several ports is not supported */
1111 if (n_msis < host->n_ports)
1112 return -EINVAL;
1113
1114 rc = ata_host_start(host);
1115 if (rc)
1116 return rc;
1117
1118 for (i = 0; i < host->n_ports; i++) {
1119 rc = devm_request_threaded_irq(host->dev,
1120 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
1121 dev_driver_string(host->dev), host->ports[i]);
1122 if (rc)
1123 goto out_free_irqs;
1124 }
1125
1126 for (i = 0; i < host->n_ports; i++)
1127 ata_port_desc(host->ports[i], "irq %d", irq + i);
1128
1129 rc = ata_host_register(host, &ahci_sht);
1130 if (rc)
1131 goto out_free_all_irqs;
1132
1133 return 0;
1134
1135out_free_all_irqs:
1136 i = host->n_ports;
1137out_free_irqs:
1138 for (i--; i >= 0; i--)
1139 devm_free_irq(host->dev, irq + i, host->ports[i]);
1140
1141 return rc;
1142}
1143
1060static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1144static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1061{ 1145{
1062 unsigned int board_id = ent->driver_data; 1146 unsigned int board_id = ent->driver_data;
@@ -1065,7 +1149,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1065 struct device *dev = &pdev->dev; 1149 struct device *dev = &pdev->dev;
1066 struct ahci_host_priv *hpriv; 1150 struct ahci_host_priv *hpriv;
1067 struct ata_host *host; 1151 struct ata_host *host;
1068 int n_ports, i, rc; 1152 int n_ports, n_msis, i, rc;
1069 int ahci_pci_bar = AHCI_PCI_BAR_STANDARD; 1153 int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
1070 1154
1071 VPRINTK("ENTER\n"); 1155 VPRINTK("ENTER\n");
@@ -1098,9 +1182,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1098 dev_info(&pdev->dev, 1182 dev_info(&pdev->dev,
1099 "PDC42819 can only drive SATA devices with this driver\n"); 1183 "PDC42819 can only drive SATA devices with this driver\n");
1100 1184
1101 /* The Connext uses non-standard BAR */ 1185 /* Both Connext and Enmotus devices use non-standard BARs */
1102 if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) 1186 if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
1103 ahci_pci_bar = AHCI_PCI_BAR_STA2X11; 1187 ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
1188 else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
1189 ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
1104 1190
1105 /* acquire resources */ 1191 /* acquire resources */
1106 rc = pcim_enable_device(pdev); 1192 rc = pcim_enable_device(pdev);
@@ -1150,11 +1236,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1150 if (ahci_sb600_enable_64bit(pdev)) 1236 if (ahci_sb600_enable_64bit(pdev))
1151 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; 1237 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
1152 1238
1153 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
1154 pci_intx(pdev, 1);
1155
1156 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; 1239 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
1157 1240
1241 n_msis = ahci_init_interrupts(pdev, hpriv);
1242 if (n_msis > 1)
1243 hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
1244
1158 /* save initial config */ 1245 /* save initial config */
1159 ahci_pci_save_initial_config(pdev, hpriv); 1246 ahci_pci_save_initial_config(pdev, hpriv);
1160 1247
@@ -1250,6 +1337,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1250 ahci_pci_print_info(host); 1337 ahci_pci_print_info(host);
1251 1338
1252 pci_set_master(pdev); 1339 pci_set_master(pdev);
1340
1341 if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
1342 return ahci_host_activate(host, pdev->irq, n_msis);
1343
1253 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 1344 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
1254 &ahci_sht); 1345 &ahci_sht);
1255} 1346}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 9be471200a07..b830e6c9fe49 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -231,6 +231,7 @@ enum {
231 AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on 231 AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
232 port start (wait until 232 port start (wait until
233 error-handling stage) */ 233 error-handling stage) */
234 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
234 235
235 /* ap->flags bits */ 236 /* ap->flags bits */
236 237
@@ -297,6 +298,8 @@ struct ahci_port_priv {
297 unsigned int ncq_saw_d2h:1; 298 unsigned int ncq_saw_d2h:1;
298 unsigned int ncq_saw_dmas:1; 299 unsigned int ncq_saw_dmas:1;
299 unsigned int ncq_saw_sdb:1; 300 unsigned int ncq_saw_sdb:1;
301 u32 intr_status; /* interrupts to handle */
302 spinlock_t lock; /* protects parent ata_port */
300 u32 intr_mask; /* interrupts to enable */ 303 u32 intr_mask; /* interrupts to enable */
301 bool fbs_supported; /* set iff FBS is supported */ 304 bool fbs_supported; /* set iff FBS is supported */
302 bool fbs_enabled; /* set iff FBS is enabled */ 305 bool fbs_enabled; /* set iff FBS is enabled */
@@ -359,7 +362,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
359 struct ata_port_info *pi); 362 struct ata_port_info *pi);
360int ahci_reset_em(struct ata_host *host); 363int ahci_reset_em(struct ata_host *host);
361irqreturn_t ahci_interrupt(int irq, void *dev_instance); 364irqreturn_t ahci_interrupt(int irq, void *dev_instance);
365irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
366irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
362void ahci_print_info(struct ata_host *host, const char *scc_s); 367void ahci_print_info(struct ata_host *host, const char *scc_s);
368int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
363 369
364static inline void __iomem *__ahci_port_base(struct ata_host *host, 370static inline void __iomem *__ahci_port_base(struct ata_host *host,
365 unsigned int port_no) 371 unsigned int port_no)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 320712a7b9ea..34c82167b962 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1655,19 +1655,16 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1655 ata_port_abort(ap); 1655 ata_port_abort(ap);
1656} 1656}
1657 1657
1658static void ahci_port_intr(struct ata_port *ap) 1658static void ahci_handle_port_interrupt(struct ata_port *ap,
1659 void __iomem *port_mmio, u32 status)
1659{ 1660{
1660 void __iomem *port_mmio = ahci_port_base(ap);
1661 struct ata_eh_info *ehi = &ap->link.eh_info; 1661 struct ata_eh_info *ehi = &ap->link.eh_info;
1662 struct ahci_port_priv *pp = ap->private_data; 1662 struct ahci_port_priv *pp = ap->private_data;
1663 struct ahci_host_priv *hpriv = ap->host->private_data; 1663 struct ahci_host_priv *hpriv = ap->host->private_data;
1664 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); 1664 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1665 u32 status, qc_active = 0; 1665 u32 qc_active = 0;
1666 int rc; 1666 int rc;
1667 1667
1668 status = readl(port_mmio + PORT_IRQ_STAT);
1669 writel(status, port_mmio + PORT_IRQ_STAT);
1670
1671 /* ignore BAD_PMP while resetting */ 1668 /* ignore BAD_PMP while resetting */
1672 if (unlikely(resetting)) 1669 if (unlikely(resetting))
1673 status &= ~PORT_IRQ_BAD_PMP; 1670 status &= ~PORT_IRQ_BAD_PMP;
@@ -1743,6 +1740,107 @@ static void ahci_port_intr(struct ata_port *ap)
1743 } 1740 }
1744} 1741}
1745 1742
1743void ahci_port_intr(struct ata_port *ap)
1744{
1745 void __iomem *port_mmio = ahci_port_base(ap);
1746 u32 status;
1747
1748 status = readl(port_mmio + PORT_IRQ_STAT);
1749 writel(status, port_mmio + PORT_IRQ_STAT);
1750
1751 ahci_handle_port_interrupt(ap, port_mmio, status);
1752}
1753
1754irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
1755{
1756 struct ata_port *ap = dev_instance;
1757 struct ahci_port_priv *pp = ap->private_data;
1758 void __iomem *port_mmio = ahci_port_base(ap);
1759 unsigned long flags;
1760 u32 status;
1761
1762 spin_lock_irqsave(&ap->host->lock, flags);
1763 status = pp->intr_status;
1764 if (status)
1765 pp->intr_status = 0;
1766 spin_unlock_irqrestore(&ap->host->lock, flags);
1767
1768 spin_lock_bh(ap->lock);
1769 ahci_handle_port_interrupt(ap, port_mmio, status);
1770 spin_unlock_bh(ap->lock);
1771
1772 return IRQ_HANDLED;
1773}
1774EXPORT_SYMBOL_GPL(ahci_thread_fn);
1775
1776void ahci_hw_port_interrupt(struct ata_port *ap)
1777{
1778 void __iomem *port_mmio = ahci_port_base(ap);
1779 struct ahci_port_priv *pp = ap->private_data;
1780 u32 status;
1781
1782 status = readl(port_mmio + PORT_IRQ_STAT);
1783 writel(status, port_mmio + PORT_IRQ_STAT);
1784
1785 pp->intr_status |= status;
1786}
1787
1788irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
1789{
1790 struct ata_port *ap_this = dev_instance;
1791 struct ahci_port_priv *pp = ap_this->private_data;
1792 struct ata_host *host = ap_this->host;
1793 struct ahci_host_priv *hpriv = host->private_data;
1794 void __iomem *mmio = hpriv->mmio;
1795 unsigned int i;
1796 u32 irq_stat, irq_masked;
1797
1798 VPRINTK("ENTER\n");
1799
1800 spin_lock(&host->lock);
1801
1802 irq_stat = readl(mmio + HOST_IRQ_STAT);
1803
1804 if (!irq_stat) {
1805 u32 status = pp->intr_status;
1806
1807 spin_unlock(&host->lock);
1808
1809 VPRINTK("EXIT\n");
1810
1811 return status ? IRQ_WAKE_THREAD : IRQ_NONE;
1812 }
1813
1814 irq_masked = irq_stat & hpriv->port_map;
1815
1816 for (i = 0; i < host->n_ports; i++) {
1817 struct ata_port *ap;
1818
1819 if (!(irq_masked & (1 << i)))
1820 continue;
1821
1822 ap = host->ports[i];
1823 if (ap) {
1824 ahci_hw_port_interrupt(ap);
1825 VPRINTK("port %u\n", i);
1826 } else {
1827 VPRINTK("port %u (no irq)\n", i);
1828 if (ata_ratelimit())
1829 dev_warn(host->dev,
1830 "interrupt on disabled port %u\n", i);
1831 }
1832 }
1833
1834 writel(irq_stat, mmio + HOST_IRQ_STAT);
1835
1836 spin_unlock(&host->lock);
1837
1838 VPRINTK("EXIT\n");
1839
1840 return IRQ_WAKE_THREAD;
1841}
1842EXPORT_SYMBOL_GPL(ahci_hw_interrupt);
1843
1746irqreturn_t ahci_interrupt(int irq, void *dev_instance) 1844irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1747{ 1845{
1748 struct ata_host *host = dev_instance; 1846 struct ata_host *host = dev_instance;
@@ -1951,13 +2049,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1951 /* Use the nominal value 10 ms if the read MDAT is zero, 2049 /* Use the nominal value 10 ms if the read MDAT is zero,
1952 * the nominal value of DETO is 20 ms. 2050 * the nominal value of DETO is 20 ms.
1953 */ 2051 */
1954 if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & 2052 if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
1955 ATA_LOG_DEVSLP_VALID_MASK) { 2053 ATA_LOG_DEVSLP_VALID_MASK) {
1956 mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & 2054 mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
1957 ATA_LOG_DEVSLP_MDAT_MASK; 2055 ATA_LOG_DEVSLP_MDAT_MASK;
1958 if (!mdat) 2056 if (!mdat)
1959 mdat = 10; 2057 mdat = 10;
1960 deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; 2058 deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
1961 if (!deto) 2059 if (!deto)
1962 deto = 20; 2060 deto = 20;
1963 } else { 2061 } else {
@@ -2196,6 +2294,14 @@ static int ahci_port_start(struct ata_port *ap)
2196 */ 2294 */
2197 pp->intr_mask = DEF_PORT_IRQ; 2295 pp->intr_mask = DEF_PORT_IRQ;
2198 2296
2297 /*
2298 * Switch to per-port locking in case each port has its own MSI vector.
2299 */
2300 if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
2301 spin_lock_init(&pp->lock);
2302 ap->lock = &pp->lock;
2303 }
2304
2199 ap->private_data = pp; 2305 ap->private_data = pp;
2200 2306
2201 /* engage engines, captain */ 2307 /* engage engines, captain */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ef01ac07502e..6fc67f7efb22 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1029,30 +1029,20 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
1029{ 1029{
1030 struct scsi_device *sdev = dev->sdev; 1030 struct scsi_device *sdev = dev->sdev;
1031 acpi_handle handle; 1031 acpi_handle handle;
1032 struct device *device;
1033 1032
1034 handle = ata_dev_acpi_handle(dev); 1033 handle = ata_dev_acpi_handle(dev);
1035 if (!handle) 1034 if (handle)
1036 return; 1035 acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
1037
1038 device = &sdev->sdev_gendev;
1039
1040 acpi_power_resource_register_device(device, handle);
1041} 1036}
1042 1037
1043static void ata_acpi_unregister_power_resource(struct ata_device *dev) 1038static void ata_acpi_unregister_power_resource(struct ata_device *dev)
1044{ 1039{
1045 struct scsi_device *sdev = dev->sdev; 1040 struct scsi_device *sdev = dev->sdev;
1046 acpi_handle handle; 1041 acpi_handle handle;
1047 struct device *device;
1048 1042
1049 handle = ata_dev_acpi_handle(dev); 1043 handle = ata_dev_acpi_handle(dev);
1050 if (!handle) 1044 if (handle)
1051 return; 1045 acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
1052
1053 device = &sdev->sdev_gendev;
1054
1055 acpi_power_resource_unregister_device(device, handle);
1056} 1046}
1057 1047
1058void ata_acpi_bind(struct ata_device *dev) 1048void ata_acpi_bind(struct ata_device *dev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9e8b99af400d..46cd3f4c6aaa 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
2325 } 2325 }
2326 } 2326 }
2327 2327
2328 /* check and mark DevSlp capability */ 2328 /* Check and mark DevSlp capability. Get DevSlp timing variables
2329 if (ata_id_has_devslp(dev->id)) 2329 * from SATA Settings page of Identify Device Data Log.
2330 dev->flags |= ATA_DFLAG_DEVSLP;
2331
2332 /* Obtain SATA Settings page from Identify Device Data Log,
2333 * which contains DevSlp timing variables etc.
2334 * Exclude old devices with ata_id_has_ncq()
2335 */ 2330 */
2336 if (ata_id_has_ncq(dev->id)) { 2331 if (ata_id_has_devslp(dev->id)) {
2332 u8 sata_setting[ATA_SECT_SIZE];
2333 int i, j;
2334
2335 dev->flags |= ATA_DFLAG_DEVSLP;
2337 err_mask = ata_read_log_page(dev, 2336 err_mask = ata_read_log_page(dev,
2338 ATA_LOG_SATA_ID_DEV_DATA, 2337 ATA_LOG_SATA_ID_DEV_DATA,
2339 ATA_LOG_SATA_SETTINGS, 2338 ATA_LOG_SATA_SETTINGS,
2340 dev->sata_settings, 2339 sata_setting,
2341 1); 2340 1);
2342 if (err_mask) 2341 if (err_mask)
2343 ata_dev_dbg(dev, 2342 ata_dev_dbg(dev,
2344 "failed to get Identify Device Data, Emask 0x%x\n", 2343 "failed to get Identify Device Data, Emask 0x%x\n",
2345 err_mask); 2344 err_mask);
2345 else
2346 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2347 j = ATA_LOG_DEVSLP_OFFSET + i;
2348 dev->devslp_timing[i] = sata_setting[j];
2349 }
2346 } 2350 }
2347 2351
2348 dev->cdb_len = 16; 2352 dev->cdb_len = 16;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bf039b0e97b7..bcf4437214f5 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
2094 */ 2094 */
2095static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 2095static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2096{ 2096{
2097 if (qc->flags & AC_ERR_MEDIA) 2097 if (qc->err_mask & AC_ERR_MEDIA)
2098 return 0; /* don't retry media errors */ 2098 return 0; /* don't retry media errors */
2099 if (qc->flags & ATA_QCFLAG_IO) 2099 if (qc->flags & ATA_QCFLAG_IO)
2100 return 1; /* otherwise retry anything from fs stack */ 2100 return 1; /* otherwise retry anything from fs stack */
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e6d4fc..53ecac5a2161 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
636#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE 636#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
637#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE 637#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
638 638
639typedef volatile u_int freg_t; 639typedef volatile u_int ffreg_t;
640typedef u_int rreg_t; 640typedef u_int rreg_t;
641 641
642typedef struct _ffredn_t { 642typedef struct _ffredn_t {
643 freg_t idlehead_high; /* Idle cell header (high) */ 643 ffreg_t idlehead_high; /* Idle cell header (high) */
644 freg_t idlehead_low; /* Idle cell header (low) */ 644 ffreg_t idlehead_low; /* Idle cell header (low) */
645 freg_t maxrate; /* Maximum rate */ 645 ffreg_t maxrate; /* Maximum rate */
646 freg_t stparms; /* Traffic Management Parameters */ 646 ffreg_t stparms; /* Traffic Management Parameters */
647 freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ 647 ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
648 freg_t rm_type; /* */ 648 ffreg_t rm_type; /* */
649 u_int filler5[0x17 - 0x06]; 649 u_int filler5[0x17 - 0x06];
650 freg_t cmd_reg; /* Command register */ 650 ffreg_t cmd_reg; /* Command register */
651 u_int filler18[0x20 - 0x18]; 651 u_int filler18[0x20 - 0x18];
652 freg_t cbr_base; /* CBR Pointer Base */ 652 ffreg_t cbr_base; /* CBR Pointer Base */
653 freg_t vbr_base; /* VBR Pointer Base */ 653 ffreg_t vbr_base; /* VBR Pointer Base */
654 freg_t abr_base; /* ABR Pointer Base */ 654 ffreg_t abr_base; /* ABR Pointer Base */
655 freg_t ubr_base; /* UBR Pointer Base */ 655 ffreg_t ubr_base; /* UBR Pointer Base */
656 u_int filler24; 656 u_int filler24;
657 freg_t vbrwq_base; /* VBR Wait Queue Base */ 657 ffreg_t vbrwq_base; /* VBR Wait Queue Base */
658 freg_t abrwq_base; /* ABR Wait Queue Base */ 658 ffreg_t abrwq_base; /* ABR Wait Queue Base */
659 freg_t ubrwq_base; /* UBR Wait Queue Base */ 659 ffreg_t ubrwq_base; /* UBR Wait Queue Base */
660 freg_t vct_base; /* Main VC Table Base */ 660 ffreg_t vct_base; /* Main VC Table Base */
661 freg_t vcte_base; /* Extended Main VC Table Base */ 661 ffreg_t vcte_base; /* Extended Main VC Table Base */
662 u_int filler2a[0x2C - 0x2A]; 662 u_int filler2a[0x2C - 0x2A];
663 freg_t cbr_tab_beg; /* CBR Table Begin */ 663 ffreg_t cbr_tab_beg; /* CBR Table Begin */
664 freg_t cbr_tab_end; /* CBR Table End */ 664 ffreg_t cbr_tab_end; /* CBR Table End */
665 freg_t cbr_pointer; /* CBR Pointer */ 665 ffreg_t cbr_pointer; /* CBR Pointer */
666 u_int filler2f[0x30 - 0x2F]; 666 u_int filler2f[0x30 - 0x2F];
667 freg_t prq_st_adr; /* Packet Ready Queue Start Address */ 667 ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
668 freg_t prq_ed_adr; /* Packet Ready Queue End Address */ 668 ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
669 freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ 669 ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
670 freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ 670 ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
671 freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ 671 ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
672 freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ 672 ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
673 freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ 673 ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
674 freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ 674 ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
675 u_int filler38[0x40 - 0x38]; 675 u_int filler38[0x40 - 0x38];
676 freg_t queue_base; /* Base address for PRQ and TCQ */ 676 ffreg_t queue_base; /* Base address for PRQ and TCQ */
677 freg_t desc_base; /* Base address of descriptor table */ 677 ffreg_t desc_base; /* Base address of descriptor table */
678 u_int filler42[0x45 - 0x42]; 678 u_int filler42[0x45 - 0x42];
679 freg_t mode_reg_0; /* Mode register 0 */ 679 ffreg_t mode_reg_0; /* Mode register 0 */
680 freg_t mode_reg_1; /* Mode register 1 */ 680 ffreg_t mode_reg_1; /* Mode register 1 */
681 freg_t intr_status_reg;/* Interrupt Status register */ 681 ffreg_t intr_status_reg;/* Interrupt Status register */
682 freg_t mask_reg; /* Mask Register */ 682 ffreg_t mask_reg; /* Mask Register */
683 freg_t cell_ctr_high1; /* Total cell transfer count (high) */ 683 ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
684 freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ 684 ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
685 freg_t state_reg; /* Status register */ 685 ffreg_t state_reg; /* Status register */
686 u_int filler4c[0x58 - 0x4c]; 686 u_int filler4c[0x58 - 0x4c];
687 freg_t curr_desc_num; /* Contains the current descriptor num */ 687 ffreg_t curr_desc_num; /* Contains the current descriptor num */
688 freg_t next_desc; /* Next descriptor */ 688 ffreg_t next_desc; /* Next descriptor */
689 freg_t next_vc; /* Next VC */ 689 ffreg_t next_vc; /* Next VC */
690 u_int filler5b[0x5d - 0x5b]; 690 u_int filler5b[0x5d - 0x5b];
691 freg_t present_slot_cnt;/* Present slot count */ 691 ffreg_t present_slot_cnt;/* Present slot count */
692 u_int filler5e[0x6a - 0x5e]; 692 u_int filler5e[0x6a - 0x5e];
693 freg_t new_desc_num; /* New descriptor number */ 693 ffreg_t new_desc_num; /* New descriptor number */
694 freg_t new_vc; /* New VC */ 694 ffreg_t new_vc; /* New VC */
695 freg_t sched_tbl_ptr; /* Schedule table pointer */ 695 ffreg_t sched_tbl_ptr; /* Schedule table pointer */
696 freg_t vbrwq_wptr; /* VBR wait queue write pointer */ 696 ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
697 freg_t vbrwq_rptr; /* VBR wait queue read pointer */ 697 ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
698 freg_t abrwq_wptr; /* ABR wait queue write pointer */ 698 ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
699 freg_t abrwq_rptr; /* ABR wait queue read pointer */ 699 ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
700 freg_t ubrwq_wptr; /* UBR wait queue write pointer */ 700 ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
701 freg_t ubrwq_rptr; /* UBR wait queue read pointer */ 701 ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
702 freg_t cbr_vc; /* CBR VC */ 702 ffreg_t cbr_vc; /* CBR VC */
703 freg_t vbr_sb_vc; /* VBR SB VC */ 703 ffreg_t vbr_sb_vc; /* VBR SB VC */
704 freg_t abr_sb_vc; /* ABR SB VC */ 704 ffreg_t abr_sb_vc; /* ABR SB VC */
705 freg_t ubr_sb_vc; /* UBR SB VC */ 705 ffreg_t ubr_sb_vc; /* UBR SB VC */
706 freg_t vbr_next_link; /* VBR next link */ 706 ffreg_t vbr_next_link; /* VBR next link */
707 freg_t abr_next_link; /* ABR next link */ 707 ffreg_t abr_next_link; /* ABR next link */
708 freg_t ubr_next_link; /* UBR next link */ 708 ffreg_t ubr_next_link; /* UBR next link */
709 u_int filler7a[0x7c-0x7a]; 709 u_int filler7a[0x7c-0x7a];
710 freg_t out_rate_head; /* Out of rate head */ 710 ffreg_t out_rate_head; /* Out of rate head */
711 u_int filler7d[0xca-0x7d]; /* pad out to full address space */ 711 u_int filler7d[0xca-0x7d]; /* pad out to full address space */
712 freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ 712 ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
713 freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ 713 ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
714 u_int fillercc[0x100-0xcc]; /* pad out to full address space */ 714 u_int fillercc[0x100-0xcc]; /* pad out to full address space */
715} ffredn_t; 715} ffredn_t;
716 716
717typedef struct _rfredn_t { 717typedef struct _rfredn_t {
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..4e22ce3ed73d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ endif
21obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o 21obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
22obj-$(CONFIG_REGMAP) += regmap/ 22obj-$(CONFIG_REGMAP) += regmap/
23obj-$(CONFIG_SOC_BUS) += soc.o 23obj-$(CONFIG_SOC_BUS) += soc.o
24obj-$(CONFIG_PINCTRL) += pinctrl.o
24 25
25ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 26ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
26 27
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e3bbed8a617c..656310156dde 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/async.h> 25#include <linux/async.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/pinctrl/devinfo.h>
27 28
28#include "base.h" 29#include "base.h"
29#include "power/power.h" 30#include "power/power.h"
@@ -269,6 +270,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
269 WARN_ON(!list_empty(&dev->devres_head)); 270 WARN_ON(!list_empty(&dev->devres_head));
270 271
271 dev->driver = drv; 272 dev->driver = drv;
273
274 /* If using pinctrl, bind pins now before probing */
275 ret = pinctrl_bind_pins(dev);
276 if (ret)
277 goto probe_failed;
278
272 if (driver_sysfs_add(dev)) { 279 if (driver_sysfs_add(dev)) {
273 printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", 280 printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
274 __func__, dev_name(dev)); 281 __func__, dev_name(dev));
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000000..67a274e86727
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,69 @@
1/*
2 * Driver core interface to the pinctrl subsystem.
3 *
4 * Copyright (C) 2012 ST-Ericsson SA
5 * Written on behalf of Linaro for ST-Ericsson
6 * Based on bits of regulator core, gpio core and clk core
7 *
8 * Author: Linus Walleij <linus.walleij@linaro.org>
9 *
10 * License terms: GNU General Public License (GPL) version 2
11 */
12
13#include <linux/device.h>
14#include <linux/pinctrl/devinfo.h>
15#include <linux/pinctrl/consumer.h>
16#include <linux/slab.h>
17
18/**
19 * pinctrl_bind_pins() - called by the device core before probe
20 * @dev: the device that is just about to probe
21 */
22int pinctrl_bind_pins(struct device *dev)
23{
24 int ret;
25
26 dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
27 if (!dev->pins)
28 return -ENOMEM;
29
30 dev->pins->p = devm_pinctrl_get(dev);
31 if (IS_ERR(dev->pins->p)) {
32 dev_dbg(dev, "no pinctrl handle\n");
33 ret = PTR_ERR(dev->pins->p);
34 goto cleanup_alloc;
35 }
36
37 dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
38 PINCTRL_STATE_DEFAULT);
39 if (IS_ERR(dev->pins->default_state)) {
40 dev_dbg(dev, "no default pinctrl state\n");
41 ret = 0;
42 goto cleanup_get;
43 }
44
45 ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
46 if (ret) {
47 dev_dbg(dev, "failed to activate default pinctrl state\n");
48 goto cleanup_get;
49 }
50
51 return 0;
52
53 /*
54 * If no pinctrl handle or default state was found for this device,
55 * let's explicitly free the pin container in the device, there is
56 * no point in keeping it around.
57 */
58cleanup_get:
59 devm_pinctrl_put(dev->pins->p);
60cleanup_alloc:
61 devm_kfree(dev, dev->pins);
62 dev->pins = NULL;
63
64 /* Only return deferrals */
65 if (ret != -EPROBE_DEFER)
66 ret = 0;
67
68 return ret;
69}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index acc3a8ded29d..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
433 */ 433 */
434void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 434void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
435{ 435{
436 if (!work_pending(&genpd->power_off_work)) 436 queue_work(pm_wq, &genpd->power_off_work);
437 queue_work(pm_wq, &genpd->power_off_work);
438} 437}
439 438
440/** 439/**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 50b2831e027d..32ee0fc7ea54 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -162,7 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
162 162
163 return v; 163 return v;
164} 164}
165EXPORT_SYMBOL(opp_get_voltage); 165EXPORT_SYMBOL_GPL(opp_get_voltage);
166 166
167/** 167/**
168 * opp_get_freq() - Gets the frequency corresponding to an available opp 168 * opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -192,7 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
192 192
193 return f; 193 return f;
194} 194}
195EXPORT_SYMBOL(opp_get_freq); 195EXPORT_SYMBOL_GPL(opp_get_freq);
196 196
197/** 197/**
198 * opp_get_opp_count() - Get number of opps available in the opp list 198 * opp_get_opp_count() - Get number of opps available in the opp list
@@ -225,7 +225,7 @@ int opp_get_opp_count(struct device *dev)
225 225
226 return count; 226 return count;
227} 227}
228EXPORT_SYMBOL(opp_get_opp_count); 228EXPORT_SYMBOL_GPL(opp_get_opp_count);
229 229
230/** 230/**
231 * opp_find_freq_exact() - search for an exact frequency 231 * opp_find_freq_exact() - search for an exact frequency
@@ -276,7 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
276 276
277 return opp; 277 return opp;
278} 278}
279EXPORT_SYMBOL(opp_find_freq_exact); 279EXPORT_SYMBOL_GPL(opp_find_freq_exact);
280 280
281/** 281/**
282 * opp_find_freq_ceil() - Search for an rounded ceil freq 282 * opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -323,7 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
323 323
324 return opp; 324 return opp;
325} 325}
326EXPORT_SYMBOL(opp_find_freq_ceil); 326EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
327 327
328/** 328/**
329 * opp_find_freq_floor() - Search for a rounded floor freq 329 * opp_find_freq_floor() - Search for a rounded floor freq
@@ -374,7 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
374 374
375 return opp; 375 return opp;
376} 376}
377EXPORT_SYMBOL(opp_find_freq_floor); 377EXPORT_SYMBOL_GPL(opp_find_freq_floor);
378 378
379/** 379/**
380 * opp_add() - Add an OPP table from a table definitions 380 * opp_add() - Add an OPP table from a table definitions
@@ -568,7 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
568{ 568{
569 return opp_set_availability(dev, freq, true); 569 return opp_set_availability(dev, freq, true);
570} 570}
571EXPORT_SYMBOL(opp_enable); 571EXPORT_SYMBOL_GPL(opp_enable);
572 572
573/** 573/**
574 * opp_disable() - Disable a specific OPP 574 * opp_disable() - Disable a specific OPP
@@ -590,7 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
590{ 590{
591 return opp_set_availability(dev, freq, false); 591 return opp_set_availability(dev, freq, false);
592} 592}
593EXPORT_SYMBOL(opp_disable); 593EXPORT_SYMBOL_GPL(opp_disable);
594 594
595#ifdef CONFIG_CPU_FREQ 595#ifdef CONFIG_CPU_FREQ
596/** 596/**
@@ -661,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
661 661
662 return 0; 662 return 0;
663} 663}
664EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
664 665
665/** 666/**
666 * opp_free_cpufreq_table() - free the cpufreq table 667 * opp_free_cpufreq_table() - free the cpufreq table
@@ -678,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
678 kfree(*table); 679 kfree(*table);
679 *table = NULL; 680 *table = NULL;
680} 681}
682EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
681#endif /* CONFIG_CPU_FREQ */ 683#endif /* CONFIG_CPU_FREQ */
682 684
683/** 685/**
@@ -738,4 +740,5 @@ int of_init_opp_table(struct device *dev)
738 740
739 return 0; 741 return 0;
740} 742}
743EXPORT_SYMBOL_GPL(of_init_opp_table);
741#endif 744#endif
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
382{ 382{
383 unsigned int cec; 383 unsigned int cec;
384 384
385 /*
386 * active wakeup source should bring the system
387 * out of PM_SUSPEND_FREEZE state
388 */
389 freeze_wake();
390
385 ws->active = true; 391 ws->active = true;
386 ws->active_count++; 392 ws->active_count++;
387 ws->last_time = ktime_get(); 393 ws->last_time = ktime_get();
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5e75d1b683e2..cf129980abd0 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_REGMAP) += regmap.o regcache.o 1obj-$(CONFIG_REGMAP) += regmap.o regcache.o
2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o 2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o 3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o 4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o 5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 401d1919635a..5a22bd33ce3d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -16,6 +16,7 @@
16#include <linux/regmap.h> 16#include <linux/regmap.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/wait.h>
19 20
20struct regmap; 21struct regmap;
21struct regcache_ops; 22struct regcache_ops;
@@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache {
25 off_t min; 26 off_t min;
26 off_t max; 27 off_t max;
27 unsigned int base_reg; 28 unsigned int base_reg;
29 unsigned int max_reg;
28}; 30};
29 31
30struct regmap_format { 32struct regmap_format {
@@ -39,6 +41,13 @@ struct regmap_format {
39 unsigned int (*parse_val)(void *buf); 41 unsigned int (*parse_val)(void *buf);
40}; 42};
41 43
44struct regmap_async {
45 struct list_head list;
46 struct work_struct cleanup;
47 struct regmap *map;
48 void *work_buf;
49};
50
42struct regmap { 51struct regmap {
43 struct mutex mutex; 52 struct mutex mutex;
44 spinlock_t spinlock; 53 spinlock_t spinlock;
@@ -53,6 +62,11 @@ struct regmap {
53 void *bus_context; 62 void *bus_context;
54 const char *name; 63 const char *name;
55 64
65 spinlock_t async_lock;
66 wait_queue_head_t async_waitq;
67 struct list_head async_list;
68 int async_ret;
69
56#ifdef CONFIG_DEBUG_FS 70#ifdef CONFIG_DEBUG_FS
57 struct dentry *debugfs; 71 struct dentry *debugfs;
58 const char *debugfs_name; 72 const char *debugfs_name;
@@ -74,6 +88,11 @@ struct regmap {
74 const struct regmap_access_table *volatile_table; 88 const struct regmap_access_table *volatile_table;
75 const struct regmap_access_table *precious_table; 89 const struct regmap_access_table *precious_table;
76 90
91 int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
92 int (*reg_write)(void *context, unsigned int reg, unsigned int val);
93
94 bool defer_caching;
95
77 u8 read_flag_mask; 96 u8 read_flag_mask;
78 u8 write_flag_mask; 97 u8 write_flag_mask;
79 98
@@ -175,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
175 unsigned int val, unsigned int word_size); 194 unsigned int val, unsigned int word_size);
176int regcache_lookup_reg(struct regmap *map, unsigned int reg); 195int regcache_lookup_reg(struct regmap *map, unsigned int reg);
177 196
197void regmap_async_complete_cb(struct regmap_async *async, int ret);
198
178extern struct regcache_ops regcache_rbtree_ops; 199extern struct regcache_ops regcache_rbtree_ops;
179extern struct regcache_ops regcache_lzo_ops; 200extern struct regcache_ops regcache_lzo_ops;
201extern struct regcache_ops regcache_flat_ops;
180 202
181#endif 203#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000000..d9762e41959b
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,72 @@
1/*
2 * Register cache access API - flat caching support
3 *
4 * Copyright 2012 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/slab.h>
14#include <linux/device.h>
15#include <linux/seq_file.h>
16
17#include "internal.h"
18
19static int regcache_flat_init(struct regmap *map)
20{
21 int i;
22 unsigned int *cache;
23
24 map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
25 GFP_KERNEL);
26 if (!map->cache)
27 return -ENOMEM;
28
29 cache = map->cache;
30
31 for (i = 0; i < map->num_reg_defaults; i++)
32 cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
33
34 return 0;
35}
36
37static int regcache_flat_exit(struct regmap *map)
38{
39 kfree(map->cache);
40 map->cache = NULL;
41
42 return 0;
43}
44
45static int regcache_flat_read(struct regmap *map,
46 unsigned int reg, unsigned int *value)
47{
48 unsigned int *cache = map->cache;
49
50 *value = cache[reg];
51
52 return 0;
53}
54
55static int regcache_flat_write(struct regmap *map, unsigned int reg,
56 unsigned int value)
57{
58 unsigned int *cache = map->cache;
59
60 cache[reg] = value;
61
62 return 0;
63}
64
65struct regcache_ops regcache_flat_ops = {
66 .type = REGCACHE_FLAT,
67 .name = "flat",
68 .init = regcache_flat_init,
69 .exit = regcache_flat_exit,
70 .read = regcache_flat_read,
71 .write = regcache_flat_write,
72};
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 835883bda977..e69ff3e4742c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -22,6 +22,7 @@
22static const struct regcache_ops *cache_types[] = { 22static const struct regcache_ops *cache_types[] = {
23 &regcache_rbtree_ops, 23 &regcache_rbtree_ops,
24 &regcache_lzo_ops, 24 &regcache_lzo_ops,
25 &regcache_flat_ops,
25}; 26};
26 27
27static int regcache_hw_init(struct regmap *map) 28static int regcache_hw_init(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 46a213a596e2..78d5f20c5f5b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -81,6 +81,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
81 struct regmap_debugfs_off_cache *c = NULL; 81 struct regmap_debugfs_off_cache *c = NULL;
82 loff_t p = 0; 82 loff_t p = 0;
83 unsigned int i, ret; 83 unsigned int i, ret;
84 unsigned int fpos_offset;
85 unsigned int reg_offset;
84 86
85 /* 87 /*
86 * If we don't have a cache build one so we don't have to do a 88 * If we don't have a cache build one so we don't have to do a
@@ -93,6 +95,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
93 regmap_precious(map, i)) { 95 regmap_precious(map, i)) {
94 if (c) { 96 if (c) {
95 c->max = p - 1; 97 c->max = p - 1;
98 fpos_offset = c->max - c->min;
99 reg_offset = fpos_offset / map->debugfs_tot_len;
100 c->max_reg = c->base_reg + reg_offset;
96 list_add_tail(&c->list, 101 list_add_tail(&c->list,
97 &map->debugfs_off_cache); 102 &map->debugfs_off_cache);
98 c = NULL; 103 c = NULL;
@@ -119,10 +124,11 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
119 /* Close the last entry off if we didn't scan beyond it */ 124 /* Close the last entry off if we didn't scan beyond it */
120 if (c) { 125 if (c) {
121 c->max = p - 1; 126 c->max = p - 1;
127 fpos_offset = c->max - c->min;
128 reg_offset = fpos_offset / map->debugfs_tot_len;
129 c->max_reg = c->base_reg + reg_offset;
122 list_add_tail(&c->list, 130 list_add_tail(&c->list,
123 &map->debugfs_off_cache); 131 &map->debugfs_off_cache);
124 } else {
125 return base;
126 } 132 }
127 133
128 /* 134 /*
@@ -130,25 +136,38 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
130 * allocate and we should never be in this code if there are 136 * allocate and we should never be in this code if there are
131 * no registers at all. 137 * no registers at all.
132 */ 138 */
133 if (list_empty(&map->debugfs_off_cache)) { 139 WARN_ON(list_empty(&map->debugfs_off_cache));
134 WARN_ON(list_empty(&map->debugfs_off_cache)); 140 ret = base;
135 return base;
136 }
137 141
138 /* Find the relevant block */ 142 /* Find the relevant block:offset */
139 list_for_each_entry(c, &map->debugfs_off_cache, list) { 143 list_for_each_entry(c, &map->debugfs_off_cache, list) {
140 if (from >= c->min && from <= c->max) { 144 if (from >= c->min && from <= c->max) {
141 *pos = c->min; 145 fpos_offset = from - c->min;
142 return c->base_reg; 146 reg_offset = fpos_offset / map->debugfs_tot_len;
147 *pos = c->min + (reg_offset * map->debugfs_tot_len);
148 return c->base_reg + reg_offset;
143 } 149 }
144 150
145 *pos = c->min; 151 *pos = c->max;
146 ret = c->base_reg; 152 ret = c->max_reg;
147 } 153 }
148 154
149 return ret; 155 return ret;
150} 156}
151 157
158static inline void regmap_calc_tot_len(struct regmap *map,
159 void *buf, size_t count)
160{
161 /* Calculate the length of a fixed format */
162 if (!map->debugfs_tot_len) {
163 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
164 buf, count);
165 map->debugfs_val_len = 2 * map->format.val_bytes;
166 map->debugfs_tot_len = map->debugfs_reg_len +
167 map->debugfs_val_len + 3; /* : \n */
168 }
169}
170
152static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, 171static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
153 unsigned int to, char __user *user_buf, 172 unsigned int to, char __user *user_buf,
154 size_t count, loff_t *ppos) 173 size_t count, loff_t *ppos)
@@ -167,14 +186,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
167 if (!buf) 186 if (!buf)
168 return -ENOMEM; 187 return -ENOMEM;
169 188
170 /* Calculate the length of a fixed format */ 189 regmap_calc_tot_len(map, buf, count);
171 if (!map->debugfs_tot_len) {
172 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
173 buf, count);
174 map->debugfs_val_len = 2 * map->format.val_bytes;
175 map->debugfs_tot_len = map->debugfs_reg_len +
176 map->debugfs_val_len + 3; /* : \n */
177 }
178 190
179 /* Work out which register we're starting at */ 191 /* Work out which register we're starting at */
180 start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); 192 start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
@@ -189,7 +201,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
189 /* If we're in the region the user is trying to read */ 201 /* If we're in the region the user is trying to read */
190 if (p >= *ppos) { 202 if (p >= *ppos) {
191 /* ...but not beyond it */ 203 /* ...but not beyond it */
192 if (buf_pos + 1 + map->debugfs_tot_len >= count) 204 if (buf_pos + map->debugfs_tot_len > count)
193 break; 205 break;
194 206
195 /* Format the register */ 207 /* Format the register */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5972ad958544..4706c63d0bc6 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
34 int irq; 34 int irq;
35 int wake_count; 35 int wake_count;
36 36
37 void *status_reg_buf;
37 unsigned int *status_buf; 38 unsigned int *status_buf;
38 unsigned int *mask_buf; 39 unsigned int *mask_buf;
39 unsigned int *mask_buf_def; 40 unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
87 if (ret != 0) 88 if (ret != 0)
88 dev_err(d->map->dev, "Failed to sync masks in %x\n", 89 dev_err(d->map->dev, "Failed to sync masks in %x\n",
89 reg); 90 reg);
91
92 reg = d->chip->wake_base +
93 (i * map->reg_stride * d->irq_reg_stride);
94 if (d->wake_buf) {
95 if (d->chip->wake_invert)
96 ret = regmap_update_bits(d->map, reg,
97 d->mask_buf_def[i],
98 ~d->wake_buf[i]);
99 else
100 ret = regmap_update_bits(d->map, reg,
101 d->mask_buf_def[i],
102 d->wake_buf[i]);
103 if (ret != 0)
104 dev_err(d->map->dev,
105 "Failed to sync wakes in %x: %d\n",
106 reg, ret);
107 }
90 } 108 }
91 109
92 if (d->chip->runtime_pm) 110 if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
129 struct regmap *map = d->map; 147 struct regmap *map = d->map;
130 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); 148 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
131 149
132 if (!d->chip->wake_base)
133 return -EINVAL;
134
135 if (on) { 150 if (on) {
136 d->wake_buf[irq_data->reg_offset / map->reg_stride] 151 if (d->wake_buf)
137 &= ~irq_data->mask; 152 d->wake_buf[irq_data->reg_offset / map->reg_stride]
153 &= ~irq_data->mask;
138 d->wake_count++; 154 d->wake_count++;
139 } else { 155 } else {
140 d->wake_buf[irq_data->reg_offset / map->reg_stride] 156 if (d->wake_buf)
141 |= irq_data->mask; 157 d->wake_buf[irq_data->reg_offset / map->reg_stride]
158 |= irq_data->mask;
142 d->wake_count--; 159 d->wake_count--;
143 } 160 }
144 161
@@ -172,25 +189,69 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
172 } 189 }
173 190
174 /* 191 /*
175 * Ignore masked IRQs and ack if we need to; we ack early so 192 * Read in the statuses, using a single bulk read if possible
176 * there is no race between handling and acknowleding the 193 * in order to reduce the I/O overheads.
177 * interrupt. We assume that typically few of the interrupts
178 * will fire simultaneously so don't worry about overhead from
179 * doing a write per register.
180 */ 194 */
181 for (i = 0; i < data->chip->num_regs; i++) { 195 if (!map->use_single_rw && map->reg_stride == 1 &&
182 ret = regmap_read(map, chip->status_base + (i * map->reg_stride 196 data->irq_reg_stride == 1) {
183 * data->irq_reg_stride), 197 u8 *buf8 = data->status_reg_buf;
184 &data->status_buf[i]); 198 u16 *buf16 = data->status_reg_buf;
199 u32 *buf32 = data->status_reg_buf;
185 200
201 BUG_ON(!data->status_reg_buf);
202
203 ret = regmap_bulk_read(map, chip->status_base,
204 data->status_reg_buf,
205 chip->num_regs);
186 if (ret != 0) { 206 if (ret != 0) {
187 dev_err(map->dev, "Failed to read IRQ status: %d\n", 207 dev_err(map->dev, "Failed to read IRQ status: %d\n",
188 ret); 208 ret);
189 if (chip->runtime_pm)
190 pm_runtime_put(map->dev);
191 return IRQ_NONE; 209 return IRQ_NONE;
192 } 210 }
193 211
212 for (i = 0; i < data->chip->num_regs; i++) {
213 switch (map->format.val_bytes) {
214 case 1:
215 data->status_buf[i] = buf8[i];
216 break;
217 case 2:
218 data->status_buf[i] = buf16[i];
219 break;
220 case 4:
221 data->status_buf[i] = buf32[i];
222 break;
223 default:
224 BUG();
225 return IRQ_NONE;
226 }
227 }
228
229 } else {
230 for (i = 0; i < data->chip->num_regs; i++) {
231 ret = regmap_read(map, chip->status_base +
232 (i * map->reg_stride
233 * data->irq_reg_stride),
234 &data->status_buf[i]);
235
236 if (ret != 0) {
237 dev_err(map->dev,
238 "Failed to read IRQ status: %d\n",
239 ret);
240 if (chip->runtime_pm)
241 pm_runtime_put(map->dev);
242 return IRQ_NONE;
243 }
244 }
245 }
246
247 /*
248 * Ignore masked IRQs and ack if we need to; we ack early so
249 * there is no race between handling and acknowleding the
250 * interrupt. We assume that typically few of the interrupts
251 * will fire simultaneously so don't worry about overhead from
252 * doing a write per register.
253 */
254 for (i = 0; i < data->chip->num_regs; i++) {
194 data->status_buf[i] &= ~data->mask_buf[i]; 255 data->status_buf[i] &= ~data->mask_buf[i];
195 256
196 if (data->status_buf[i] && chip->ack_base) { 257 if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +377,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
316 377
317 d->irq_chip = regmap_irq_chip; 378 d->irq_chip = regmap_irq_chip;
318 d->irq_chip.name = chip->name; 379 d->irq_chip.name = chip->name;
319 if (!chip->wake_base) {
320 d->irq_chip.irq_set_wake = NULL;
321 d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
322 IRQCHIP_SKIP_SET_WAKE;
323 }
324 d->irq = irq; 380 d->irq = irq;
325 d->map = map; 381 d->map = map;
326 d->chip = chip; 382 d->chip = chip;
@@ -331,6 +387,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
331 else 387 else
332 d->irq_reg_stride = 1; 388 d->irq_reg_stride = 1;
333 389
390 if (!map->use_single_rw && map->reg_stride == 1 &&
391 d->irq_reg_stride == 1) {
392 d->status_reg_buf = kmalloc(map->format.val_bytes *
393 chip->num_regs, GFP_KERNEL);
394 if (!d->status_reg_buf)
395 goto err_alloc;
396 }
397
334 mutex_init(&d->lock); 398 mutex_init(&d->lock);
335 399
336 for (i = 0; i < chip->num_irqs; i++) 400 for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +425,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
361 d->wake_buf[i] = d->mask_buf_def[i]; 425 d->wake_buf[i] = d->mask_buf_def[i];
362 reg = chip->wake_base + 426 reg = chip->wake_base +
363 (i * map->reg_stride * d->irq_reg_stride); 427 (i * map->reg_stride * d->irq_reg_stride);
364 ret = regmap_update_bits(map, reg, d->wake_buf[i], 428
365 d->wake_buf[i]); 429 if (chip->wake_invert)
430 ret = regmap_update_bits(map, reg,
431 d->mask_buf_def[i],
432 0);
433 else
434 ret = regmap_update_bits(map, reg,
435 d->mask_buf_def[i],
436 d->wake_buf[i]);
366 if (ret != 0) { 437 if (ret != 0) {
367 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 438 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
368 reg, ret); 439 reg, ret);
@@ -401,6 +472,7 @@ err_alloc:
401 kfree(d->mask_buf_def); 472 kfree(d->mask_buf_def);
402 kfree(d->mask_buf); 473 kfree(d->mask_buf);
403 kfree(d->status_buf); 474 kfree(d->status_buf);
475 kfree(d->status_reg_buf);
404 kfree(d); 476 kfree(d);
405 return ret; 477 return ret;
406} 478}
@@ -422,6 +494,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
422 kfree(d->wake_buf); 494 kfree(d->wake_buf);
423 kfree(d->mask_buf_def); 495 kfree(d->mask_buf_def);
424 kfree(d->mask_buf); 496 kfree(d->mask_buf);
497 kfree(d->status_reg_buf);
425 kfree(d->status_buf); 498 kfree(d->status_buf);
426 kfree(d); 499 kfree(d);
427} 500}
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index f05fc74dd84a..98745dd77e8c 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -16,6 +16,7 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <linux/clk.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/io.h> 22#include <linux/io.h>
@@ -26,6 +27,7 @@
26struct regmap_mmio_context { 27struct regmap_mmio_context {
27 void __iomem *regs; 28 void __iomem *regs;
28 unsigned val_bytes; 29 unsigned val_bytes;
30 struct clk *clk;
29}; 31};
30 32
31static int regmap_mmio_gather_write(void *context, 33static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
34{ 36{
35 struct regmap_mmio_context *ctx = context; 37 struct regmap_mmio_context *ctx = context;
36 u32 offset; 38 u32 offset;
39 int ret;
37 40
38 BUG_ON(reg_size != 4); 41 BUG_ON(reg_size != 4);
39 42
43 if (ctx->clk) {
44 ret = clk_enable(ctx->clk);
45 if (ret < 0)
46 return ret;
47 }
48
40 offset = *(u32 *)reg; 49 offset = *(u32 *)reg;
41 50
42 while (val_size) { 51 while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
64 offset += ctx->val_bytes; 73 offset += ctx->val_bytes;
65 } 74 }
66 75
76 if (ctx->clk)
77 clk_disable(ctx->clk);
78
67 return 0; 79 return 0;
68} 80}
69 81
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
80{ 92{
81 struct regmap_mmio_context *ctx = context; 93 struct regmap_mmio_context *ctx = context;
82 u32 offset; 94 u32 offset;
95 int ret;
83 96
84 BUG_ON(reg_size != 4); 97 BUG_ON(reg_size != 4);
85 98
99 if (ctx->clk) {
100 ret = clk_enable(ctx->clk);
101 if (ret < 0)
102 return ret;
103 }
104
86 offset = *(u32 *)reg; 105 offset = *(u32 *)reg;
87 106
88 while (val_size) { 107 while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
110 offset += ctx->val_bytes; 129 offset += ctx->val_bytes;
111 } 130 }
112 131
132 if (ctx->clk)
133 clk_disable(ctx->clk);
134
113 return 0; 135 return 0;
114} 136}
115 137
116static void regmap_mmio_free_context(void *context) 138static void regmap_mmio_free_context(void *context)
117{ 139{
140 struct regmap_mmio_context *ctx = context;
141
142 if (ctx->clk) {
143 clk_unprepare(ctx->clk);
144 clk_put(ctx->clk);
145 }
118 kfree(context); 146 kfree(context);
119} 147}
120 148
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
128 .val_format_endian_default = REGMAP_ENDIAN_NATIVE, 156 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
129}; 157};
130 158
131static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, 159static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
160 const char *clk_id,
161 void __iomem *regs,
132 const struct regmap_config *config) 162 const struct regmap_config *config)
133{ 163{
134 struct regmap_mmio_context *ctx; 164 struct regmap_mmio_context *ctx;
135 int min_stride; 165 int min_stride;
166 int ret;
136 167
137 if (config->reg_bits != 32) 168 if (config->reg_bits != 32)
138 return ERR_PTR(-EINVAL); 169 return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
179 ctx->regs = regs; 210 ctx->regs = regs;
180 ctx->val_bytes = config->val_bits / 8; 211 ctx->val_bytes = config->val_bits / 8;
181 212
213 if (clk_id == NULL)
214 return ctx;
215
216 ctx->clk = clk_get(dev, clk_id);
217 if (IS_ERR(ctx->clk)) {
218 ret = PTR_ERR(ctx->clk);
219 goto err_free;
220 }
221
222 ret = clk_prepare(ctx->clk);
223 if (ret < 0) {
224 clk_put(ctx->clk);
225 goto err_free;
226 }
227
182 return ctx; 228 return ctx;
229
230err_free:
231 kfree(ctx);
232
233 return ERR_PTR(ret);
183} 234}
184 235
185/** 236/**
186 * regmap_init_mmio(): Initialise register map 237 * regmap_init_mmio_clk(): Initialise register map with register clock
187 * 238 *
188 * @dev: Device that will be interacted with 239 * @dev: Device that will be interacted with
240 * @clk_id: register clock consumer ID
189 * @regs: Pointer to memory-mapped IO region 241 * @regs: Pointer to memory-mapped IO region
190 * @config: Configuration for register map 242 * @config: Configuration for register map
191 * 243 *
192 * The return value will be an ERR_PTR() on error or a valid pointer to 244 * The return value will be an ERR_PTR() on error or a valid pointer to
193 * a struct regmap. 245 * a struct regmap.
194 */ 246 */
195struct regmap *regmap_init_mmio(struct device *dev, 247struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
196 void __iomem *regs, 248 void __iomem *regs,
197 const struct regmap_config *config) 249 const struct regmap_config *config)
198{ 250{
199 struct regmap_mmio_context *ctx; 251 struct regmap_mmio_context *ctx;
200 252
201 ctx = regmap_mmio_gen_context(regs, config); 253 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
202 if (IS_ERR(ctx)) 254 if (IS_ERR(ctx))
203 return ERR_CAST(ctx); 255 return ERR_CAST(ctx);
204 256
205 return regmap_init(dev, &regmap_mmio, ctx, config); 257 return regmap_init(dev, &regmap_mmio, ctx, config);
206} 258}
207EXPORT_SYMBOL_GPL(regmap_init_mmio); 259EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
208 260
209/** 261/**
210 * devm_regmap_init_mmio(): Initialise managed register map 262 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
211 * 263 *
212 * @dev: Device that will be interacted with 264 * @dev: Device that will be interacted with
265 * @clk_id: register clock consumer ID
213 * @regs: Pointer to memory-mapped IO region 266 * @regs: Pointer to memory-mapped IO region
214 * @config: Configuration for register map 267 * @config: Configuration for register map
215 * 268 *
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
217 * to a struct regmap. The regmap will be automatically freed by the 270 * to a struct regmap. The regmap will be automatically freed by the
218 * device management code. 271 * device management code.
219 */ 272 */
220struct regmap *devm_regmap_init_mmio(struct device *dev, 273struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
221 void __iomem *regs, 274 void __iomem *regs,
222 const struct regmap_config *config) 275 const struct regmap_config *config)
223{ 276{
224 struct regmap_mmio_context *ctx; 277 struct regmap_mmio_context *ctx;
225 278
226 ctx = regmap_mmio_gen_context(regs, config); 279 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
227 if (IS_ERR(ctx)) 280 if (IS_ERR(ctx))
228 return ERR_CAST(ctx); 281 return ERR_CAST(ctx);
229 282
230 return devm_regmap_init(dev, &regmap_mmio, ctx, config); 283 return devm_regmap_init(dev, &regmap_mmio, ctx, config);
231} 284}
232EXPORT_SYMBOL_GPL(devm_regmap_init_mmio); 285EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
233 286
234MODULE_LICENSE("GPL v2"); 287MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index ffa46a92ad33..4c506bd940f3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,6 +15,21 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17 17
18#include "internal.h"
19
20struct regmap_async_spi {
21 struct regmap_async core;
22 struct spi_message m;
23 struct spi_transfer t[2];
24};
25
26static void regmap_spi_complete(void *data)
27{
28 struct regmap_async_spi *async = data;
29
30 regmap_async_complete_cb(&async->core, async->m.status);
31}
32
18static int regmap_spi_write(void *context, const void *data, size_t count) 33static int regmap_spi_write(void *context, const void *data, size_t count)
19{ 34{
20 struct device *dev = context; 35 struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
40 return spi_sync(spi, &m); 55 return spi_sync(spi, &m);
41} 56}
42 57
58static int regmap_spi_async_write(void *context,
59 const void *reg, size_t reg_len,
60 const void *val, size_t val_len,
61 struct regmap_async *a)
62{
63 struct regmap_async_spi *async = container_of(a,
64 struct regmap_async_spi,
65 core);
66 struct device *dev = context;
67 struct spi_device *spi = to_spi_device(dev);
68
69 async->t[0].tx_buf = reg;
70 async->t[0].len = reg_len;
71 async->t[1].tx_buf = val;
72 async->t[1].len = val_len;
73
74 spi_message_init(&async->m);
75 spi_message_add_tail(&async->t[0], &async->m);
76 spi_message_add_tail(&async->t[1], &async->m);
77
78 async->m.complete = regmap_spi_complete;
79 async->m.context = async;
80
81 return spi_async(spi, &async->m);
82}
83
84static struct regmap_async *regmap_spi_async_alloc(void)
85{
86 struct regmap_async_spi *async_spi;
87
88 async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
89 if (!async_spi)
90 return NULL;
91
92 return &async_spi->core;
93}
94
43static int regmap_spi_read(void *context, 95static int regmap_spi_read(void *context,
44 const void *reg, size_t reg_size, 96 const void *reg, size_t reg_size,
45 void *val, size_t val_size) 97 void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
53static struct regmap_bus regmap_spi = { 105static struct regmap_bus regmap_spi = {
54 .write = regmap_spi_write, 106 .write = regmap_spi_write,
55 .gather_write = regmap_spi_gather_write, 107 .gather_write = regmap_spi_gather_write,
108 .async_write = regmap_spi_async_write,
109 .async_alloc = regmap_spi_async_alloc,
56 .read = regmap_spi_read, 110 .read = regmap_spi_read,
57 .read_flag_mask = 0x80, 111 .read_flag_mask = 0x80,
58}; 112};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 42d5cb0f503f..3d2367501fd0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -16,6 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/rbtree.h> 18#include <linux/rbtree.h>
19#include <linux/sched.h>
19 20
20#define CREATE_TRACE_POINTS 21#define CREATE_TRACE_POINTS
21#include <trace/events/regmap.h> 22#include <trace/events/regmap.h>
@@ -34,6 +35,22 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 unsigned int mask, unsigned int val, 35 unsigned int mask, unsigned int val,
35 bool *change); 36 bool *change);
36 37
38static int _regmap_bus_read(void *context, unsigned int reg,
39 unsigned int *val);
40static int _regmap_bus_formatted_write(void *context, unsigned int reg,
41 unsigned int val);
42static int _regmap_bus_raw_write(void *context, unsigned int reg,
43 unsigned int val);
44
45static void async_cleanup(struct work_struct *work)
46{
47 struct regmap_async *async = container_of(work, struct regmap_async,
48 cleanup);
49
50 kfree(async->work_buf);
51 kfree(async);
52}
53
37bool regmap_reg_in_ranges(unsigned int reg, 54bool regmap_reg_in_ranges(unsigned int reg,
38 const struct regmap_range *ranges, 55 const struct regmap_range *ranges,
39 unsigned int nranges) 56 unsigned int nranges)
@@ -372,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
372 enum regmap_endian reg_endian, val_endian; 389 enum regmap_endian reg_endian, val_endian;
373 int i, j; 390 int i, j;
374 391
375 if (!bus || !config) 392 if (!config)
376 goto err; 393 goto err;
377 394
378 map = kzalloc(sizeof(*map), GFP_KERNEL); 395 map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -386,7 +403,8 @@ struct regmap *regmap_init(struct device *dev,
386 map->unlock = config->unlock; 403 map->unlock = config->unlock;
387 map->lock_arg = config->lock_arg; 404 map->lock_arg = config->lock_arg;
388 } else { 405 } else {
389 if (bus->fast_io) { 406 if ((bus && bus->fast_io) ||
407 config->fast_io) {
390 spin_lock_init(&map->spinlock); 408 spin_lock_init(&map->spinlock);
391 map->lock = regmap_lock_spinlock; 409 map->lock = regmap_lock_spinlock;
392 map->unlock = regmap_unlock_spinlock; 410 map->unlock = regmap_unlock_spinlock;
@@ -423,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
423 map->cache_type = config->cache_type; 441 map->cache_type = config->cache_type;
424 map->name = config->name; 442 map->name = config->name;
425 443
444 spin_lock_init(&map->async_lock);
445 INIT_LIST_HEAD(&map->async_list);
446 init_waitqueue_head(&map->async_waitq);
447
426 if (config->read_flag_mask || config->write_flag_mask) { 448 if (config->read_flag_mask || config->write_flag_mask) {
427 map->read_flag_mask = config->read_flag_mask; 449 map->read_flag_mask = config->read_flag_mask;
428 map->write_flag_mask = config->write_flag_mask; 450 map->write_flag_mask = config->write_flag_mask;
429 } else { 451 } else if (bus) {
430 map->read_flag_mask = bus->read_flag_mask; 452 map->read_flag_mask = bus->read_flag_mask;
431 } 453 }
432 454
455 if (!bus) {
456 map->reg_read = config->reg_read;
457 map->reg_write = config->reg_write;
458
459 map->defer_caching = false;
460 goto skip_format_initialization;
461 } else {
462 map->reg_read = _regmap_bus_read;
463 }
464
433 reg_endian = config->reg_format_endian; 465 reg_endian = config->reg_format_endian;
434 if (reg_endian == REGMAP_ENDIAN_DEFAULT) 466 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
435 reg_endian = bus->reg_format_endian_default; 467 reg_endian = bus->reg_format_endian_default;
@@ -500,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
500 } 532 }
501 break; 533 break;
502 534
535 case 24:
536 if (reg_endian != REGMAP_ENDIAN_BIG)
537 goto err_map;
538 map->format.format_reg = regmap_format_24;
539 break;
540
503 case 32: 541 case 32:
504 switch (reg_endian) { 542 switch (reg_endian) {
505 case REGMAP_ENDIAN_BIG: 543 case REGMAP_ENDIAN_BIG:
@@ -575,6 +613,16 @@ struct regmap *regmap_init(struct device *dev,
575 goto err_map; 613 goto err_map;
576 } 614 }
577 615
616 if (map->format.format_write) {
617 map->defer_caching = false;
618 map->reg_write = _regmap_bus_formatted_write;
619 } else if (map->format.format_val) {
620 map->defer_caching = true;
621 map->reg_write = _regmap_bus_raw_write;
622 }
623
624skip_format_initialization:
625
578 map->range_tree = RB_ROOT; 626 map->range_tree = RB_ROOT;
579 for (i = 0; i < config->num_ranges; i++) { 627 for (i = 0; i < config->num_ranges; i++) {
580 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 628 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -776,7 +824,7 @@ void regmap_exit(struct regmap *map)
776 regcache_exit(map); 824 regcache_exit(map);
777 regmap_debugfs_exit(map); 825 regmap_debugfs_exit(map);
778 regmap_range_exit(map); 826 regmap_range_exit(map);
779 if (map->bus->free_context) 827 if (map->bus && map->bus->free_context)
780 map->bus->free_context(map->bus_context); 828 map->bus->free_context(map->bus_context);
781 kfree(map->work_buf); 829 kfree(map->work_buf);
782 kfree(map); 830 kfree(map);
@@ -870,15 +918,20 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
870} 918}
871 919
872static int _regmap_raw_write(struct regmap *map, unsigned int reg, 920static int _regmap_raw_write(struct regmap *map, unsigned int reg,
873 const void *val, size_t val_len) 921 const void *val, size_t val_len, bool async)
874{ 922{
875 struct regmap_range_node *range; 923 struct regmap_range_node *range;
924 unsigned long flags;
876 u8 *u8 = map->work_buf; 925 u8 *u8 = map->work_buf;
926 void *work_val = map->work_buf + map->format.reg_bytes +
927 map->format.pad_bytes;
877 void *buf; 928 void *buf;
878 int ret = -ENOTSUPP; 929 int ret = -ENOTSUPP;
879 size_t len; 930 size_t len;
880 int i; 931 int i;
881 932
933 BUG_ON(!map->bus);
934
882 /* Check for unwritable registers before we start */ 935 /* Check for unwritable registers before we start */
883 if (map->writeable_reg) 936 if (map->writeable_reg)
884 for (i = 0; i < val_len / map->format.val_bytes; i++) 937 for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -918,7 +971,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
918 dev_dbg(map->dev, "Writing window %d/%zu\n", 971 dev_dbg(map->dev, "Writing window %d/%zu\n",
919 win_residue, val_len / map->format.val_bytes); 972 win_residue, val_len / map->format.val_bytes);
920 ret = _regmap_raw_write(map, reg, val, win_residue * 973 ret = _regmap_raw_write(map, reg, val, win_residue *
921 map->format.val_bytes); 974 map->format.val_bytes, async);
922 if (ret != 0) 975 if (ret != 0)
923 return ret; 976 return ret;
924 977
@@ -941,6 +994,50 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
941 994
942 u8[0] |= map->write_flag_mask; 995 u8[0] |= map->write_flag_mask;
943 996
997 if (async && map->bus->async_write) {
998 struct regmap_async *async = map->bus->async_alloc();
999 if (!async)
1000 return -ENOMEM;
1001
1002 async->work_buf = kzalloc(map->format.buf_size,
1003 GFP_KERNEL | GFP_DMA);
1004 if (!async->work_buf) {
1005 kfree(async);
1006 return -ENOMEM;
1007 }
1008
1009 INIT_WORK(&async->cleanup, async_cleanup);
1010 async->map = map;
1011
1012 /* If the caller supplied the value we can use it safely. */
1013 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1014 map->format.reg_bytes + map->format.val_bytes);
1015 if (val == work_val)
1016 val = async->work_buf + map->format.pad_bytes +
1017 map->format.reg_bytes;
1018
1019 spin_lock_irqsave(&map->async_lock, flags);
1020 list_add_tail(&async->list, &map->async_list);
1021 spin_unlock_irqrestore(&map->async_lock, flags);
1022
1023 ret = map->bus->async_write(map->bus_context, async->work_buf,
1024 map->format.reg_bytes +
1025 map->format.pad_bytes,
1026 val, val_len, async);
1027
1028 if (ret != 0) {
1029 dev_err(map->dev, "Failed to schedule write: %d\n",
1030 ret);
1031
1032 spin_lock_irqsave(&map->async_lock, flags);
1033 list_del(&async->list);
1034 spin_unlock_irqrestore(&map->async_lock, flags);
1035
1036 kfree(async->work_buf);
1037 kfree(async);
1038 }
1039 }
1040
944 trace_regmap_hw_write_start(map->dev, reg, 1041 trace_regmap_hw_write_start(map->dev, reg,
945 val_len / map->format.val_bytes); 1042 val_len / map->format.val_bytes);
946 1043
@@ -948,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
948 * send the work_buf directly, otherwise try to do a gather 1045 * send the work_buf directly, otherwise try to do a gather
949 * write. 1046 * write.
950 */ 1047 */
951 if (val == (map->work_buf + map->format.pad_bytes + 1048 if (val == work_val)
952 map->format.reg_bytes))
953 ret = map->bus->write(map->bus_context, map->work_buf, 1049 ret = map->bus->write(map->bus_context, map->work_buf,
954 map->format.reg_bytes + 1050 map->format.reg_bytes +
955 map->format.pad_bytes + 1051 map->format.pad_bytes +
@@ -981,14 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
981 return ret; 1077 return ret;
982} 1078}
983 1079
1080static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1081 unsigned int val)
1082{
1083 int ret;
1084 struct regmap_range_node *range;
1085 struct regmap *map = context;
1086
1087 BUG_ON(!map->bus || !map->format.format_write);
1088
1089 range = _regmap_range_lookup(map, reg);
1090 if (range) {
1091 ret = _regmap_select_page(map, &reg, range, 1);
1092 if (ret != 0)
1093 return ret;
1094 }
1095
1096 map->format.format_write(map, reg, val);
1097
1098 trace_regmap_hw_write_start(map->dev, reg, 1);
1099
1100 ret = map->bus->write(map->bus_context, map->work_buf,
1101 map->format.buf_size);
1102
1103 trace_regmap_hw_write_done(map->dev, reg, 1);
1104
1105 return ret;
1106}
1107
1108static int _regmap_bus_raw_write(void *context, unsigned int reg,
1109 unsigned int val)
1110{
1111 struct regmap *map = context;
1112
1113 BUG_ON(!map->bus || !map->format.format_val);
1114
1115 map->format.format_val(map->work_buf + map->format.reg_bytes
1116 + map->format.pad_bytes, val, 0);
1117 return _regmap_raw_write(map, reg,
1118 map->work_buf +
1119 map->format.reg_bytes +
1120 map->format.pad_bytes,
1121 map->format.val_bytes, false);
1122}
1123
1124static inline void *_regmap_map_get_context(struct regmap *map)
1125{
1126 return (map->bus) ? map : map->bus_context;
1127}
1128
984int _regmap_write(struct regmap *map, unsigned int reg, 1129int _regmap_write(struct regmap *map, unsigned int reg,
985 unsigned int val) 1130 unsigned int val)
986{ 1131{
987 struct regmap_range_node *range;
988 int ret; 1132 int ret;
989 BUG_ON(!map->format.format_write && !map->format.format_val); 1133 void *context = _regmap_map_get_context(map);
990 1134
991 if (!map->cache_bypass && map->format.format_write) { 1135 if (!map->cache_bypass && !map->defer_caching) {
992 ret = regcache_write(map, reg, val); 1136 ret = regcache_write(map, reg, val);
993 if (ret != 0) 1137 if (ret != 0)
994 return ret; 1138 return ret;
@@ -1005,33 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
1005 1149
1006 trace_regmap_reg_write(map->dev, reg, val); 1150 trace_regmap_reg_write(map->dev, reg, val);
1007 1151
1008 if (map->format.format_write) { 1152 return map->reg_write(context, reg, val);
1009 range = _regmap_range_lookup(map, reg);
1010 if (range) {
1011 ret = _regmap_select_page(map, &reg, range, 1);
1012 if (ret != 0)
1013 return ret;
1014 }
1015
1016 map->format.format_write(map, reg, val);
1017
1018 trace_regmap_hw_write_start(map->dev, reg, 1);
1019
1020 ret = map->bus->write(map->bus_context, map->work_buf,
1021 map->format.buf_size);
1022
1023 trace_regmap_hw_write_done(map->dev, reg, 1);
1024
1025 return ret;
1026 } else {
1027 map->format.format_val(map->work_buf + map->format.reg_bytes
1028 + map->format.pad_bytes, val, 0);
1029 return _regmap_raw_write(map, reg,
1030 map->work_buf +
1031 map->format.reg_bytes +
1032 map->format.pad_bytes,
1033 map->format.val_bytes);
1034 }
1035} 1153}
1036 1154
1037/** 1155/**
@@ -1082,6 +1200,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
1082{ 1200{
1083 int ret; 1201 int ret;
1084 1202
1203 if (!map->bus)
1204 return -EINVAL;
1085 if (val_len % map->format.val_bytes) 1205 if (val_len % map->format.val_bytes)
1086 return -EINVAL; 1206 return -EINVAL;
1087 if (reg % map->reg_stride) 1207 if (reg % map->reg_stride)
@@ -1089,7 +1209,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
1089 1209
1090 map->lock(map->lock_arg); 1210 map->lock(map->lock_arg);
1091 1211
1092 ret = _regmap_raw_write(map, reg, val, val_len); 1212 ret = _regmap_raw_write(map, reg, val, val_len, false);
1093 1213
1094 map->unlock(map->lock_arg); 1214 map->unlock(map->lock_arg);
1095 1215
@@ -1106,7 +1226,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
1106 * @val_count: Number of registers to write 1226 * @val_count: Number of registers to write
1107 * 1227 *
1108 * This function is intended to be used for writing a large block of 1228 * This function is intended to be used for writing a large block of
1109 * data to be device either in single transfer or multiple transfer. 1229 * data to the device either in single transfer or multiple transfer.
1110 * 1230 *
1111 * A value of zero will be returned on success, a negative errno will 1231 * A value of zero will be returned on success, a negative errno will
1112 * be returned in error cases. 1232 * be returned in error cases.
@@ -1118,6 +1238,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1118 size_t val_bytes = map->format.val_bytes; 1238 size_t val_bytes = map->format.val_bytes;
1119 void *wval; 1239 void *wval;
1120 1240
1241 if (!map->bus)
1242 return -EINVAL;
1121 if (!map->format.parse_val) 1243 if (!map->format.parse_val)
1122 return -EINVAL; 1244 return -EINVAL;
1123 if (reg % map->reg_stride) 1245 if (reg % map->reg_stride)
@@ -1145,14 +1267,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1145 if (map->use_single_rw) { 1267 if (map->use_single_rw) {
1146 for (i = 0; i < val_count; i++) { 1268 for (i = 0; i < val_count; i++) {
1147 ret = regmap_raw_write(map, 1269 ret = regmap_raw_write(map,
1148 reg + (i * map->reg_stride), 1270 reg + (i * map->reg_stride),
1149 val + (i * val_bytes), 1271 val + (i * val_bytes),
1150 val_bytes); 1272 val_bytes);
1151 if (ret != 0) 1273 if (ret != 0)
1152 return ret; 1274 return ret;
1153 } 1275 }
1154 } else { 1276 } else {
1155 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1277 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
1278 false);
1156 } 1279 }
1157 1280
1158 if (val_bytes != 1) 1281 if (val_bytes != 1)
@@ -1164,6 +1287,48 @@ out:
1164} 1287}
1165EXPORT_SYMBOL_GPL(regmap_bulk_write); 1288EXPORT_SYMBOL_GPL(regmap_bulk_write);
1166 1289
1290/**
1291 * regmap_raw_write_async(): Write raw values to one or more registers
1292 * asynchronously
1293 *
1294 * @map: Register map to write to
1295 * @reg: Initial register to write to
1296 * @val: Block of data to be written, laid out for direct transmission to the
1297 * device. Must be valid until regmap_async_complete() is called.
1298 * @val_len: Length of data pointed to by val.
1299 *
1300 * This function is intended to be used for things like firmware
1301 * download where a large block of data needs to be transferred to the
1302 * device. No formatting will be done on the data provided.
1303 *
1304 * If supported by the underlying bus the write will be scheduled
1305 * asynchronously, helping maximise I/O speed on higher speed buses
1306 * like SPI. regmap_async_complete() can be called to ensure that all
1307 * asynchrnous writes have been completed.
1308 *
1309 * A value of zero will be returned on success, a negative errno will
1310 * be returned in error cases.
1311 */
1312int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1313 const void *val, size_t val_len)
1314{
1315 int ret;
1316
1317 if (val_len % map->format.val_bytes)
1318 return -EINVAL;
1319 if (reg % map->reg_stride)
1320 return -EINVAL;
1321
1322 map->lock(map->lock_arg);
1323
1324 ret = _regmap_raw_write(map, reg, val, val_len, true);
1325
1326 map->unlock(map->lock_arg);
1327
1328 return ret;
1329}
1330EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1331
1167static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 1332static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1168 unsigned int val_len) 1333 unsigned int val_len)
1169{ 1334{
@@ -1171,6 +1336,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1171 u8 *u8 = map->work_buf; 1336 u8 *u8 = map->work_buf;
1172 int ret; 1337 int ret;
1173 1338
1339 BUG_ON(!map->bus);
1340
1174 range = _regmap_range_lookup(map, reg); 1341 range = _regmap_range_lookup(map, reg);
1175 if (range) { 1342 if (range) {
1176 ret = _regmap_select_page(map, &reg, range, 1343 ret = _regmap_select_page(map, &reg, range,
@@ -1202,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1202 return ret; 1369 return ret;
1203} 1370}
1204 1371
1372static int _regmap_bus_read(void *context, unsigned int reg,
1373 unsigned int *val)
1374{
1375 int ret;
1376 struct regmap *map = context;
1377
1378 if (!map->format.parse_val)
1379 return -EINVAL;
1380
1381 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1382 if (ret == 0)
1383 *val = map->format.parse_val(map->work_buf);
1384
1385 return ret;
1386}
1387
1205static int _regmap_read(struct regmap *map, unsigned int reg, 1388static int _regmap_read(struct regmap *map, unsigned int reg,
1206 unsigned int *val) 1389 unsigned int *val)
1207{ 1390{
1208 int ret; 1391 int ret;
1392 void *context = _regmap_map_get_context(map);
1393
1394 BUG_ON(!map->reg_read);
1209 1395
1210 if (!map->cache_bypass) { 1396 if (!map->cache_bypass) {
1211 ret = regcache_read(map, reg, val); 1397 ret = regcache_read(map, reg, val);
@@ -1213,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
1213 return 0; 1399 return 0;
1214 } 1400 }
1215 1401
1216 if (!map->format.parse_val)
1217 return -EINVAL;
1218
1219 if (map->cache_only) 1402 if (map->cache_only)
1220 return -EBUSY; 1403 return -EBUSY;
1221 1404
1222 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 1405 ret = map->reg_read(context, reg, val);
1223 if (ret == 0) { 1406 if (ret == 0) {
1224 *val = map->format.parse_val(map->work_buf);
1225
1226#ifdef LOG_DEVICE 1407#ifdef LOG_DEVICE
1227 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 1408 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1228 dev_info(map->dev, "%x => %x\n", reg, *val); 1409 dev_info(map->dev, "%x => %x\n", reg, *val);
1229#endif 1410#endif
1230 1411
1231 trace_regmap_reg_read(map->dev, reg, *val); 1412 trace_regmap_reg_read(map->dev, reg, *val);
1232 }
1233 1413
1234 if (ret == 0 && !map->cache_bypass) 1414 if (!map->cache_bypass)
1235 regcache_write(map, reg, *val); 1415 regcache_write(map, reg, *val);
1416 }
1236 1417
1237 return ret; 1418 return ret;
1238} 1419}
@@ -1283,6 +1464,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1283 unsigned int v; 1464 unsigned int v;
1284 int ret, i; 1465 int ret, i;
1285 1466
1467 if (!map->bus)
1468 return -EINVAL;
1286 if (val_len % map->format.val_bytes) 1469 if (val_len % map->format.val_bytes)
1287 return -EINVAL; 1470 return -EINVAL;
1288 if (reg % map->reg_stride) 1471 if (reg % map->reg_stride)
@@ -1334,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
1334 size_t val_bytes = map->format.val_bytes; 1517 size_t val_bytes = map->format.val_bytes;
1335 bool vol = regmap_volatile_range(map, reg, val_count); 1518 bool vol = regmap_volatile_range(map, reg, val_count);
1336 1519
1520 if (!map->bus)
1521 return -EINVAL;
1337 if (!map->format.parse_val) 1522 if (!map->format.parse_val)
1338 return -EINVAL; 1523 return -EINVAL;
1339 if (reg % map->reg_stride) 1524 if (reg % map->reg_stride)
@@ -1450,6 +1635,68 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1450} 1635}
1451EXPORT_SYMBOL_GPL(regmap_update_bits_check); 1636EXPORT_SYMBOL_GPL(regmap_update_bits_check);
1452 1637
1638void regmap_async_complete_cb(struct regmap_async *async, int ret)
1639{
1640 struct regmap *map = async->map;
1641 bool wake;
1642
1643 spin_lock(&map->async_lock);
1644
1645 list_del(&async->list);
1646 wake = list_empty(&map->async_list);
1647
1648 if (ret != 0)
1649 map->async_ret = ret;
1650
1651 spin_unlock(&map->async_lock);
1652
1653 schedule_work(&async->cleanup);
1654
1655 if (wake)
1656 wake_up(&map->async_waitq);
1657}
1658EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
1659
1660static int regmap_async_is_done(struct regmap *map)
1661{
1662 unsigned long flags;
1663 int ret;
1664
1665 spin_lock_irqsave(&map->async_lock, flags);
1666 ret = list_empty(&map->async_list);
1667 spin_unlock_irqrestore(&map->async_lock, flags);
1668
1669 return ret;
1670}
1671
1672/**
1673 * regmap_async_complete: Ensure all asynchronous I/O has completed.
1674 *
1675 * @map: Map to operate on.
1676 *
1677 * Blocks until any pending asynchronous I/O has completed. Returns
1678 * an error code for any failed I/O operations.
1679 */
1680int regmap_async_complete(struct regmap *map)
1681{
1682 unsigned long flags;
1683 int ret;
1684
1685 /* Nothing to do with no async support */
1686 if (!map->bus->async_write)
1687 return 0;
1688
1689 wait_event(map->async_waitq, regmap_async_is_done(map));
1690
1691 spin_lock_irqsave(&map->async_lock, flags);
1692 ret = map->async_ret;
1693 map->async_ret = 0;
1694 spin_unlock_irqrestore(&map->async_lock, flags);
1695
1696 return ret;
1697}
1698EXPORT_SYMBOL_GPL(regmap_async_complete);
1699
1453/** 1700/**
1454 * regmap_register_patch: Register and apply register updates to be applied 1701 * regmap_register_patch: Register and apply register updates to be applied
1455 * on device initialistion 1702 * on device initialistion
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 19e3fbfd5757..cb0c45488572 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
94#ifdef CONFIG_BCMA_DRIVER_GPIO 94#ifdef CONFIG_BCMA_DRIVER_GPIO
95/* driver_gpio.c */ 95/* driver_gpio.c */
96int bcma_gpio_init(struct bcma_drv_cc *cc); 96int bcma_gpio_init(struct bcma_drv_cc *cc);
97int bcma_gpio_unregister(struct bcma_drv_cc *cc);
97#else 98#else
98static inline int bcma_gpio_init(struct bcma_drv_cc *cc) 99static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
99{ 100{
100 return -ENOTSUPP; 101 return -ENOTSUPP;
101} 102}
103static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
104{
105 return 0;
106}
102#endif /* CONFIG_BCMA_DRIVER_GPIO */ 107#endif /* CONFIG_BCMA_DRIVER_GPIO */
103 108
104#endif 109#endif
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index dbda91e4dff5..1f0b83e18f68 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
21 struct bcma_bus *bus = cc->core->bus; 21 struct bcma_bus *bus = cc->core->bus;
22 22
23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && 23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
24 cc->core->id.rev != 0x38) { 24 cc->core->id.rev != 38) {
25 bcma_err(bus, "NAND flash on unsupported board!\n"); 25 bcma_err(bus, "NAND flash on unsupported board!\n");
26 return -ENOTSUPP; 26 return -ENOTSUPP;
27 } 27 }
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 9a6f585da2d9..71f755c06fc6 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
96 96
97 return gpiochip_add(chip); 97 return gpiochip_add(chip);
98} 98}
99
100int bcma_gpio_unregister(struct bcma_drv_cc *cc)
101{
102 return gpiochip_remove(&cc->gpio);
103}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 4a92f647b58b..324f9debda88 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
268void bcma_bus_unregister(struct bcma_bus *bus) 268void bcma_bus_unregister(struct bcma_bus *bus)
269{ 269{
270 struct bcma_device *cores[3]; 270 struct bcma_device *cores[3];
271 int err;
272
273 err = bcma_gpio_unregister(&bus->drv_cc);
274 if (err == -EBUSY)
275 bcma_err(bus, "Some GPIOs are still in use.\n");
276 else if (err)
277 bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
271 278
272 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 279 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
273 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); 280 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f58a4a4b4dfb..2b8303ad63c9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
168} 168}
169 169
170/* must hold resource->req_lock */ 170/* must hold resource->req_lock */
171static void start_new_tl_epoch(struct drbd_tconn *tconn) 171void start_new_tl_epoch(struct drbd_tconn *tconn)
172{ 172{
173 /* no point closing an epoch, if it is empty, anyways. */ 173 /* no point closing an epoch, if it is empty, anyways. */
174 if (tconn->current_tle_writes == 0) 174 if (tconn->current_tle_writes == 0)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 016de6b8bb57..c08d22964d06 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -267,6 +267,7 @@ struct bio_and_error {
267 int error; 267 int error;
268}; 268};
269 269
270extern void start_new_tl_epoch(struct drbd_tconn *tconn);
270extern void drbd_req_destroy(struct kref *kref); 271extern void drbd_req_destroy(struct kref *kref);
271extern void _req_may_be_done(struct drbd_request *req, 272extern void _req_may_be_done(struct drbd_request *req,
272 struct bio_and_error *m); 273 struct bio_and_error *m);
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 53bf6182bac4..0fe220cfb9e9 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
931 enum drbd_state_rv rv = SS_SUCCESS; 931 enum drbd_state_rv rv = SS_SUCCESS;
932 enum sanitize_state_warnings ssw; 932 enum sanitize_state_warnings ssw;
933 struct after_state_chg_work *ascw; 933 struct after_state_chg_work *ascw;
934 bool did_remote, should_do_remote;
934 935
935 os = drbd_read_state(mdev); 936 os = drbd_read_state(mdev);
936 937
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
981 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) 982 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
982 atomic_inc(&mdev->local_cnt); 983 atomic_inc(&mdev->local_cnt);
983 984
985 did_remote = drbd_should_do_remote(mdev->state);
984 mdev->state.i = ns.i; 986 mdev->state.i = ns.i;
987 should_do_remote = drbd_should_do_remote(mdev->state);
985 mdev->tconn->susp = ns.susp; 988 mdev->tconn->susp = ns.susp;
986 mdev->tconn->susp_nod = ns.susp_nod; 989 mdev->tconn->susp_nod = ns.susp_nod;
987 mdev->tconn->susp_fen = ns.susp_fen; 990 mdev->tconn->susp_fen = ns.susp_fen;
988 991
992 /* put replicated vs not-replicated requests in seperate epochs */
993 if (did_remote != should_do_remote)
994 start_new_tl_epoch(mdev->tconn);
995
989 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 996 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
990 drbd_print_uuids(mdev, "attached to UUIDs"); 997 drbd_print_uuids(mdev, "attached to UUIDs");
991 998
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9694dd99bbbc..3fd100990453 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
626 } 626 }
627 } 627 }
628 628
629 if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 629 if (cmdto_cnt) {
630 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); 630 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
631 631 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
632 mtip_restart_port(port); 632 mtip_restart_port(port);
633 wake_up_interruptible(&port->svc_wait);
634 }
633 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 635 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
634 wake_up_interruptible(&port->svc_wait);
635 } 636 }
636 637
637 if (port->ic_pause_timer) { 638 if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
3887 * Delete our gendisk structure. This also removes the device 3888 * Delete our gendisk structure. This also removes the device
3888 * from /dev 3889 * from /dev
3889 */ 3890 */
3890 del_gendisk(dd->disk); 3891 if (dd->disk) {
3892 if (dd->disk->queue)
3893 del_gendisk(dd->disk);
3894 else
3895 put_disk(dd->disk);
3896 }
3891 3897
3892 spin_lock(&rssd_index_lock); 3898 spin_lock(&rssd_index_lock);
3893 ida_remove(&rssd_index_ida, dd->index); 3899 ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
3921 "Shutting down %s ...\n", dd->disk->disk_name); 3927 "Shutting down %s ...\n", dd->disk->disk_name);
3922 3928
3923 /* Delete our gendisk structure, and cleanup the blk queue. */ 3929 /* Delete our gendisk structure, and cleanup the blk queue. */
3924 del_gendisk(dd->disk); 3930 if (dd->disk) {
3931 if (dd->disk->queue)
3932 del_gendisk(dd->disk);
3933 else
3934 put_disk(dd->disk);
3935 }
3936
3925 3937
3926 spin_lock(&rssd_index_lock); 3938 spin_lock(&rssd_index_lock);
3927 ida_remove(&rssd_index_ida, dd->index); 3939 ida_remove(&rssd_index_ida, dd->index);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 564156a8e572..5814deb6963d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
461 int op_len, err; 461 int op_len, err;
462 void *req_buf; 462 void *req_buf;
463 463
464 if (!(((u64)1 << ((u64)op - 1)) & port->operations)) 464 if (!(((u64)1 << (u64)op) & port->operations))
465 return -EOPNOTSUPP; 465 return -EOPNOTSUPP;
466 466
467 switch (op) { 467 switch (op) {
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 765fa2b3d337..8766a2257091 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -844,6 +844,7 @@ static int swim_floppy_init(struct swim_priv *swd)
844 swd->unit[drive].swd = swd; 844 swd->unit[drive].swd = swd;
845 } 845 }
846 846
847 spin_lock_init(&swd->lock);
847 swd->queue = blk_init_queue(do_fd_request, &swd->lock); 848 swd->queue = blk_init_queue(do_fd_request, &swd->lock);
848 if (!swd->queue) { 849 if (!swd->queue) {
849 err = -ENOMEM; 850 err = -ENOMEM;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9d8409c02082..8ad21a25bc0d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev)
889{ 889{
890 struct virtio_blk *vblk = vdev->priv; 890 struct virtio_blk *vblk = vdev->priv;
891 int index = vblk->index; 891 int index = vblk->index;
892 int refc;
892 893
893 /* Prevent config work handler from accessing the device. */ 894 /* Prevent config work handler from accessing the device. */
894 mutex_lock(&vblk->config_lock); 895 mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev)
903 904
904 flush_work(&vblk->config_work); 905 flush_work(&vblk->config_work);
905 906
907 refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
906 put_disk(vblk->disk); 908 put_disk(vblk->disk);
907 mempool_destroy(vblk->pool); 909 mempool_destroy(vblk->pool);
908 vdev->config->del_vqs(vdev); 910 vdev->config->del_vqs(vdev);
909 kfree(vblk); 911 kfree(vblk);
910 ida_simple_remove(&vd_index_ida, index); 912
913 /* Only free device id if we don't have any users */
914 if (refc == 1)
915 ida_simple_remove(&vd_index_ida, index);
911} 916}
912 917
913#ifdef CONFIG_PM 918#ifdef CONFIG_PM
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 74374fb762aa..5ac841ff6cc7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
161static void make_response(struct xen_blkif *blkif, u64 id, 161static void make_response(struct xen_blkif *blkif, u64 id,
162 unsigned short op, int st); 162 unsigned short op, int st);
163 163
164#define foreach_grant(pos, rbtree, node) \ 164#define foreach_grant_safe(pos, n, rbtree, node) \
165 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ 165 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
166 (n) = rb_next(&(pos)->node); \
166 &(pos)->node != NULL; \ 167 &(pos)->node != NULL; \
167 (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) 168 (pos) = container_of(n, typeof(*(pos)), node), \
169 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
168 170
169 171
170static void add_persistent_gnt(struct rb_root *root, 172static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
217 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 219 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
218 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 220 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
219 struct persistent_gnt *persistent_gnt; 221 struct persistent_gnt *persistent_gnt;
222 struct rb_node *n;
220 int ret = 0; 223 int ret = 0;
221 int segs_to_unmap = 0; 224 int segs_to_unmap = 0;
222 225
223 foreach_grant(persistent_gnt, root, node) { 226 foreach_grant_safe(persistent_gnt, n, root, node) {
224 BUG_ON(persistent_gnt->handle == 227 BUG_ON(persistent_gnt->handle ==
225 BLKBACK_INVALID_HANDLE); 228 BLKBACK_INVALID_HANDLE);
226 gnttab_set_unmap_op(&unmap[segs_to_unmap], 229 gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
230 persistent_gnt->handle); 233 persistent_gnt->handle);
231 234
232 pages[segs_to_unmap] = persistent_gnt->page; 235 pages[segs_to_unmap] = persistent_gnt->page;
233 rb_erase(&persistent_gnt->node, root);
234 kfree(persistent_gnt);
235 num--;
236 236
237 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 237 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
238 !rb_next(&persistent_gnt->node)) { 238 !rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
241 BUG_ON(ret); 241 BUG_ON(ret);
242 segs_to_unmap = 0; 242 segs_to_unmap = 0;
243 } 243 }
244
245 rb_erase(&persistent_gnt->node, root);
246 kfree(persistent_gnt);
247 num--;
244 } 248 }
245 BUG_ON(num != 0); 249 BUG_ON(num != 0);
246} 250}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 96e9b00db081..11043c18ac5a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
792{ 792{
793 struct llist_node *all_gnts; 793 struct llist_node *all_gnts;
794 struct grant *persistent_gnt; 794 struct grant *persistent_gnt;
795 struct llist_node *n;
795 796
796 /* Prevent new requests being issued until we fix things up. */ 797 /* Prevent new requests being issued until we fix things up. */
797 spin_lock_irq(&info->io_lock); 798 spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
804 /* Remove all persistent grants */ 805 /* Remove all persistent grants */
805 if (info->persistent_gnts_c) { 806 if (info->persistent_gnts_c) {
806 all_gnts = llist_del_all(&info->persistent_gnts); 807 all_gnts = llist_del_all(&info->persistent_gnts);
807 llist_for_each_entry(persistent_gnt, all_gnts, node) { 808 llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
808 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 809 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
809 __free_page(pfn_to_page(persistent_gnt->pfn)); 810 __free_page(pfn_to_page(persistent_gnt->pfn));
810 kfree(persistent_gnt); 811 kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
835static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, 836static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
836 struct blkif_response *bret) 837 struct blkif_response *bret)
837{ 838{
838 int i; 839 int i = 0;
839 struct bio_vec *bvec; 840 struct bio_vec *bvec;
840 struct req_iterator iter; 841 struct req_iterator iter;
841 unsigned long flags; 842 unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
852 */ 853 */
853 rq_for_each_segment(bvec, s->request, iter) { 854 rq_for_each_segment(bvec, s->request, iter) {
854 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); 855 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
855 i = offset >> PAGE_SHIFT; 856 if (bvec->bv_offset < offset)
857 i++;
856 BUG_ON(i >= s->req.u.rw.nr_segments); 858 BUG_ON(i >= s->req.u.rw.nr_segments);
857 shared_data = kmap_atomic( 859 shared_data = kmap_atomic(
858 pfn_to_page(s->grants_used[i]->pfn)); 860 pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
861 bvec->bv_len); 863 bvec->bv_len);
862 bvec_kunmap_irq(bvec_data, &flags); 864 bvec_kunmap_irq(bvec_data, &flags);
863 kunmap_atomic(shared_data); 865 kunmap_atomic(shared_data);
864 offset += bvec->bv_len; 866 offset = bvec->bv_offset + bvec->bv_len;
865 } 867 }
866 } 868 }
867 /* Add the persistent grant into the list of free grants */ 869 /* Add the persistent grant into the list of free grants */
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b00000e8aef6..33c9a44a9678 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {
77 { USB_DEVICE(0x0CF3, 0x311D) }, 77 { USB_DEVICE(0x0CF3, 0x311D) },
78 { USB_DEVICE(0x13d3, 0x3375) }, 78 { USB_DEVICE(0x13d3, 0x3375) },
79 { USB_DEVICE(0x04CA, 0x3005) }, 79 { USB_DEVICE(0x04CA, 0x3005) },
80 { USB_DEVICE(0x04CA, 0x3006) },
81 { USB_DEVICE(0x04CA, 0x3008) },
80 { USB_DEVICE(0x13d3, 0x3362) }, 82 { USB_DEVICE(0x13d3, 0x3362) },
81 { USB_DEVICE(0x0CF3, 0xE004) }, 83 { USB_DEVICE(0x0CF3, 0xE004) },
82 { USB_DEVICE(0x0930, 0x0219) }, 84 { USB_DEVICE(0x0930, 0x0219) },
83 { USB_DEVICE(0x0489, 0xe057) }, 85 { USB_DEVICE(0x0489, 0xe057) },
86 { USB_DEVICE(0x13d3, 0x3393) },
87 { USB_DEVICE(0x0489, 0xe04e) },
88 { USB_DEVICE(0x0489, 0xe056) },
84 89
85 /* Atheros AR5BBU12 with sflash firmware */ 90 /* Atheros AR5BBU12 with sflash firmware */
86 { USB_DEVICE(0x0489, 0xE02C) }, 91 { USB_DEVICE(0x0489, 0xE02C) },
@@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {
104 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 109 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
105 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 110 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
106 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 111 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
112 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
113 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
107 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 114 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
108 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 115 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
109 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 116 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
110 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 117 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
118 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
119 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
120 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
111 121
112 /* Atheros AR5BBU22 with sflash firmware */ 122 /* Atheros AR5BBU22 with sflash firmware */
113 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 123 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1d4ede5b892..7e351e345476 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -135,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {
135 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
137 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
139 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
138 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 140 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
139 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
140 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 142 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 143 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
144 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
145 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
146 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
142 147
143 /* Atheros AR5BBU12 with sflash firmware */ 148 /* Atheros AR5BBU12 with sflash firmware */
144 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 149 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fe6d4be48296..e3f9a99b8522 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -1041,7 +1041,7 @@ static int hpet_acpi_add(struct acpi_device *device)
1041 return hpet_alloc(&data); 1041 return hpet_alloc(&data);
1042} 1042}
1043 1043
1044static int hpet_acpi_remove(struct acpi_device *device, int type) 1044static int hpet_acpi_remove(struct acpi_device *device)
1045{ 1045{
1046 /* XXX need to unregister clocksource, dealloc mem, etc */ 1046 /* XXX need to unregister clocksource, dealloc mem, etc */
1047 return -EINVAL; 1047 return -EINVAL;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d780295a1473..6386a98e43c1 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1142,7 +1142,7 @@ static int sonypi_acpi_add(struct acpi_device *device)
1142 return 0; 1142 return 0;
1143} 1143}
1144 1144
1145static int sonypi_acpi_remove(struct acpi_device *device, int type) 1145static int sonypi_acpi_remove(struct acpi_device *device)
1146{ 1146{
1147 sonypi_acpi_device = NULL; 1147 sonypi_acpi_device = NULL;
1148 return 0; 1148 return 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 684b0d53764f..ee4dbeafb377 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
2062 /* Disable interrupts for vqs */ 2062 /* Disable interrupts for vqs */
2063 vdev->config->reset(vdev); 2063 vdev->config->reset(vdev);
2064 /* Finish up work that's lined up */ 2064 /* Finish up work that's lined up */
2065 cancel_work_sync(&portdev->control_work); 2065 if (use_multiport(portdev))
2066 cancel_work_sync(&portdev->control_work);
2066 2067
2067 list_for_each_entry_safe(port, port2, &portdev->ports, list) 2068 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2068 unplug_port(port); 2069 unplug_port(port);
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ee90e87e7675..14fde73ea6ff 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,8 +1,13 @@
1# common clock types 1# common clock types
2obj-$(CONFIG_HAVE_CLK) += clk-devres.o 2obj-$(CONFIG_HAVE_CLK) += clk-devres.o
3obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o 3obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
4obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ 4obj-$(CONFIG_COMMON_CLK) += clk.o
5 clk-mux.o clk-divider.o clk-fixed-factor.o 5obj-$(CONFIG_COMMON_CLK) += clk-divider.o
6obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
7obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
8obj-$(CONFIG_COMMON_CLK) += clk-gate.o
9obj-$(CONFIG_COMMON_CLK) += clk-mux.o
10
6# SoCs specific 11# SoCs specific
7obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o 12obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
8obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o 13obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
@@ -20,8 +25,8 @@ endif
20obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o 25obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
21obj-$(CONFIG_ARCH_U8500) += ux500/ 26obj-$(CONFIG_ARCH_U8500) += ux500/
22obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 27obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
23obj-$(CONFIG_ARCH_SUNXI) += clk-sunxi.o
24obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o 28obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
29obj-$(CONFIG_X86) += x86/
25 30
26# Chip specific 31# Chip specific
27obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o 32obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a9204c69148d..68b402101170 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -16,6 +16,7 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/log2.h>
19 20
20/* 21/*
21 * DOC: basic adjustable divider clock that cannot gate 22 * DOC: basic adjustable divider clock that cannot gate
@@ -29,8 +30,7 @@
29 30
30#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) 31#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
31 32
32#define div_mask(d) ((1 << (d->width)) - 1) 33#define div_mask(d) ((1 << ((d)->width)) - 1)
33#define is_power_of_two(i) !(i & ~i)
34 34
35static unsigned int _get_table_maxdiv(const struct clk_div_table *table) 35static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
36{ 36{
@@ -137,7 +137,7 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
137static bool _is_valid_div(struct clk_divider *divider, unsigned int div) 137static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
138{ 138{
139 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) 139 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
140 return is_power_of_two(div); 140 return is_power_of_2(div);
141 if (divider->table) 141 if (divider->table)
142 return _is_valid_table_div(divider->table, div); 142 return _is_valid_table_div(divider->table, div);
143 return true; 143 return true;
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a4899855c0f6..1ef271e47594 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -28,8 +28,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
28 unsigned long parent_rate) 28 unsigned long parent_rate)
29{ 29{
30 struct clk_fixed_factor *fix = to_clk_fixed_factor(hw); 30 struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
31 unsigned long long int rate;
31 32
32 return parent_rate * fix->mult / fix->div; 33 rate = (unsigned long long int)parent_rate * fix->mult;
34 do_div(rate, fix->div);
35 return (unsigned long)rate;
33} 36}
34 37
35static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate, 38static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index af78ed6b67ef..dc58fbd8516f 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -85,7 +85,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
85/** 85/**
86 * of_fixed_clk_setup() - Setup function for simple fixed rate clock 86 * of_fixed_clk_setup() - Setup function for simple fixed rate clock
87 */ 87 */
88void __init of_fixed_clk_setup(struct device_node *node) 88void of_fixed_clk_setup(struct device_node *node)
89{ 89{
90 struct clk *clk; 90 struct clk *clk;
91 const char *clk_name = node->name; 91 const char *clk_name = node->name;
@@ -101,4 +101,5 @@ void __init of_fixed_clk_setup(struct device_node *node)
101 of_clk_add_provider(node, of_clk_src_simple_get, clk); 101 of_clk_add_provider(node, of_clk_src_simple_get, clk);
102} 102}
103EXPORT_SYMBOL_GPL(of_fixed_clk_setup); 103EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
104CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
104#endif 105#endif
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 52fecadf004a..2e08cb001936 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -182,8 +182,10 @@ static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
182 reg |= HB_PLL_EXT_ENA; 182 reg |= HB_PLL_EXT_ENA;
183 reg &= ~HB_PLL_EXT_BYPASS; 183 reg &= ~HB_PLL_EXT_BYPASS;
184 } else { 184 } else {
185 writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
185 reg &= ~HB_PLL_DIVQ_MASK; 186 reg &= ~HB_PLL_DIVQ_MASK;
186 reg |= divq << HB_PLL_DIVQ_SHIFT; 187 reg |= divq << HB_PLL_DIVQ_SHIFT;
188 writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
187 } 189 }
188 writel(reg, hbclk->reg); 190 writel(reg, hbclk->reg);
189 191
@@ -314,33 +316,23 @@ static void __init hb_pll_init(struct device_node *node)
314{ 316{
315 hb_clk_init(node, &clk_pll_ops); 317 hb_clk_init(node, &clk_pll_ops);
316} 318}
319CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
317 320
318static void __init hb_a9periph_init(struct device_node *node) 321static void __init hb_a9periph_init(struct device_node *node)
319{ 322{
320 hb_clk_init(node, &a9periphclk_ops); 323 hb_clk_init(node, &a9periphclk_ops);
321} 324}
325CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
322 326
323static void __init hb_a9bus_init(struct device_node *node) 327static void __init hb_a9bus_init(struct device_node *node)
324{ 328{
325 struct clk *clk = hb_clk_init(node, &a9bclk_ops); 329 struct clk *clk = hb_clk_init(node, &a9bclk_ops);
326 clk_prepare_enable(clk); 330 clk_prepare_enable(clk);
327} 331}
332CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
328 333
329static void __init hb_emmc_init(struct device_node *node) 334static void __init hb_emmc_init(struct device_node *node)
330{ 335{
331 hb_clk_init(node, &periclk_ops); 336 hb_clk_init(node, &periclk_ops);
332} 337}
333 338CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
334static const __initconst struct of_device_id clk_match[] = {
335 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
336 { .compatible = "calxeda,hb-pll-clock", .data = hb_pll_init, },
337 { .compatible = "calxeda,hb-a9periph-clock", .data = hb_a9periph_init, },
338 { .compatible = "calxeda,hb-a9bus-clock", .data = hb_a9bus_init, },
339 { .compatible = "calxeda,hb-emmc-clock", .data = hb_emmc_init, },
340 {}
341};
342
343void __init highbank_clocks_init(void)
344{
345 of_clk_init(clk_match);
346}
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index d098f72e1d5f..9f57bc37cd60 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -44,33 +44,23 @@ struct max77686_clk {
44 struct clk_lookup *lookup; 44 struct clk_lookup *lookup;
45}; 45};
46 46
47static struct max77686_clk *get_max77686_clk(struct clk_hw *hw) 47static struct max77686_clk *to_max77686_clk(struct clk_hw *hw)
48{ 48{
49 return container_of(hw, struct max77686_clk, hw); 49 return container_of(hw, struct max77686_clk, hw);
50} 50}
51 51
52static int max77686_clk_prepare(struct clk_hw *hw) 52static int max77686_clk_prepare(struct clk_hw *hw)
53{ 53{
54 struct max77686_clk *max77686; 54 struct max77686_clk *max77686 = to_max77686_clk(hw);
55 int ret;
56
57 max77686 = get_max77686_clk(hw);
58 if (!max77686)
59 return -ENOMEM;
60
61 ret = regmap_update_bits(max77686->iodev->regmap,
62 MAX77686_REG_32KHZ, max77686->mask, max77686->mask);
63 55
64 return ret; 56 return regmap_update_bits(max77686->iodev->regmap,
57 MAX77686_REG_32KHZ, max77686->mask,
58 max77686->mask);
65} 59}
66 60
67static void max77686_clk_unprepare(struct clk_hw *hw) 61static void max77686_clk_unprepare(struct clk_hw *hw)
68{ 62{
69 struct max77686_clk *max77686; 63 struct max77686_clk *max77686 = to_max77686_clk(hw);
70
71 max77686 = get_max77686_clk(hw);
72 if (!max77686)
73 return;
74 64
75 regmap_update_bits(max77686->iodev->regmap, 65 regmap_update_bits(max77686->iodev->regmap,
76 MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask); 66 MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask);
@@ -78,14 +68,10 @@ static void max77686_clk_unprepare(struct clk_hw *hw)
78 68
79static int max77686_clk_is_enabled(struct clk_hw *hw) 69static int max77686_clk_is_enabled(struct clk_hw *hw)
80{ 70{
81 struct max77686_clk *max77686; 71 struct max77686_clk *max77686 = to_max77686_clk(hw);
82 int ret; 72 int ret;
83 u32 val; 73 u32 val;
84 74
85 max77686 = get_max77686_clk(hw);
86 if (!max77686)
87 return -ENOMEM;
88
89 ret = regmap_read(max77686->iodev->regmap, 75 ret = regmap_read(max77686->iodev->regmap,
90 MAX77686_REG_32KHZ, &val); 76 MAX77686_REG_32KHZ, &val);
91 77
@@ -130,9 +116,8 @@ static int max77686_clk_register(struct device *dev,
130 if (IS_ERR(clk)) 116 if (IS_ERR(clk))
131 return -ENOMEM; 117 return -ENOMEM;
132 118
133 max77686->lookup = devm_kzalloc(dev, sizeof(struct clk_lookup), 119 max77686->lookup = kzalloc(sizeof(struct clk_lookup), GFP_KERNEL);
134 GFP_KERNEL); 120 if (!max77686->lookup)
135 if (IS_ERR(max77686->lookup))
136 return -ENOMEM; 121 return -ENOMEM;
137 122
138 max77686->lookup->con_id = hw->init->name; 123 max77686->lookup->con_id = hw->init->name;
@@ -151,13 +136,13 @@ static int max77686_clk_probe(struct platform_device *pdev)
151 136
152 max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *) 137 max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *)
153 * MAX77686_CLKS_NUM, GFP_KERNEL); 138 * MAX77686_CLKS_NUM, GFP_KERNEL);
154 if (IS_ERR(max77686_clks)) 139 if (!max77686_clks)
155 return -ENOMEM; 140 return -ENOMEM;
156 141
157 for (i = 0; i < MAX77686_CLKS_NUM; i++) { 142 for (i = 0; i < MAX77686_CLKS_NUM; i++) {
158 max77686_clks[i] = devm_kzalloc(&pdev->dev, 143 max77686_clks[i] = devm_kzalloc(&pdev->dev,
159 sizeof(struct max77686_clk), GFP_KERNEL); 144 sizeof(struct max77686_clk), GFP_KERNEL);
160 if (IS_ERR(max77686_clks[i])) 145 if (!max77686_clks[i])
161 return -ENOMEM; 146 return -ENOMEM;
162 } 147 }
163 148
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index a203ecccdc4f..f8e9d0c27be2 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1025,20 +1025,67 @@ static struct of_device_id rsc_ids[] = {
1025 {}, 1025 {},
1026}; 1026};
1027 1027
1028enum prima2_clk_index {
1029 /* 0 1 2 3 4 5 6 7 8 9 */
1030 rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
1031 mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
1032 spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
1033 usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
1034 usb0, usb1, maxclk,
1035};
1036
1037static __initdata struct clk_hw* prima2_clk_hw_array[maxclk] = {
1038 NULL, /* dummy */
1039 NULL,
1040 &clk_pll1.hw,
1041 &clk_pll2.hw,
1042 &clk_pll3.hw,
1043 &clk_mem.hw,
1044 &clk_sys.hw,
1045 &clk_security.hw,
1046 &clk_dsp.hw,
1047 &clk_gps.hw,
1048 &clk_mf.hw,
1049 &clk_io.hw,
1050 &clk_cpu.hw,
1051 &clk_uart0.hw,
1052 &clk_uart1.hw,
1053 &clk_uart2.hw,
1054 &clk_tsc.hw,
1055 &clk_i2c0.hw,
1056 &clk_i2c1.hw,
1057 &clk_spi0.hw,
1058 &clk_spi1.hw,
1059 &clk_pwmc.hw,
1060 &clk_efuse.hw,
1061 &clk_pulse.hw,
1062 &clk_dmac0.hw,
1063 &clk_dmac1.hw,
1064 &clk_nand.hw,
1065 &clk_audio.hw,
1066 &clk_usp0.hw,
1067 &clk_usp1.hw,
1068 &clk_usp2.hw,
1069 &clk_vip.hw,
1070 &clk_gfx.hw,
1071 &clk_mm.hw,
1072 &clk_lcd.hw,
1073 &clk_vpp.hw,
1074 &clk_mmc01.hw,
1075 &clk_mmc23.hw,
1076 &clk_mmc45.hw,
1077 &usb_pll_clk_hw,
1078 &clk_usb0.hw,
1079 &clk_usb1.hw,
1080};
1081
1082static struct clk *prima2_clks[maxclk];
1083static struct clk_onecell_data clk_data;
1084
1028void __init sirfsoc_of_clk_init(void) 1085void __init sirfsoc_of_clk_init(void)
1029{ 1086{
1030 struct clk *clk;
1031 struct device_node *np; 1087 struct device_node *np;
1032 1088 int i;
1033 np = of_find_matching_node(NULL, clkc_ids);
1034 if (!np)
1035 panic("unable to find compatible clkc node in dtb\n");
1036
1037 sirfsoc_clk_vbase = of_iomap(np, 0);
1038 if (!sirfsoc_clk_vbase)
1039 panic("unable to map clkc registers\n");
1040
1041 of_node_put(np);
1042 1089
1043 np = of_find_matching_node(NULL, rsc_ids); 1090 np = of_find_matching_node(NULL, rsc_ids);
1044 if (!np) 1091 if (!np)
@@ -1050,122 +1097,30 @@ void __init sirfsoc_of_clk_init(void)
1050 1097
1051 of_node_put(np); 1098 of_node_put(np);
1052 1099
1100 np = of_find_matching_node(NULL, clkc_ids);
1101 if (!np)
1102 return;
1103
1104 sirfsoc_clk_vbase = of_iomap(np, 0);
1105 if (!sirfsoc_clk_vbase)
1106 panic("unable to map clkc registers\n");
1053 1107
1054 /* These are always available (RTC and 26MHz OSC)*/ 1108 /* These are always available (RTC and 26MHz OSC)*/
1055 clk = clk_register_fixed_rate(NULL, "rtc", NULL, 1109 prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
1056 CLK_IS_ROOT, 32768); 1110 CLK_IS_ROOT, 32768);
1057 BUG_ON(IS_ERR(clk)); 1111 prima2_clks[osc]= clk_register_fixed_rate(NULL, "osc", NULL,
1058 clk = clk_register_fixed_rate(NULL, "osc", NULL,
1059 CLK_IS_ROOT, 26000000); 1112 CLK_IS_ROOT, 26000000);
1060 BUG_ON(IS_ERR(clk)); 1113
1061 1114 for (i = pll1; i < maxclk; i++) {
1062 clk = clk_register(NULL, &clk_pll1.hw); 1115 prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
1063 BUG_ON(IS_ERR(clk)); 1116 BUG_ON(!prima2_clks[i]);
1064 clk = clk_register(NULL, &clk_pll2.hw); 1117 }
1065 BUG_ON(IS_ERR(clk)); 1118 clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
1066 clk = clk_register(NULL, &clk_pll3.hw); 1119 clk_register_clkdev(prima2_clks[io], NULL, "io");
1067 BUG_ON(IS_ERR(clk)); 1120 clk_register_clkdev(prima2_clks[mem], NULL, "mem");
1068 clk = clk_register(NULL, &clk_mem.hw); 1121
1069 BUG_ON(IS_ERR(clk)); 1122 clk_data.clks = prima2_clks;
1070 clk = clk_register(NULL, &clk_sys.hw); 1123 clk_data.clk_num = maxclk;
1071 BUG_ON(IS_ERR(clk)); 1124
1072 clk = clk_register(NULL, &clk_security.hw); 1125 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
1073 BUG_ON(IS_ERR(clk));
1074 clk_register_clkdev(clk, NULL, "b8030000.security");
1075 clk = clk_register(NULL, &clk_dsp.hw);
1076 BUG_ON(IS_ERR(clk));
1077 clk = clk_register(NULL, &clk_gps.hw);
1078 BUG_ON(IS_ERR(clk));
1079 clk_register_clkdev(clk, NULL, "a8010000.gps");
1080 clk = clk_register(NULL, &clk_mf.hw);
1081 BUG_ON(IS_ERR(clk));
1082 clk = clk_register(NULL, &clk_io.hw);
1083 BUG_ON(IS_ERR(clk));
1084 clk_register_clkdev(clk, NULL, "io");
1085 clk = clk_register(NULL, &clk_cpu.hw);
1086 BUG_ON(IS_ERR(clk));
1087 clk_register_clkdev(clk, NULL, "cpu");
1088 clk = clk_register(NULL, &clk_uart0.hw);
1089 BUG_ON(IS_ERR(clk));
1090 clk_register_clkdev(clk, NULL, "b0050000.uart");
1091 clk = clk_register(NULL, &clk_uart1.hw);
1092 BUG_ON(IS_ERR(clk));
1093 clk_register_clkdev(clk, NULL, "b0060000.uart");
1094 clk = clk_register(NULL, &clk_uart2.hw);
1095 BUG_ON(IS_ERR(clk));
1096 clk_register_clkdev(clk, NULL, "b0070000.uart");
1097 clk = clk_register(NULL, &clk_tsc.hw);
1098 BUG_ON(IS_ERR(clk));
1099 clk_register_clkdev(clk, NULL, "b0110000.tsc");
1100 clk = clk_register(NULL, &clk_i2c0.hw);
1101 BUG_ON(IS_ERR(clk));
1102 clk_register_clkdev(clk, NULL, "b00e0000.i2c");
1103 clk = clk_register(NULL, &clk_i2c1.hw);
1104 BUG_ON(IS_ERR(clk));
1105 clk_register_clkdev(clk, NULL, "b00f0000.i2c");
1106 clk = clk_register(NULL, &clk_spi0.hw);
1107 BUG_ON(IS_ERR(clk));
1108 clk_register_clkdev(clk, NULL, "b00d0000.spi");
1109 clk = clk_register(NULL, &clk_spi1.hw);
1110 BUG_ON(IS_ERR(clk));
1111 clk_register_clkdev(clk, NULL, "b0170000.spi");
1112 clk = clk_register(NULL, &clk_pwmc.hw);
1113 BUG_ON(IS_ERR(clk));
1114 clk_register_clkdev(clk, NULL, "b0130000.pwm");
1115 clk = clk_register(NULL, &clk_efuse.hw);
1116 BUG_ON(IS_ERR(clk));
1117 clk_register_clkdev(clk, NULL, "b0140000.efusesys");
1118 clk = clk_register(NULL, &clk_pulse.hw);
1119 BUG_ON(IS_ERR(clk));
1120 clk_register_clkdev(clk, NULL, "b0150000.pulsec");
1121 clk = clk_register(NULL, &clk_dmac0.hw);
1122 BUG_ON(IS_ERR(clk));
1123 clk_register_clkdev(clk, NULL, "b00b0000.dma-controller");
1124 clk = clk_register(NULL, &clk_dmac1.hw);
1125 BUG_ON(IS_ERR(clk));
1126 clk_register_clkdev(clk, NULL, "b0160000.dma-controller");
1127 clk = clk_register(NULL, &clk_nand.hw);
1128 BUG_ON(IS_ERR(clk));
1129 clk_register_clkdev(clk, NULL, "b0030000.nand");
1130 clk = clk_register(NULL, &clk_audio.hw);
1131 BUG_ON(IS_ERR(clk));
1132 clk_register_clkdev(clk, NULL, "b0040000.audio");
1133 clk = clk_register(NULL, &clk_usp0.hw);
1134 BUG_ON(IS_ERR(clk));
1135 clk_register_clkdev(clk, NULL, "b0080000.usp");
1136 clk = clk_register(NULL, &clk_usp1.hw);
1137 BUG_ON(IS_ERR(clk));
1138 clk_register_clkdev(clk, NULL, "b0090000.usp");
1139 clk = clk_register(NULL, &clk_usp2.hw);
1140 BUG_ON(IS_ERR(clk));
1141 clk_register_clkdev(clk, NULL, "b00a0000.usp");
1142 clk = clk_register(NULL, &clk_vip.hw);
1143 BUG_ON(IS_ERR(clk));
1144 clk_register_clkdev(clk, NULL, "b00c0000.vip");
1145 clk = clk_register(NULL, &clk_gfx.hw);
1146 BUG_ON(IS_ERR(clk));
1147 clk_register_clkdev(clk, NULL, "98000000.graphics");
1148 clk = clk_register(NULL, &clk_mm.hw);
1149 BUG_ON(IS_ERR(clk));
1150 clk_register_clkdev(clk, NULL, "a0000000.multimedia");
1151 clk = clk_register(NULL, &clk_lcd.hw);
1152 BUG_ON(IS_ERR(clk));
1153 clk_register_clkdev(clk, NULL, "90010000.display");
1154 clk = clk_register(NULL, &clk_vpp.hw);
1155 BUG_ON(IS_ERR(clk));
1156 clk_register_clkdev(clk, NULL, "90020000.vpp");
1157 clk = clk_register(NULL, &clk_mmc01.hw);
1158 BUG_ON(IS_ERR(clk));
1159 clk = clk_register(NULL, &clk_mmc23.hw);
1160 BUG_ON(IS_ERR(clk));
1161 clk = clk_register(NULL, &clk_mmc45.hw);
1162 BUG_ON(IS_ERR(clk));
1163 clk = clk_register(NULL, &usb_pll_clk_hw);
1164 BUG_ON(IS_ERR(clk));
1165 clk = clk_register(NULL, &clk_usb0.hw);
1166 BUG_ON(IS_ERR(clk));
1167 clk_register_clkdev(clk, NULL, "b00e0000.usb");
1168 clk = clk_register(NULL, &clk_usb1.hw);
1169 BUG_ON(IS_ERR(clk));
1170 clk_register_clkdev(clk, NULL, "b00f0000.usb");
1171} 1126}
diff --git a/drivers/clk/clk-sunxi.c b/drivers/clk/clk-sunxi.c
deleted file mode 100644
index 0e831b584ba7..000000000000
--- a/drivers/clk/clk-sunxi.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright 2012 Maxime Ripard
3 *
4 * Maxime Ripard <maxime.ripard@free-electrons.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/clk/sunxi.h>
20#include <linux/of.h>
21
22static const __initconst struct of_device_id clk_match[] = {
23 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
24 {}
25};
26
27void __init sunxi_init_clocks(void)
28{
29 of_clk_init(clk_match);
30}
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index fe25570874d6..b5538bba7a10 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -41,6 +41,7 @@ struct clk_device {
41 41
42#define PLL_TYPE_VT8500 0 42#define PLL_TYPE_VT8500 0
43#define PLL_TYPE_WM8650 1 43#define PLL_TYPE_WM8650 1
44#define PLL_TYPE_WM8750 2
44 45
45struct clk_pll { 46struct clk_pll {
46 struct clk_hw hw; 47 struct clk_hw hw;
@@ -121,7 +122,16 @@ static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
121 unsigned long *prate) 122 unsigned long *prate)
122{ 123{
123 struct clk_device *cdev = to_clk_device(hw); 124 struct clk_device *cdev = to_clk_device(hw);
124 u32 divisor = *prate / rate; 125 u32 divisor;
126
127 if (rate == 0)
128 return 0;
129
130 divisor = *prate / rate;
131
132 /* If prate / rate would be decimal, incr the divisor */
133 if (rate * divisor < *prate)
134 divisor++;
125 135
126 /* 136 /*
127 * If this is a request for SDMMC we have to adjust the divisor 137 * If this is a request for SDMMC we have to adjust the divisor
@@ -138,9 +148,18 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
138 unsigned long parent_rate) 148 unsigned long parent_rate)
139{ 149{
140 struct clk_device *cdev = to_clk_device(hw); 150 struct clk_device *cdev = to_clk_device(hw);
141 u32 divisor = parent_rate / rate; 151 u32 divisor;
142 unsigned long flags = 0; 152 unsigned long flags = 0;
143 153
154 if (rate == 0)
155 return 0;
156
157 divisor = parent_rate / rate;
158
159 /* If prate / rate would be decimal, incr the divisor */
160 if (rate * divisor < *prate)
161 divisor++;
162
144 if (divisor == cdev->div_mask + 1) 163 if (divisor == cdev->div_mask + 1)
145 divisor = 0; 164 divisor = 0;
146 165
@@ -272,7 +291,7 @@ static __init void vtwm_device_clk_init(struct device_node *node)
272 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); 291 rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
273 clk_register_clkdev(clk, clk_name, NULL); 292 clk_register_clkdev(clk, clk_name, NULL);
274} 293}
275 294CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
276 295
277/* PLL clock related functions */ 296/* PLL clock related functions */
278 297
@@ -298,6 +317,16 @@ static __init void vtwm_device_clk_init(struct device_node *node)
298#define WM8650_BITS_TO_VAL(m, d1, d2) \ 317#define WM8650_BITS_TO_VAL(m, d1, d2) \
299 ((d2 << 13) | (d1 << 10) | (m & 0x3FF)) 318 ((d2 << 13) | (d1 << 10) | (m & 0x3FF))
300 319
320/* Helper macros for PLL_WM8750 */
321#define WM8750_PLL_MUL(x) (((x >> 16) & 0xFF) + 1)
322#define WM8750_PLL_DIV(x) ((((x >> 8) & 1) + 1) * (1 << (x & 7)))
323
324#define WM8750_BITS_TO_FREQ(r, m, d1, d2) \
325 (r * (m+1) / ((d1+1) * (1 << d2)))
326
327#define WM8750_BITS_TO_VAL(f, m, d1, d2) \
328 ((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2)
329
301 330
302static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate, 331static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
303 u32 *multiplier, u32 *prediv) 332 u32 *multiplier, u32 *prediv)
@@ -361,16 +390,87 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
361 /* if we got here, it wasn't an exact match */ 390 /* if we got here, it wasn't an exact match */
362 pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate, 391 pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
363 rate - best_err); 392 rate - best_err);
364 *multiplier = mul; 393 *multiplier = best_mul;
365 *divisor1 = div1; 394 *divisor1 = best_div1;
366 *divisor2 = div2; 395 *divisor2 = best_div2;
396}
397
398static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
399{
400 /* calculate frequency (MHz) after pre-divisor */
401 u32 freq = (parent_rate / 1000000) / (divisor1 + 1);
402
403 if ((freq < 10) || (freq > 200))
404 pr_warn("%s: PLL recommended input frequency 10..200Mhz (requested %d Mhz)\n",
405 __func__, freq);
406
407 if (freq >= 166)
408 return 7;
409 else if (freq >= 104)
410 return 6;
411 else if (freq >= 65)
412 return 5;
413 else if (freq >= 42)
414 return 4;
415 else if (freq >= 26)
416 return 3;
417 else if (freq >= 16)
418 return 2;
419 else if (freq >= 10)
420 return 1;
421
422 return 0;
423}
424
425static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
426 u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
427{
428 u32 mul, div1, div2;
429 u32 best_mul, best_div1, best_div2;
430 unsigned long tclk, rate_err, best_err;
431
432 best_err = (unsigned long)-1;
433
434 /* Find the closest match (lower or equal to requested) */
435 for (div1 = 1; div1 >= 0; div1--)
436 for (div2 = 7; div2 >= 0; div2--)
437 for (mul = 0; mul <= 255; mul++) {
438 tclk = parent_rate * (mul + 1) / ((div1 + 1) * (1 << div2));
439 if (tclk > rate)
440 continue;
441 /* error will always be +ve */
442 rate_err = rate - tclk;
443 if (rate_err == 0) {
444 *filter = wm8750_get_filter(parent_rate, div1);
445 *multiplier = mul;
446 *divisor1 = div1;
447 *divisor2 = div2;
448 return;
449 }
450
451 if (rate_err < best_err) {
452 best_err = rate_err;
453 best_mul = mul;
454 best_div1 = div1;
455 best_div2 = div2;
456 }
457 }
458
459 /* if we got here, it wasn't an exact match */
460 pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
461 rate - best_err);
462
463 *filter = wm8750_get_filter(parent_rate, best_div1);
464 *multiplier = best_mul;
465 *divisor1 = best_div1;
466 *divisor2 = best_div2;
367} 467}
368 468
369static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate, 469static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
370 unsigned long parent_rate) 470 unsigned long parent_rate)
371{ 471{
372 struct clk_pll *pll = to_clk_pll(hw); 472 struct clk_pll *pll = to_clk_pll(hw);
373 u32 mul, div1, div2; 473 u32 filter, mul, div1, div2;
374 u32 pll_val; 474 u32 pll_val;
375 unsigned long flags = 0; 475 unsigned long flags = 0;
376 476
@@ -385,6 +485,9 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
385 wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2); 485 wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
386 pll_val = WM8650_BITS_TO_VAL(mul, div1, div2); 486 pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
387 break; 487 break;
488 case PLL_TYPE_WM8750:
489 wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
490 pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
388 default: 491 default:
389 pr_err("%s: invalid pll type\n", __func__); 492 pr_err("%s: invalid pll type\n", __func__);
390 return 0; 493 return 0;
@@ -405,7 +508,7 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
405 unsigned long *prate) 508 unsigned long *prate)
406{ 509{
407 struct clk_pll *pll = to_clk_pll(hw); 510 struct clk_pll *pll = to_clk_pll(hw);
408 u32 mul, div1, div2; 511 u32 filter, mul, div1, div2;
409 long round_rate; 512 long round_rate;
410 513
411 switch (pll->type) { 514 switch (pll->type) {
@@ -417,6 +520,9 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
417 wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2); 520 wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
418 round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2); 521 round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
419 break; 522 break;
523 case PLL_TYPE_WM8750:
524 wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
525 round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
420 default: 526 default:
421 round_rate = 0; 527 round_rate = 0;
422 } 528 }
@@ -440,6 +546,10 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
440 pll_freq = parent_rate * WM8650_PLL_MUL(pll_val); 546 pll_freq = parent_rate * WM8650_PLL_MUL(pll_val);
441 pll_freq /= WM8650_PLL_DIV(pll_val); 547 pll_freq /= WM8650_PLL_DIV(pll_val);
442 break; 548 break;
549 case PLL_TYPE_WM8750:
550 pll_freq = parent_rate * WM8750_PLL_MUL(pll_val);
551 pll_freq /= WM8750_PLL_DIV(pll_val);
552 break;
443 default: 553 default:
444 pll_freq = 0; 554 pll_freq = 0;
445 } 555 }
@@ -502,20 +612,19 @@ static void __init vt8500_pll_init(struct device_node *node)
502{ 612{
503 vtwm_pll_clk_init(node, PLL_TYPE_VT8500); 613 vtwm_pll_clk_init(node, PLL_TYPE_VT8500);
504} 614}
615CLK_OF_DECLARE(vt8500_pll, "via,vt8500-pll-clock", vt8500_pll_init);
505 616
506static void __init wm8650_pll_init(struct device_node *node) 617static void __init wm8650_pll_init(struct device_node *node)
507{ 618{
508 vtwm_pll_clk_init(node, PLL_TYPE_WM8650); 619 vtwm_pll_clk_init(node, PLL_TYPE_WM8650);
509} 620}
621CLK_OF_DECLARE(wm8650_pll, "wm,wm8650-pll-clock", wm8650_pll_init);
510 622
511static const __initconst struct of_device_id clk_match[] = { 623static void __init wm8750_pll_init(struct device_node *node)
512 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, }, 624{
513 { .compatible = "via,vt8500-pll-clock", .data = vt8500_pll_init, }, 625 vtwm_pll_clk_init(node, PLL_TYPE_WM8750);
514 { .compatible = "wm,wm8650-pll-clock", .data = wm8650_pll_init, }, 626}
515 { .compatible = "via,vt8500-device-clock", 627CLK_OF_DECLARE(wm8750_pll, "wm,wm8750-pll-clock", wm8750_pll_init);
516 .data = vtwm_device_clk_init, },
517 { /* sentinel */ }
518};
519 628
520void __init vtwm_clk_init(void __iomem *base) 629void __init vtwm_clk_init(void __iomem *base)
521{ 630{
@@ -524,5 +633,5 @@ void __init vtwm_clk_init(void __iomem *base)
524 633
525 pmc_base = base; 634 pmc_base = base;
526 635
527 of_clk_init(clk_match); 636 of_clk_init(NULL);
528} 637}
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
index 37a30514fd66..b14a25f39255 100644
--- a/drivers/clk/clk-zynq.c
+++ b/drivers/clk/clk-zynq.c
@@ -81,6 +81,7 @@ static void __init zynq_pll_clk_setup(struct device_node *np)
81 if (WARN_ON(ret)) 81 if (WARN_ON(ret))
82 return; 82 return;
83} 83}
84CLK_OF_DECLARE(zynq_pll, "xlnx,zynq-pll", zynq_pll_clk_setup);
84 85
85struct zynq_periph_clk { 86struct zynq_periph_clk {
86 struct clk_hw hw; 87 struct clk_hw hw;
@@ -187,6 +188,7 @@ static void __init zynq_periph_clk_setup(struct device_node *np)
187 if (WARN_ON(err)) 188 if (WARN_ON(err))
188 return; 189 return;
189} 190}
191CLK_OF_DECLARE(zynq_periph, "xlnx,zynq-periph-clock", zynq_periph_clk_setup);
190 192
191/* CPU Clock domain is modelled as a mux with 4 children subclks, whose 193/* CPU Clock domain is modelled as a mux with 4 children subclks, whose
192 * derivative rates depend on CLK_621_TRUE 194 * derivative rates depend on CLK_621_TRUE
@@ -366,18 +368,10 @@ static void __init zynq_cpu_clk_setup(struct device_node *np)
366 if (WARN_ON(err)) 368 if (WARN_ON(err))
367 return; 369 return;
368} 370}
369 371CLK_OF_DECLARE(zynq_cpu, "xlnx,zynq-cpu-clock", zynq_cpu_clk_setup);
370static const __initconst struct of_device_id zynq_clk_match[] = {
371 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
372 { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
373 { .compatible = "xlnx,zynq-periph-clock",
374 .data = zynq_periph_clk_setup, },
375 { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
376 {}
377};
378 372
379void __init xilinx_zynq_clocks_init(void __iomem *slcr) 373void __init xilinx_zynq_clocks_init(void __iomem *slcr)
380{ 374{
381 slcr_base = slcr; 375 slcr_base = slcr;
382 of_clk_init(zynq_clk_match); 376 of_clk_init(NULL);
383} 377}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 251e45d6024d..fabbfe1a9253 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/init.h>
21 22
22static DEFINE_SPINLOCK(enable_lock); 23static DEFINE_SPINLOCK(enable_lock);
23static DEFINE_MUTEX(prepare_lock); 24static DEFINE_MUTEX(prepare_lock);
@@ -35,6 +36,137 @@ static struct dentry *rootdir;
35static struct dentry *orphandir; 36static struct dentry *orphandir;
36static int inited = 0; 37static int inited = 0;
37 38
39static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
40{
41 if (!c)
42 return;
43
44 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
45 level * 3 + 1, "",
46 30 - level * 3, c->name,
47 c->enable_count, c->prepare_count, c->rate);
48 seq_printf(s, "\n");
49}
50
51static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
52 int level)
53{
54 struct clk *child;
55 struct hlist_node *tmp;
56
57 if (!c)
58 return;
59
60 clk_summary_show_one(s, c, level);
61
62 hlist_for_each_entry(child, tmp, &c->children, child_node)
63 clk_summary_show_subtree(s, child, level + 1);
64}
65
66static int clk_summary_show(struct seq_file *s, void *data)
67{
68 struct clk *c;
69 struct hlist_node *tmp;
70
71 seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
72 seq_printf(s, "---------------------------------------------------------------------\n");
73
74 mutex_lock(&prepare_lock);
75
76 hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
77 clk_summary_show_subtree(s, c, 0);
78
79 hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
80 clk_summary_show_subtree(s, c, 0);
81
82 mutex_unlock(&prepare_lock);
83
84 return 0;
85}
86
87
88static int clk_summary_open(struct inode *inode, struct file *file)
89{
90 return single_open(file, clk_summary_show, inode->i_private);
91}
92
93static const struct file_operations clk_summary_fops = {
94 .open = clk_summary_open,
95 .read = seq_read,
96 .llseek = seq_lseek,
97 .release = single_release,
98};
99
100static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
101{
102 if (!c)
103 return;
104
105 seq_printf(s, "\"%s\": { ", c->name);
106 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
107 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
108 seq_printf(s, "\"rate\": %lu", c->rate);
109}
110
111static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
112{
113 struct clk *child;
114 struct hlist_node *tmp;
115
116 if (!c)
117 return;
118
119 clk_dump_one(s, c, level);
120
121 hlist_for_each_entry(child, tmp, &c->children, child_node) {
122 seq_printf(s, ",");
123 clk_dump_subtree(s, child, level + 1);
124 }
125
126 seq_printf(s, "}");
127}
128
129static int clk_dump(struct seq_file *s, void *data)
130{
131 struct clk *c;
132 struct hlist_node *tmp;
133 bool first_node = true;
134
135 seq_printf(s, "{");
136
137 mutex_lock(&prepare_lock);
138
139 hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
140 if (!first_node)
141 seq_printf(s, ",");
142 first_node = false;
143 clk_dump_subtree(s, c, 0);
144 }
145
146 hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
147 seq_printf(s, ",");
148 clk_dump_subtree(s, c, 0);
149 }
150
151 mutex_unlock(&prepare_lock);
152
153 seq_printf(s, "}");
154 return 0;
155}
156
157
158static int clk_dump_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, clk_dump, inode->i_private);
161}
162
163static const struct file_operations clk_dump_fops = {
164 .open = clk_dump_open,
165 .read = seq_read,
166 .llseek = seq_lseek,
167 .release = single_release,
168};
169
38/* caller must hold prepare_lock */ 170/* caller must hold prepare_lock */
39static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 171static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
40{ 172{
@@ -168,12 +300,23 @@ static int __init clk_debug_init(void)
168{ 300{
169 struct clk *clk; 301 struct clk *clk;
170 struct hlist_node *tmp; 302 struct hlist_node *tmp;
303 struct dentry *d;
171 304
172 rootdir = debugfs_create_dir("clk", NULL); 305 rootdir = debugfs_create_dir("clk", NULL);
173 306
174 if (!rootdir) 307 if (!rootdir)
175 return -ENOMEM; 308 return -ENOMEM;
176 309
310 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
311 &clk_summary_fops);
312 if (!d)
313 return -ENOMEM;
314
315 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
316 &clk_dump_fops);
317 if (!d)
318 return -ENOMEM;
319
177 orphandir = debugfs_create_dir("orphans", rootdir); 320 orphandir = debugfs_create_dir("orphans", rootdir);
178 321
179 if (!orphandir) 322 if (!orphandir)
@@ -259,32 +402,33 @@ late_initcall(clk_disable_unused);
259 402
260/*** helper functions ***/ 403/*** helper functions ***/
261 404
262inline const char *__clk_get_name(struct clk *clk) 405const char *__clk_get_name(struct clk *clk)
263{ 406{
264 return !clk ? NULL : clk->name; 407 return !clk ? NULL : clk->name;
265} 408}
409EXPORT_SYMBOL_GPL(__clk_get_name);
266 410
267inline struct clk_hw *__clk_get_hw(struct clk *clk) 411struct clk_hw *__clk_get_hw(struct clk *clk)
268{ 412{
269 return !clk ? NULL : clk->hw; 413 return !clk ? NULL : clk->hw;
270} 414}
271 415
272inline u8 __clk_get_num_parents(struct clk *clk) 416u8 __clk_get_num_parents(struct clk *clk)
273{ 417{
274 return !clk ? 0 : clk->num_parents; 418 return !clk ? 0 : clk->num_parents;
275} 419}
276 420
277inline struct clk *__clk_get_parent(struct clk *clk) 421struct clk *__clk_get_parent(struct clk *clk)
278{ 422{
279 return !clk ? NULL : clk->parent; 423 return !clk ? NULL : clk->parent;
280} 424}
281 425
282inline unsigned int __clk_get_enable_count(struct clk *clk) 426unsigned int __clk_get_enable_count(struct clk *clk)
283{ 427{
284 return !clk ? 0 : clk->enable_count; 428 return !clk ? 0 : clk->enable_count;
285} 429}
286 430
287inline unsigned int __clk_get_prepare_count(struct clk *clk) 431unsigned int __clk_get_prepare_count(struct clk *clk)
288{ 432{
289 return !clk ? 0 : clk->prepare_count; 433 return !clk ? 0 : clk->prepare_count;
290} 434}
@@ -310,7 +454,7 @@ out:
310 return ret; 454 return ret;
311} 455}
312 456
313inline unsigned long __clk_get_flags(struct clk *clk) 457unsigned long __clk_get_flags(struct clk *clk)
314{ 458{
315 return !clk ? 0 : clk->flags; 459 return !clk ? 0 : clk->flags;
316} 460}
@@ -950,9 +1094,6 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
950 /* change the rates */ 1094 /* change the rates */
951 clk_change_rate(top); 1095 clk_change_rate(top);
952 1096
953 mutex_unlock(&prepare_lock);
954
955 return 0;
956out: 1097out:
957 mutex_unlock(&prepare_lock); 1098 mutex_unlock(&prepare_lock);
958 1099
@@ -1663,6 +1804,11 @@ struct of_clk_provider {
1663 void *data; 1804 void *data;
1664}; 1805};
1665 1806
1807extern struct of_device_id __clk_of_table[];
1808
1809static const struct of_device_id __clk_of_table_sentinel
1810 __used __section(__clk_of_table_end);
1811
1666static LIST_HEAD(of_clk_providers); 1812static LIST_HEAD(of_clk_providers);
1667static DEFINE_MUTEX(of_clk_lock); 1813static DEFINE_MUTEX(of_clk_lock);
1668 1814
@@ -1791,6 +1937,9 @@ void __init of_clk_init(const struct of_device_id *matches)
1791{ 1937{
1792 struct device_node *np; 1938 struct device_node *np;
1793 1939
1940 if (!matches)
1941 matches = __clk_of_table;
1942
1794 for_each_matching_node(np, matches) { 1943 for_each_matching_node(np, matches) {
1795 const struct of_device_id *match = of_match_node(matches, np); 1944 const struct of_device_id *match = of_match_node(matches, np);
1796 of_clk_init_cb_t clk_init_cb = match->data; 1945 of_clk_init_cb_t clk_init_cb = match->data;
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index ff004578a119..9dd2551a0a41 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -124,7 +124,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
124 124
125 clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL); 125 clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
126 if (WARN_ON(!clks)) 126 if (WARN_ON(!clks))
127 return; 127 goto clks_out;
128 128
129 for_each_node_by_type(dn, "cpu") { 129 for_each_node_by_type(dn, "cpu") {
130 struct clk_init_data init; 130 struct clk_init_data init;
@@ -134,11 +134,11 @@ void __init of_cpu_clk_setup(struct device_node *node)
134 int cpu, err; 134 int cpu, err;
135 135
136 if (WARN_ON(!clk_name)) 136 if (WARN_ON(!clk_name))
137 return; 137 goto bail_out;
138 138
139 err = of_property_read_u32(dn, "reg", &cpu); 139 err = of_property_read_u32(dn, "reg", &cpu);
140 if (WARN_ON(err)) 140 if (WARN_ON(err))
141 return; 141 goto bail_out;
142 142
143 sprintf(clk_name, "cpu%d", cpu); 143 sprintf(clk_name, "cpu%d", cpu);
144 parent_clk = of_clk_get(node, 0); 144 parent_clk = of_clk_get(node, 0);
@@ -167,6 +167,9 @@ void __init of_cpu_clk_setup(struct device_node *node)
167 return; 167 return;
168bail_out: 168bail_out:
169 kfree(clks); 169 kfree(clks);
170 while(ncpus--)
171 kfree(cpuclk[ncpus].clk_name);
172clks_out:
170 kfree(cpuclk); 173 kfree(cpuclk);
171} 174}
172 175
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
index 8fa5408b6c7d..ebf141d4374b 100644
--- a/drivers/clk/mvebu/clk-gating-ctrl.c
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -193,6 +193,7 @@ static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
193 { "runit", NULL, 7 }, 193 { "runit", NULL, 7 },
194 { "xor0", NULL, 8 }, 194 { "xor0", NULL, 8 },
195 { "audio", NULL, 9 }, 195 { "audio", NULL, 9 },
196 { "powersave", "cpuclk", 11 },
196 { "sata0", NULL, 14 }, 197 { "sata0", NULL, 14 },
197 { "sata1", NULL, 15 }, 198 { "sata1", NULL, 15 },
198 { "xor1", NULL, 16 }, 199 { "xor1", NULL, 16 },
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 8dd476e2a9c5..b5c06f9766f6 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -99,7 +99,7 @@ static enum imx23_clk clks_init_on[] __initdata = {
99int __init mx23_clocks_init(void) 99int __init mx23_clocks_init(void)
100{ 100{
101 struct device_node *np; 101 struct device_node *np;
102 int i; 102 u32 i;
103 103
104 clk_misc_init(); 104 clk_misc_init();
105 105
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index db3af0874121..126370a62ce2 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -154,7 +154,7 @@ static enum imx28_clk clks_init_on[] __initdata = {
154int __init mx28_clocks_init(void) 154int __init mx28_clocks_init(void)
155{ 155{
156 struct device_node *np; 156 struct device_node *np;
157 int i; 157 u32 i;
158 158
159 clk_misc_init(); 159 clk_misc_init();
160 160
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index dcb6ae0a0425..256c8be74df8 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -144,3 +144,4 @@ error:
144 vexpress_config_func_put(osc->func); 144 vexpress_config_func_put(osc->func);
145 kfree(osc); 145 kfree(osc);
146} 146}
147CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup);
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index c742ac7c60bb..f889f2f07b37 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -99,19 +99,13 @@ struct clk *vexpress_sp810_of_get(struct of_phandle_args *clkspec, void *data)
99 return vexpress_sp810_timerclken[clkspec->args[0]]; 99 return vexpress_sp810_timerclken[clkspec->args[0]];
100} 100}
101 101
102static const __initconst struct of_device_id vexpress_fixed_clk_match[] = {
103 { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
104 { .compatible = "arm,vexpress-osc", .data = vexpress_osc_of_setup, },
105 {}
106};
107
108void __init vexpress_clk_of_init(void) 102void __init vexpress_clk_of_init(void)
109{ 103{
110 struct device_node *node; 104 struct device_node *node;
111 struct clk *clk; 105 struct clk *clk;
112 struct clk *refclk, *timclk; 106 struct clk *refclk, *timclk;
113 107
114 of_clk_init(vexpress_fixed_clk_match); 108 of_clk_init(NULL);
115 109
116 node = of_find_compatible_node(NULL, NULL, "arm,sp810"); 110 node = of_find_compatible_node(NULL, NULL, "arm,sp810");
117 vexpress_sp810_init(of_iomap(node, 0)); 111 vexpress_sp810_init(of_iomap(node, 0));
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
new file mode 100644
index 000000000000..f9ba4fab0ddc
--- /dev/null
+++ b/drivers/clk/x86/Makefile
@@ -0,0 +1,2 @@
1clk-x86-lpss-objs := clk-lpss.o clk-lpt.o
2obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
new file mode 100644
index 000000000000..b5e229f3c3d9
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.c
@@ -0,0 +1,99 @@
1/*
2 * Intel Low Power Subsystem clocks.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/clk.h>
15#include <linux/clk-provider.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/module.h>
19
20static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
21{
22 struct resource r;
23 return !acpi_dev_resource_memory(res, &r);
24}
25
26static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
27 void *data, void **retval)
28{
29 struct resource_list_entry *rentry;
30 struct list_head resource_list;
31 struct acpi_device *adev;
32 const char *uid = data;
33 int ret;
34
35 if (acpi_bus_get_device(handle, &adev))
36 return AE_OK;
37
38 if (uid) {
39 if (!adev->pnp.unique_id)
40 return AE_OK;
41 if (strcmp(uid, adev->pnp.unique_id))
42 return AE_OK;
43 }
44
45 INIT_LIST_HEAD(&resource_list);
46 ret = acpi_dev_get_resources(adev, &resource_list,
47 clk_lpss_is_mmio_resource, NULL);
48 if (ret < 0)
49 return AE_NO_MEMORY;
50
51 list_for_each_entry(rentry, &resource_list, node)
52 if (resource_type(&rentry->res) == IORESOURCE_MEM) {
53 *(struct resource *)retval = rentry->res;
54 break;
55 }
56
57 acpi_dev_free_resource_list(&resource_list);
58 return AE_OK;
59}
60
61/**
62 * clk_register_lpss_gate - register LPSS clock gate
63 * @name: name of this clock gate
64 * @parent_name: parent clock name
65 * @hid: ACPI _HID of the device
66 * @uid: ACPI _UID of the device (optional)
67 * @offset: LPSS PRV_CLOCK_PARAMS offset
68 *
69 * Creates and registers LPSS clock gate.
70 */
71struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
72 const char *hid, const char *uid,
73 unsigned offset)
74{
75 struct resource res = { };
76 void __iomem *mmio_base;
77 acpi_status status;
78 struct clk *clk;
79
80 /*
81 * First try to look the device and its mmio resource from the
82 * ACPI namespace.
83 */
84 status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
85 (void **)&res);
86 if (ACPI_FAILURE(status) || !res.start)
87 return ERR_PTR(-ENODEV);
88
89 mmio_base = ioremap(res.start, resource_size(&res));
90 if (!mmio_base)
91 return ERR_PTR(-ENOMEM);
92
93 clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
94 0, 0, NULL);
95 if (IS_ERR(clk))
96 iounmap(mmio_base);
97
98 return clk;
99}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
new file mode 100644
index 000000000000..e9460f442297
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.h
@@ -0,0 +1,36 @@
1/*
2 * Intel Low Power Subsystem clock.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __CLK_LPSS_H
14#define __CLK_LPSS_H
15
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/clk.h>
19
20#ifdef CONFIG_ACPI
21extern struct clk *clk_register_lpss_gate(const char *name,
22 const char *parent_name,
23 const char *hid, const char *uid,
24 unsigned offset);
25#else
26static inline struct clk *clk_register_lpss_gate(const char *name,
27 const char *parent_name,
28 const char *hid,
29 const char *uid,
30 unsigned offset)
31{
32 return ERR_PTR(-ENODEV);
33}
34#endif
35
36#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
new file mode 100644
index 000000000000..81298aeef7e3
--- /dev/null
+++ b/drivers/clk/x86/clk-lpt.c
@@ -0,0 +1,86 @@
1/*
2 * Intel Lynxpoint LPSS clocks.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Heikki Krogerus <heikki.krogerus@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/clk-provider.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20
21#include "clk-lpss.h"
22
23#define PRV_CLOCK_PARAMS 0x800
24
25static int lpt_clk_probe(struct platform_device *pdev)
26{
27 struct clk *clk;
28
29 /* LPSS free running clock */
30 clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
31 100000000);
32 if (IS_ERR(clk))
33 return PTR_ERR(clk);
34
35 /* Shared DMA clock */
36 clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
37
38 /* SPI clocks */
39 clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
40 PRV_CLOCK_PARAMS);
41 if (!IS_ERR(clk))
42 clk_register_clkdev(clk, NULL, "INT33C0:00");
43
44 clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
45 PRV_CLOCK_PARAMS);
46 if (!IS_ERR(clk))
47 clk_register_clkdev(clk, NULL, "INT33C1:00");
48
49 /* I2C clocks */
50 clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
51 PRV_CLOCK_PARAMS);
52 if (!IS_ERR(clk))
53 clk_register_clkdev(clk, NULL, "INT33C2:00");
54
55 clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
56 PRV_CLOCK_PARAMS);
57 if (!IS_ERR(clk))
58 clk_register_clkdev(clk, NULL, "INT33C3:00");
59
60 /* UART clocks */
61 clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
62 PRV_CLOCK_PARAMS);
63 if (!IS_ERR(clk))
64 clk_register_clkdev(clk, NULL, "INT33C4:00");
65
66 clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
67 PRV_CLOCK_PARAMS);
68 if (!IS_ERR(clk))
69 clk_register_clkdev(clk, NULL, "INT33C5:00");
70
71 return 0;
72}
73
74static struct platform_driver lpt_clk_driver = {
75 .driver = {
76 .name = "clk-lpt",
77 .owner = THIS_MODULE,
78 },
79 .probe = lpt_clk_probe,
80};
81
82static int __init lpt_clk_init(void)
83{
84 return platform_driver_register(&lpt_clk_driver);
85}
86arch_initcall(lpt_clk_init);
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 3cd1bd3d7aee..93d09d0e009f 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -23,7 +23,7 @@
23#include <linux/of_address.h> 23#include <linux/of_address.h>
24#include <linux/of_irq.h> 24#include <linux/of_irq.h>
25#include <linux/sunxi_timer.h> 25#include <linux/sunxi_timer.h>
26#include <linux/clk/sunxi.h> 26#include <linux/clk-provider.h>
27 27
28#define TIMER_CTL_REG 0x00 28#define TIMER_CTL_REG 0x00
29#define TIMER_CTL_ENABLE (1 << 0) 29#define TIMER_CTL_ENABLE (1 << 0)
@@ -124,7 +124,7 @@ static void __init sunxi_timer_init(void)
124 if (irq <= 0) 124 if (irq <= 0)
125 panic("Can't parse IRQ"); 125 panic("Can't parse IRQ");
126 126
127 sunxi_init_clocks(); 127 of_clk_init(NULL);
128 128
129 clk = of_clk_get(node, 0); 129 clk = of_clk_get(node, 0);
130 if (IS_ERR(clk)) 130 if (IS_ERR(clk))
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e0a899f25e37..cbcb21e32771 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
185 If in doubt, say N. 185 If in doubt, say N.
186 186
187config GENERIC_CPUFREQ_CPU0 187config GENERIC_CPUFREQ_CPU0
188 bool "Generic CPU0 cpufreq driver" 188 tristate "Generic CPU0 cpufreq driver"
189 depends on HAVE_CLK && REGULATOR && PM_OPP && OF 189 depends on HAVE_CLK && REGULATOR && PM_OPP && OF
190 select CPU_FREQ_TABLE 190 select CPU_FREQ_TABLE
191 help 191 help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b0..7f333af1c059 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
77 This adds the CPUFreq driver for Samsung EXYNOS5250 77 This adds the CPUFreq driver for Samsung EXYNOS5250
78 SoC. 78 SoC.
79 79
80config ARM_KIRKWOOD_CPUFREQ
81 def_bool ARCH_KIRKWOOD && OF
82 help
83 This adds the CPUFreq driver for Marvell Kirkwood
84 SoCs.
85
86config ARM_IMX6Q_CPUFREQ
87 tristate "Freescale i.MX6Q cpufreq support"
88 depends on SOC_IMX6Q
89 depends on REGULATOR_ANATOP
90 help
91 This adds cpufreq driver support for Freescale i.MX6Q SOC.
92
93 If in doubt, say N.
94
80config ARM_SPEAR_CPUFREQ 95config ARM_SPEAR_CPUFREQ
81 bool "SPEAr CPUFreq support" 96 bool "SPEAr CPUFreq support"
82 depends on PLAT_SPEAR 97 depends on PLAT_SPEAR
83 default y 98 default y
84 help 99 help
85 This adds the CPUFreq driver support for SPEAr SOCs. 100 This adds the CPUFreq driver support for SPEAr SOCs.
101
102config ARM_HIGHBANK_CPUFREQ
103 tristate "Calxeda Highbank-based"
104 depends on ARCH_HIGHBANK
105 select CPU_FREQ_TABLE
106 select GENERIC_CPUFREQ_CPU0
107 select PM_OPP
108 select REGULATOR
109
110 default m
111 help
112 This adds the CPUFreq driver for Calxeda Highbank SoC
113 based boards.
114
115 If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 934854ae5eb4..98e5abbe8f26 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -2,6 +2,19 @@
2# x86 CPU Frequency scaling drivers 2# x86 CPU Frequency scaling drivers
3# 3#
4 4
5config X86_INTEL_PSTATE
6 bool "Intel P state control"
7 depends on X86
8 help
9 This driver provides a P state for Intel core processors.
10 The driver implements an internal governor and will become
11 the scaling driver and governor for Sandy bridge processors.
12
13 When this driver is enabled it will become the preferred
14 scaling driver for Sandy bridge processors.
15
16 If in doubt, say N.
17
5config X86_PCC_CPUFREQ 18config X86_PCC_CPUFREQ
6 tristate "Processor Clocking Control interface driver" 19 tristate "Processor Clocking Control interface driver"
7 depends on ACPI && ACPI_PROCESSOR 20 depends on ACPI && ACPI_PROCESSOR
@@ -106,7 +119,7 @@ config X86_POWERNOW_K7_ACPI
106config X86_POWERNOW_K8 119config X86_POWERNOW_K8
107 tristate "AMD Opteron/Athlon64 PowerNow!" 120 tristate "AMD Opteron/Athlon64 PowerNow!"
108 select CPU_FREQ_TABLE 121 select CPU_FREQ_TABLE
109 depends on ACPI && ACPI_PROCESSOR 122 depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
110 help 123 help
111 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. 124 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
112 Support for K10 and newer processors is now in acpi-cpufreq. 125 Support for K10 and newer processors is now in acpi-cpufreq.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fadc4d496e2f..5399c45ac311 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -19,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
19################################################################################## 19##################################################################################
20# x86 drivers. 20# x86 drivers.
21# Link order matters. K8 is preferred to ACPI because of firmware bugs in early 21# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
22# K8 systems. ACPI is preferred to all other hardware-specific drivers. 22# K8 systems. This is still the case but acpi-cpufreq errors out so that
23# powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
23# speedstep-* is preferred over p4-clockmod. 24# speedstep-* is preferred over p4-clockmod.
24 25
25obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
26obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o 26obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
27obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
27obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o 28obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
28obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o 29obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
29obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o 30obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
39obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o 40obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
40obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 41obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
41obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 42obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
43obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
42 44
43################################################################################## 45##################################################################################
44# ARM SoC drivers 46# ARM SoC drivers
@@ -50,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
50obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 52obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
51obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o 53obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
52obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o 54obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
53obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 55obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
56obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
54obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o 57obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
55 60
56################################################################################## 61##################################################################################
57# PowerPC platform drivers 62# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0d048f6a2b23..937bc286591f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
734 734
735#ifdef CONFIG_SMP 735#ifdef CONFIG_SMP
736 dmi_check_system(sw_any_bug_dmi_table); 736 dmi_check_system(sw_any_bug_dmi_table);
737 if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { 737 if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
738 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; 738 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
739 cpumask_copy(policy->cpus, cpu_core_mask(cpu)); 739 cpumask_copy(policy->cpus, cpu_core_mask(cpu));
740 } 740 }
@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
762 762
763 switch (perf->control_register.space_id) { 763 switch (perf->control_register.space_id) {
764 case ACPI_ADR_SPACE_SYSTEM_IO: 764 case ACPI_ADR_SPACE_SYSTEM_IO:
765 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
766 boot_cpu_data.x86 == 0xf) {
767 pr_debug("AMD K8 systems must use native drivers.\n");
768 result = -ENODEV;
769 goto err_unreg;
770 }
765 pr_debug("SYSTEM IO addr space\n"); 771 pr_debug("SYSTEM IO addr space\n");
766 data->cpu_feature = SYSTEM_IO_CAPABLE; 772 data->cpu_feature = SYSTEM_IO_CAPABLE;
767 break; 773 break;
@@ -1030,4 +1036,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
1030late_initcall(acpi_cpufreq_init); 1036late_initcall(acpi_cpufreq_init);
1031module_exit(acpi_cpufreq_exit); 1037module_exit(acpi_cpufreq_exit);
1032 1038
1039static const struct x86_cpu_id acpi_cpufreq_ids[] = {
1040 X86_FEATURE_MATCH(X86_FEATURE_ACPI),
1041 X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
1042 {}
1043};
1044MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
1045
1033MODULE_ALIAS("acpi"); 1046MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 52bf36d599f5..4e5b7fb8927c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,12 +12,12 @@
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
17#include <linux/err.h> 16#include <linux/err.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/of.h> 18#include <linux/of.h>
20#include <linux/opp.h> 19#include <linux/opp.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/consumer.h> 21#include <linux/regulator/consumer.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
71 } 71 }
72 72
73 if (cpu_reg) { 73 if (cpu_reg) {
74 rcu_read_lock();
74 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); 75 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
75 if (IS_ERR(opp)) { 76 if (IS_ERR(opp)) {
77 rcu_read_unlock();
76 pr_err("failed to find OPP for %ld\n", freq_Hz); 78 pr_err("failed to find OPP for %ld\n", freq_Hz);
77 return PTR_ERR(opp); 79 return PTR_ERR(opp);
78 } 80 }
79 volt = opp_get_voltage(opp); 81 volt = opp_get_voltage(opp);
82 rcu_read_unlock();
80 tol = volt * voltage_tolerance / 100; 83 tol = volt * voltage_tolerance / 100;
81 volt_old = regulator_get_voltage(cpu_reg); 84 volt_old = regulator_get_voltage(cpu_reg);
82 } 85 }
@@ -143,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
143 * share the clock and voltage and clock. Use cpufreq affected_cpus 146 * share the clock and voltage and clock. Use cpufreq affected_cpus
144 * interface to have all CPUs scaled together. 147 * interface to have all CPUs scaled together.
145 */ 148 */
146 policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
147 cpumask_setall(policy->cpus); 149 cpumask_setall(policy->cpus);
148 150
149 cpufreq_frequency_table_get_attr(freq_table, policy->cpu); 151 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -174,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
174 .attr = cpu0_cpufreq_attr, 176 .attr = cpu0_cpufreq_attr,
175}; 177};
176 178
177static int cpu0_cpufreq_driver_init(void) 179static int cpu0_cpufreq_probe(struct platform_device *pdev)
178{ 180{
179 struct device_node *np; 181 struct device_node *np;
180 int ret; 182 int ret;
181 183
182 np = of_find_node_by_path("/cpus/cpu@0"); 184 for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
185 if (of_get_property(np, "operating-points", NULL))
186 break;
187 }
188
183 if (!np) { 189 if (!np) {
184 pr_err("failed to find cpu0 node\n"); 190 pr_err("failed to find cpu0 node\n");
185 return -ENOENT; 191 return -ENOENT;
186 } 192 }
187 193
188 cpu_dev = get_cpu_device(0); 194 cpu_dev = &pdev->dev;
189 if (!cpu_dev) {
190 pr_err("failed to get cpu0 device\n");
191 ret = -ENODEV;
192 goto out_put_node;
193 }
194
195 cpu_dev->of_node = np; 195 cpu_dev->of_node = np;
196 196
197 cpu_clk = clk_get(cpu_dev, NULL); 197 cpu_clk = devm_clk_get(cpu_dev, NULL);
198 if (IS_ERR(cpu_clk)) { 198 if (IS_ERR(cpu_clk)) {
199 ret = PTR_ERR(cpu_clk); 199 ret = PTR_ERR(cpu_clk);
200 pr_err("failed to get cpu0 clock: %d\n", ret); 200 pr_err("failed to get cpu0 clock: %d\n", ret);
201 goto out_put_node; 201 goto out_put_node;
202 } 202 }
203 203
204 cpu_reg = regulator_get(cpu_dev, "cpu0"); 204 cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
205 if (IS_ERR(cpu_reg)) { 205 if (IS_ERR(cpu_reg)) {
206 pr_warn("failed to get cpu0 regulator\n"); 206 pr_warn("failed to get cpu0 regulator\n");
207 cpu_reg = NULL; 207 cpu_reg = NULL;
@@ -236,12 +236,14 @@ static int cpu0_cpufreq_driver_init(void)
236 */ 236 */
237 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 237 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
238 ; 238 ;
239 rcu_read_lock();
239 opp = opp_find_freq_exact(cpu_dev, 240 opp = opp_find_freq_exact(cpu_dev,
240 freq_table[0].frequency * 1000, true); 241 freq_table[0].frequency * 1000, true);
241 min_uV = opp_get_voltage(opp); 242 min_uV = opp_get_voltage(opp);
242 opp = opp_find_freq_exact(cpu_dev, 243 opp = opp_find_freq_exact(cpu_dev,
243 freq_table[i-1].frequency * 1000, true); 244 freq_table[i-1].frequency * 1000, true);
244 max_uV = opp_get_voltage(opp); 245 max_uV = opp_get_voltage(opp);
246 rcu_read_unlock();
245 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); 247 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
246 if (ret > 0) 248 if (ret > 0)
247 transition_latency += ret * 1000; 249 transition_latency += ret * 1000;
@@ -262,7 +264,24 @@ out_put_node:
262 of_node_put(np); 264 of_node_put(np);
263 return ret; 265 return ret;
264} 266}
265late_initcall(cpu0_cpufreq_driver_init); 267
268static int cpu0_cpufreq_remove(struct platform_device *pdev)
269{
270 cpufreq_unregister_driver(&cpu0_cpufreq_driver);
271 opp_free_cpufreq_table(cpu_dev, &freq_table);
272
273 return 0;
274}
275
276static struct platform_driver cpu0_cpufreq_platdrv = {
277 .driver = {
278 .name = "cpufreq-cpu0",
279 .owner = THIS_MODULE,
280 },
281 .probe = cpu0_cpufreq_probe,
282 .remove = cpu0_cpufreq_remove,
283};
284module_platform_driver(cpu0_cpufreq_platdrv);
266 285
267MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); 286MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
268MODULE_DESCRIPTION("Generic CPU0 cpufreq driver"); 287MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
59 * mode before doing so. 59 * mode before doing so.
60 * 60 *
61 * Additional rules: 61 * Additional rules:
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not 62 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this. 63 * take this sem as top level hotplug notifier handler takes this.
66 * - Lock should not be held across 64 * - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
70static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); 68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
71 69
72#define lock_policy_rwsem(mode, cpu) \ 70#define lock_policy_rwsem(mode, cpu) \
73static int lock_policy_rwsem_##mode \ 71static int lock_policy_rwsem_##mode(int cpu) \
74(int cpu) \
75{ \ 72{ \
76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
77 BUG_ON(policy_cpu == -1); \ 74 BUG_ON(policy_cpu == -1); \
78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ 75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
79 if (unlikely(!cpu_online(cpu))) { \
80 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
81 return -1; \
82 } \
83 \ 76 \
84 return 0; \ 77 return 0; \
85} 78}
86 79
87lock_policy_rwsem(read, cpu); 80lock_policy_rwsem(read, cpu);
88
89lock_policy_rwsem(write, cpu); 81lock_policy_rwsem(write, cpu);
90 82
91static void unlock_policy_rwsem_read(int cpu) 83#define unlock_policy_rwsem(mode, cpu) \
92{ 84static void unlock_policy_rwsem_##mode(int cpu) \
93 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); 85{ \
94 BUG_ON(policy_cpu == -1); 86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); 87 BUG_ON(policy_cpu == -1); \
96} 88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
97
98static void unlock_policy_rwsem_write(int cpu)
99{
100 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
101 BUG_ON(policy_cpu == -1);
102 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
103} 89}
104 90
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
105 93
106/* internal prototypes */ 94/* internal prototypes */
107static int __cpufreq_governor(struct cpufreq_policy *policy, 95static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -180,6 +168,9 @@ err_out:
180 168
181struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) 169struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
182{ 170{
171 if (cpufreq_disabled())
172 return NULL;
173
183 return __cpufreq_cpu_get(cpu, false); 174 return __cpufreq_cpu_get(cpu, false);
184} 175}
185EXPORT_SYMBOL_GPL(cpufreq_cpu_get); 176EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
198 189
199void cpufreq_cpu_put(struct cpufreq_policy *data) 190void cpufreq_cpu_put(struct cpufreq_policy *data)
200{ 191{
192 if (cpufreq_disabled())
193 return;
194
201 __cpufreq_cpu_put(data, false); 195 __cpufreq_cpu_put(data, false);
202} 196}
203EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 197EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
261void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) 255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
262{ 256{
263 struct cpufreq_policy *policy; 257 struct cpufreq_policy *policy;
258 unsigned long flags;
264 259
265 BUG_ON(irqs_disabled()); 260 BUG_ON(irqs_disabled());
266 261
262 if (cpufreq_disabled())
263 return;
264
267 freqs->flags = cpufreq_driver->flags; 265 freqs->flags = cpufreq_driver->flags;
268 pr_debug("notification %u of frequency transition to %u kHz\n", 266 pr_debug("notification %u of frequency transition to %u kHz\n",
269 state, freqs->new); 267 state, freqs->new);
270 268
269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
271 policy = per_cpu(cpufreq_cpu_data, freqs->cpu); 270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
272
272 switch (state) { 273 switch (state) {
273 274
274 case CPUFREQ_PRECHANGE: 275 case CPUFREQ_PRECHANGE:
@@ -294,7 +295,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
294 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
295 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, 296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
296 (unsigned long)freqs->cpu); 297 (unsigned long)freqs->cpu);
297 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
298 trace_cpu_frequency(freqs->new, freqs->cpu); 298 trace_cpu_frequency(freqs->new, freqs->cpu);
299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
300 CPUFREQ_POSTCHANGE, freqs); 300 CPUFREQ_POSTCHANGE, freqs);
@@ -543,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
543 */ 543 */
544static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 544static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
545{ 545{
546 if (cpumask_empty(policy->related_cpus))
547 return show_cpus(policy->cpus, buf);
548 return show_cpus(policy->related_cpus, buf); 546 return show_cpus(policy->related_cpus, buf);
549} 547}
550 548
@@ -700,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
700 .release = cpufreq_sysfs_release, 698 .release = cpufreq_sysfs_release,
701}; 699};
702 700
703/*
704 * Returns:
705 * Negative: Failure
706 * 0: Success
707 * Positive: When we have a managed CPU and the sysfs got symlinked
708 */
709static int cpufreq_add_dev_policy(unsigned int cpu,
710 struct cpufreq_policy *policy,
711 struct device *dev)
712{
713 int ret = 0;
714#ifdef CONFIG_SMP
715 unsigned long flags;
716 unsigned int j;
717#ifdef CONFIG_HOTPLUG_CPU
718 struct cpufreq_governor *gov;
719
720 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
721 if (gov) {
722 policy->governor = gov;
723 pr_debug("Restoring governor %s for cpu %d\n",
724 policy->governor->name, cpu);
725 }
726#endif
727
728 for_each_cpu(j, policy->cpus) {
729 struct cpufreq_policy *managed_policy;
730
731 if (cpu == j)
732 continue;
733
734 /* Check for existing affected CPUs.
735 * They may not be aware of it due to CPU Hotplug.
736 * cpufreq_cpu_put is called when the device is removed
737 * in __cpufreq_remove_dev()
738 */
739 managed_policy = cpufreq_cpu_get(j);
740 if (unlikely(managed_policy)) {
741
742 /* Set proper policy_cpu */
743 unlock_policy_rwsem_write(cpu);
744 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
745
746 if (lock_policy_rwsem_write(cpu) < 0) {
747 /* Should not go through policy unlock path */
748 if (cpufreq_driver->exit)
749 cpufreq_driver->exit(policy);
750 cpufreq_cpu_put(managed_policy);
751 return -EBUSY;
752 }
753
754 spin_lock_irqsave(&cpufreq_driver_lock, flags);
755 cpumask_copy(managed_policy->cpus, policy->cpus);
756 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
757 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
758
759 pr_debug("CPU already managed, adding link\n");
760 ret = sysfs_create_link(&dev->kobj,
761 &managed_policy->kobj,
762 "cpufreq");
763 if (ret)
764 cpufreq_cpu_put(managed_policy);
765 /*
766 * Success. We only needed to be added to the mask.
767 * Call driver->exit() because only the cpu parent of
768 * the kobj needed to call init().
769 */
770 if (cpufreq_driver->exit)
771 cpufreq_driver->exit(policy);
772
773 if (!ret)
774 return 1;
775 else
776 return ret;
777 }
778 }
779#endif
780 return ret;
781}
782
783
784/* symlink affected CPUs */ 701/* symlink affected CPUs */
785static int cpufreq_add_dev_symlink(unsigned int cpu, 702static int cpufreq_add_dev_symlink(unsigned int cpu,
786 struct cpufreq_policy *policy) 703 struct cpufreq_policy *policy)
@@ -794,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
794 711
795 if (j == cpu) 712 if (j == cpu)
796 continue; 713 continue;
797 if (!cpu_online(j))
798 continue;
799 714
800 pr_debug("CPU %u already managed, adding link\n", j); 715 pr_debug("CPU %u already managed, adding link\n", j);
801 managed_policy = cpufreq_cpu_get(cpu); 716 managed_policy = cpufreq_cpu_get(cpu);
@@ -852,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
852 767
853 spin_lock_irqsave(&cpufreq_driver_lock, flags); 768 spin_lock_irqsave(&cpufreq_driver_lock, flags);
854 for_each_cpu(j, policy->cpus) { 769 for_each_cpu(j, policy->cpus) {
855 if (!cpu_online(j))
856 continue;
857 per_cpu(cpufreq_cpu_data, j) = policy; 770 per_cpu(cpufreq_cpu_data, j) = policy;
858 per_cpu(cpufreq_policy_cpu, j) = policy->cpu; 771 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
859 } 772 }
@@ -885,6 +798,42 @@ err_out_kobj_put:
885 return ret; 798 return ret;
886} 799}
887 800
801#ifdef CONFIG_HOTPLUG_CPU
802static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
803 struct device *dev)
804{
805 struct cpufreq_policy *policy;
806 int ret = 0;
807 unsigned long flags;
808
809 policy = cpufreq_cpu_get(sibling);
810 WARN_ON(!policy);
811
812 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
813
814 lock_policy_rwsem_write(sibling);
815
816 spin_lock_irqsave(&cpufreq_driver_lock, flags);
817
818 cpumask_set_cpu(cpu, policy->cpus);
819 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
820 per_cpu(cpufreq_cpu_data, cpu) = policy;
821 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
822
823 unlock_policy_rwsem_write(sibling);
824
825 __cpufreq_governor(policy, CPUFREQ_GOV_START);
826 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
827
828 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
829 if (ret) {
830 cpufreq_cpu_put(policy);
831 return ret;
832 }
833
834 return 0;
835}
836#endif
888 837
889/** 838/**
890 * cpufreq_add_dev - add a CPU device 839 * cpufreq_add_dev - add a CPU device
@@ -897,12 +846,12 @@ err_out_kobj_put:
897 */ 846 */
898static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 847static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
899{ 848{
900 unsigned int cpu = dev->id; 849 unsigned int j, cpu = dev->id;
901 int ret = 0, found = 0; 850 int ret = -ENOMEM;
902 struct cpufreq_policy *policy; 851 struct cpufreq_policy *policy;
903 unsigned long flags; 852 unsigned long flags;
904 unsigned int j;
905#ifdef CONFIG_HOTPLUG_CPU 853#ifdef CONFIG_HOTPLUG_CPU
854 struct cpufreq_governor *gov;
906 int sibling; 855 int sibling;
907#endif 856#endif
908 857
@@ -919,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
919 cpufreq_cpu_put(policy); 868 cpufreq_cpu_put(policy);
920 return 0; 869 return 0;
921 } 870 }
871
872#ifdef CONFIG_HOTPLUG_CPU
873 /* Check if this cpu was hot-unplugged earlier and has siblings */
874 spin_lock_irqsave(&cpufreq_driver_lock, flags);
875 for_each_online_cpu(sibling) {
876 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
877 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
878 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
879 return cpufreq_add_policy_cpu(cpu, sibling, dev);
880 }
881 }
882 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
883#endif
922#endif 884#endif
923 885
924 if (!try_module_get(cpufreq_driver->owner)) { 886 if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
926 goto module_out; 888 goto module_out;
927 } 889 }
928 890
929 ret = -ENOMEM;
930 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); 891 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
931 if (!policy) 892 if (!policy)
932 goto nomem_out; 893 goto nomem_out;
@@ -938,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
938 goto err_free_cpumask; 899 goto err_free_cpumask;
939 900
940 policy->cpu = cpu; 901 policy->cpu = cpu;
902 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
941 cpumask_copy(policy->cpus, cpumask_of(cpu)); 903 cpumask_copy(policy->cpus, cpumask_of(cpu));
942 904
943 /* Initially set CPU itself as the policy_cpu */ 905 /* Initially set CPU itself as the policy_cpu */
944 per_cpu(cpufreq_policy_cpu, cpu) = cpu; 906 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
945 ret = (lock_policy_rwsem_write(cpu) < 0);
946 WARN_ON(ret);
947 907
948 init_completion(&policy->kobj_unregister); 908 init_completion(&policy->kobj_unregister);
949 INIT_WORK(&policy->update, handle_update); 909 INIT_WORK(&policy->update, handle_update);
950 910
951 /* Set governor before ->init, so that driver could check it */
952#ifdef CONFIG_HOTPLUG_CPU
953 for_each_online_cpu(sibling) {
954 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
955 if (cp && cp->governor &&
956 (cpumask_test_cpu(cpu, cp->related_cpus))) {
957 policy->governor = cp->governor;
958 found = 1;
959 break;
960 }
961 }
962#endif
963 if (!found)
964 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
965 /* call driver. From then on the cpufreq must be able 911 /* call driver. From then on the cpufreq must be able
966 * to accept all calls to ->verify and ->setpolicy for this CPU 912 * to accept all calls to ->verify and ->setpolicy for this CPU
967 */ 913 */
968 ret = cpufreq_driver->init(policy); 914 ret = cpufreq_driver->init(policy);
969 if (ret) { 915 if (ret) {
970 pr_debug("initialization failed\n"); 916 pr_debug("initialization failed\n");
971 goto err_unlock_policy; 917 goto err_set_policy_cpu;
972 } 918 }
919
920 /* related cpus should atleast have policy->cpus */
921 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
922
923 /*
924 * affected cpus must always be the one, which are online. We aren't
925 * managing offline cpus here.
926 */
927 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
928
973 policy->user_policy.min = policy->min; 929 policy->user_policy.min = policy->min;
974 policy->user_policy.max = policy->max; 930 policy->user_policy.max = policy->max;
975 931
976 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 932 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
977 CPUFREQ_START, policy); 933 CPUFREQ_START, policy);
978 934
979 ret = cpufreq_add_dev_policy(cpu, policy, dev); 935#ifdef CONFIG_HOTPLUG_CPU
980 if (ret) { 936 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
981 if (ret > 0) 937 if (gov) {
982 /* This is a managed cpu, symlink created, 938 policy->governor = gov;
983 exit with 0 */ 939 pr_debug("Restoring governor %s for cpu %d\n",
984 ret = 0; 940 policy->governor->name, cpu);
985 goto err_unlock_policy;
986 } 941 }
942#endif
987 943
988 ret = cpufreq_add_dev_interface(cpu, policy, dev); 944 ret = cpufreq_add_dev_interface(cpu, policy, dev);
989 if (ret) 945 if (ret)
990 goto err_out_unregister; 946 goto err_out_unregister;
991 947
992 unlock_policy_rwsem_write(cpu);
993
994 kobject_uevent(&policy->kobj, KOBJ_ADD); 948 kobject_uevent(&policy->kobj, KOBJ_ADD);
995 module_put(cpufreq_driver->owner); 949 module_put(cpufreq_driver->owner);
996 pr_debug("initialization complete\n"); 950 pr_debug("initialization complete\n");
997 951
998 return 0; 952 return 0;
999 953
1000
1001err_out_unregister: 954err_out_unregister:
1002 spin_lock_irqsave(&cpufreq_driver_lock, flags); 955 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1003 for_each_cpu(j, policy->cpus) 956 for_each_cpu(j, policy->cpus)
@@ -1007,8 +960,8 @@ err_out_unregister:
1007 kobject_put(&policy->kobj); 960 kobject_put(&policy->kobj);
1008 wait_for_completion(&policy->kobj_unregister); 961 wait_for_completion(&policy->kobj_unregister);
1009 962
1010err_unlock_policy: 963err_set_policy_cpu:
1011 unlock_policy_rwsem_write(cpu); 964 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1012 free_cpumask_var(policy->related_cpus); 965 free_cpumask_var(policy->related_cpus);
1013err_free_cpumask: 966err_free_cpumask:
1014 free_cpumask_var(policy->cpus); 967 free_cpumask_var(policy->cpus);
@@ -1020,6 +973,22 @@ module_out:
1020 return ret; 973 return ret;
1021} 974}
1022 975
976static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
977{
978 int j;
979
980 policy->last_cpu = policy->cpu;
981 policy->cpu = cpu;
982
983 for_each_cpu(j, policy->cpus)
984 per_cpu(cpufreq_policy_cpu, j) = cpu;
985
986#ifdef CONFIG_CPU_FREQ_TABLE
987 cpufreq_frequency_table_update_policy_cpu(policy);
988#endif
989 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
990 CPUFREQ_UPDATE_POLICY_CPU, policy);
991}
1023 992
1024/** 993/**
1025 * __cpufreq_remove_dev - remove a CPU device 994 * __cpufreq_remove_dev - remove a CPU device
@@ -1030,129 +999,103 @@ module_out:
1030 */ 999 */
1031static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1000static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1032{ 1001{
1033 unsigned int cpu = dev->id; 1002 unsigned int cpu = dev->id, ret, cpus;
1034 unsigned long flags; 1003 unsigned long flags;
1035 struct cpufreq_policy *data; 1004 struct cpufreq_policy *data;
1036 struct kobject *kobj; 1005 struct kobject *kobj;
1037 struct completion *cmp; 1006 struct completion *cmp;
1038#ifdef CONFIG_SMP
1039 struct device *cpu_dev; 1007 struct device *cpu_dev;
1040 unsigned int j;
1041#endif
1042 1008
1043 pr_debug("unregistering CPU %u\n", cpu); 1009 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1044 1010
1045 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1011 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1012
1046 data = per_cpu(cpufreq_cpu_data, cpu); 1013 data = per_cpu(cpufreq_cpu_data, cpu);
1014 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1015
1016 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1047 1017
1048 if (!data) { 1018 if (!data) {
1049 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1019 pr_debug("%s: No cpu_data found\n", __func__);
1050 unlock_policy_rwsem_write(cpu);
1051 return -EINVAL; 1020 return -EINVAL;
1052 } 1021 }
1053 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1054 1022
1023 if (cpufreq_driver->target)
1024 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1055 1025
1056#ifdef CONFIG_SMP 1026#ifdef CONFIG_HOTPLUG_CPU
1057 /* if this isn't the CPU which is the parent of the kobj, we 1027 if (!cpufreq_driver->setpolicy)
1058 * only need to unlink, put and exit 1028 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1059 */ 1029 data->governor->name, CPUFREQ_NAME_LEN);
1060 if (unlikely(cpu != data->cpu)) {
1061 pr_debug("removing link\n");
1062 cpumask_clear_cpu(cpu, data->cpus);
1063 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1064 kobj = &dev->kobj;
1065 cpufreq_cpu_put(data);
1066 unlock_policy_rwsem_write(cpu);
1067 sysfs_remove_link(kobj, "cpufreq");
1068 return 0;
1069 }
1070#endif 1030#endif
1071 1031
1072#ifdef CONFIG_SMP 1032 WARN_ON(lock_policy_rwsem_write(cpu));
1033 cpus = cpumask_weight(data->cpus);
1034 cpumask_clear_cpu(cpu, data->cpus);
1035 unlock_policy_rwsem_write(cpu);
1073 1036
1074#ifdef CONFIG_HOTPLUG_CPU 1037 if (cpu != data->cpu) {
1075 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name, 1038 sysfs_remove_link(&dev->kobj, "cpufreq");
1076 CPUFREQ_NAME_LEN); 1039 } else if (cpus > 1) {
1077#endif 1040 /* first sibling now owns the new sysfs dir */
1041 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1042 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1043 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1044 if (ret) {
1045 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1078 1046
1079 /* if we have other CPUs still registered, we need to unlink them, 1047 WARN_ON(lock_policy_rwsem_write(cpu));
1080 * or else wait_for_completion below will lock up. Clean the 1048 cpumask_set_cpu(cpu, data->cpus);
1081 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1082 * the sysfs links afterwards.
1083 */
1084 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1085 for_each_cpu(j, data->cpus) {
1086 if (j == cpu)
1087 continue;
1088 per_cpu(cpufreq_cpu_data, j) = NULL;
1089 }
1090 }
1091 1049
1092 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1050 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1051 per_cpu(cpufreq_cpu_data, cpu) = data;
1052 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1093 1053
1094 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1095 for_each_cpu(j, data->cpus) {
1096 if (j == cpu)
1097 continue;
1098 pr_debug("removing link for cpu %u\n", j);
1099#ifdef CONFIG_HOTPLUG_CPU
1100 strncpy(per_cpu(cpufreq_cpu_governor, j),
1101 data->governor->name, CPUFREQ_NAME_LEN);
1102#endif
1103 cpu_dev = get_cpu_device(j);
1104 kobj = &cpu_dev->kobj;
1105 unlock_policy_rwsem_write(cpu); 1054 unlock_policy_rwsem_write(cpu);
1106 sysfs_remove_link(kobj, "cpufreq");
1107 lock_policy_rwsem_write(cpu);
1108 cpufreq_cpu_put(data);
1109 }
1110 }
1111#else
1112 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1113#endif
1114 1055
1115 if (cpufreq_driver->target) 1056 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1116 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1057 "cpufreq");
1058 return -EINVAL;
1059 }
1117 1060
1118 kobj = &data->kobj; 1061 WARN_ON(lock_policy_rwsem_write(cpu));
1119 cmp = &data->kobj_unregister; 1062 update_policy_cpu(data, cpu_dev->id);
1120 unlock_policy_rwsem_write(cpu); 1063 unlock_policy_rwsem_write(cpu);
1121 kobject_put(kobj); 1064 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1065 __func__, cpu_dev->id, cpu);
1066 }
1122 1067
1123 /* we need to make sure that the underlying kobj is actually 1068 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1124 * not referenced anymore by anybody before we proceed with 1069 cpufreq_cpu_put(data);
1125 * unloading.
1126 */
1127 pr_debug("waiting for dropping of refcount\n");
1128 wait_for_completion(cmp);
1129 pr_debug("wait complete\n");
1130 1070
1131 lock_policy_rwsem_write(cpu); 1071 /* If cpu is last user of policy, free policy */
1132 if (cpufreq_driver->exit) 1072 if (cpus == 1) {
1133 cpufreq_driver->exit(data); 1073 lock_policy_rwsem_read(cpu);
1134 unlock_policy_rwsem_write(cpu); 1074 kobj = &data->kobj;
1075 cmp = &data->kobj_unregister;
1076 unlock_policy_rwsem_read(cpu);
1077 kobject_put(kobj);
1078
1079 /* we need to make sure that the underlying kobj is actually
1080 * not referenced anymore by anybody before we proceed with
1081 * unloading.
1082 */
1083 pr_debug("waiting for dropping of refcount\n");
1084 wait_for_completion(cmp);
1085 pr_debug("wait complete\n");
1135 1086
1136#ifdef CONFIG_HOTPLUG_CPU 1087 if (cpufreq_driver->exit)
1137 /* when the CPU which is the parent of the kobj is hotplugged 1088 cpufreq_driver->exit(data);
1138 * offline, check for siblings, and create cpufreq sysfs interface
1139 * and symlinks
1140 */
1141 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1142 /* first sibling now owns the new sysfs dir */
1143 cpumask_clear_cpu(cpu, data->cpus);
1144 cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
1145 1089
1146 /* finally remove our own symlink */ 1090 free_cpumask_var(data->related_cpus);
1147 lock_policy_rwsem_write(cpu); 1091 free_cpumask_var(data->cpus);
1148 __cpufreq_remove_dev(dev, sif); 1092 kfree(data);
1093 } else if (cpufreq_driver->target) {
1094 __cpufreq_governor(data, CPUFREQ_GOV_START);
1095 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1149 } 1096 }
1150#endif
1151
1152 free_cpumask_var(data->related_cpus);
1153 free_cpumask_var(data->cpus);
1154 kfree(data);
1155 1097
1098 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1156 return 0; 1099 return 0;
1157} 1100}
1158 1101
@@ -1165,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1165 if (cpu_is_offline(cpu)) 1108 if (cpu_is_offline(cpu))
1166 return 0; 1109 return 0;
1167 1110
1168 if (unlikely(lock_policy_rwsem_write(cpu)))
1169 BUG();
1170
1171 retval = __cpufreq_remove_dev(dev, sif); 1111 retval = __cpufreq_remove_dev(dev, sif);
1172 return retval; 1112 return retval;
1173} 1113}
@@ -1216,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1216 */ 1156 */
1217unsigned int cpufreq_quick_get(unsigned int cpu) 1157unsigned int cpufreq_quick_get(unsigned int cpu)
1218{ 1158{
1219 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1159 struct cpufreq_policy *policy;
1220 unsigned int ret_freq = 0; 1160 unsigned int ret_freq = 0;
1221 1161
1162 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1163 return cpufreq_driver->get(cpu);
1164
1165 policy = cpufreq_cpu_get(cpu);
1222 if (policy) { 1166 if (policy) {
1223 ret_freq = policy->cur; 1167 ret_freq = policy->cur;
1224 cpufreq_cpu_put(policy); 1168 cpufreq_cpu_put(policy);
@@ -1386,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
1386 .resume = cpufreq_bp_resume, 1330 .resume = cpufreq_bp_resume,
1387}; 1331};
1388 1332
1333/**
1334 * cpufreq_get_current_driver - return current driver's name
1335 *
1336 * Return the name string of the currently loaded cpufreq driver
1337 * or NULL, if none.
1338 */
1339const char *cpufreq_get_current_driver(void)
1340{
1341 if (cpufreq_driver)
1342 return cpufreq_driver->name;
1343
1344 return NULL;
1345}
1346EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1389 1347
1390/********************************************************************* 1348/*********************************************************************
1391 * NOTIFIER LISTS INTERFACE * 1349 * NOTIFIER LISTS INTERFACE *
@@ -1408,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1408{ 1366{
1409 int ret; 1367 int ret;
1410 1368
1369 if (cpufreq_disabled())
1370 return -EINVAL;
1371
1411 WARN_ON(!init_cpufreq_transition_notifier_list_called); 1372 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1412 1373
1413 switch (list) { 1374 switch (list) {
@@ -1442,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1442{ 1403{
1443 int ret; 1404 int ret;
1444 1405
1406 if (cpufreq_disabled())
1407 return -EINVAL;
1408
1445 switch (list) { 1409 switch (list) {
1446 case CPUFREQ_TRANSITION_NOTIFIER: 1410 case CPUFREQ_TRANSITION_NOTIFIER:
1447 ret = srcu_notifier_chain_unregister( 1411 ret = srcu_notifier_chain_unregister(
@@ -1487,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1487 if (target_freq == policy->cur) 1451 if (target_freq == policy->cur)
1488 return 0; 1452 return 0;
1489 1453
1490 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1454 if (cpufreq_driver->target)
1491 retval = cpufreq_driver->target(policy, target_freq, relation); 1455 retval = cpufreq_driver->target(policy, target_freq, relation);
1492 1456
1493 return retval; 1457 return retval;
@@ -1522,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1522{ 1486{
1523 int ret = 0; 1487 int ret = 0;
1524 1488
1525 if (!(cpu_online(cpu) && cpufreq_driver->getavg)) 1489 if (cpufreq_disabled())
1490 return ret;
1491
1492 if (!cpufreq_driver->getavg)
1526 return 0; 1493 return 0;
1527 1494
1528 policy = cpufreq_cpu_get(policy->cpu); 1495 policy = cpufreq_cpu_get(policy->cpu);
@@ -1577,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1577 policy->cpu, event); 1544 policy->cpu, event);
1578 ret = policy->governor->governor(policy, event); 1545 ret = policy->governor->governor(policy, event);
1579 1546
1547 if (event == CPUFREQ_GOV_START)
1548 policy->governor->initialized++;
1549 else if (event == CPUFREQ_GOV_STOP)
1550 policy->governor->initialized--;
1551
1580 /* we keep one module reference alive for 1552 /* we keep one module reference alive for
1581 each CPU governed by this CPU */ 1553 each CPU governed by this CPU */
1582 if ((event != CPUFREQ_GOV_START) || ret) 1554 if ((event != CPUFREQ_GOV_START) || ret)
@@ -1600,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
1600 1572
1601 mutex_lock(&cpufreq_governor_mutex); 1573 mutex_lock(&cpufreq_governor_mutex);
1602 1574
1575 governor->initialized = 0;
1603 err = -EBUSY; 1576 err = -EBUSY;
1604 if (__find_governor(governor->name) == NULL) { 1577 if (__find_governor(governor->name) == NULL) {
1605 err = 0; 1578 err = 0;
@@ -1797,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
1797 pr_debug("Driver did not initialize current freq"); 1770 pr_debug("Driver did not initialize current freq");
1798 data->cur = policy.cur; 1771 data->cur = policy.cur;
1799 } else { 1772 } else {
1800 if (data->cur != policy.cur) 1773 if (data->cur != policy.cur && cpufreq_driver->target)
1801 cpufreq_out_of_sync(cpu, data->cur, 1774 cpufreq_out_of_sync(cpu, data->cur,
1802 policy.cur); 1775 policy.cur);
1803 } 1776 }
@@ -1829,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1829 break; 1802 break;
1830 case CPU_DOWN_PREPARE: 1803 case CPU_DOWN_PREPARE:
1831 case CPU_DOWN_PREPARE_FROZEN: 1804 case CPU_DOWN_PREPARE_FROZEN:
1832 if (unlikely(lock_policy_rwsem_write(cpu)))
1833 BUG();
1834
1835 __cpufreq_remove_dev(dev, NULL); 1805 __cpufreq_remove_dev(dev, NULL);
1836 break; 1806 break;
1837 case CPU_DOWN_FAILED: 1807 case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e72..4fd0006b1291 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -25,7 +25,7 @@
25 25
26#include "cpufreq_governor.h" 26#include "cpufreq_governor.h"
27 27
28/* Conservative governor macors */ 28/* Conservative governor macros */
29#define DEF_FREQUENCY_UP_THRESHOLD (80) 29#define DEF_FREQUENCY_UP_THRESHOLD (80)
30#define DEF_FREQUENCY_DOWN_THRESHOLD (20) 30#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
31#define DEF_SAMPLING_DOWN_FACTOR (1) 31#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
113 113
114static void cs_dbs_timer(struct work_struct *work) 114static void cs_dbs_timer(struct work_struct *work)
115{ 115{
116 struct delayed_work *dw = to_delayed_work(work);
116 struct cs_cpu_dbs_info_s *dbs_info = container_of(work, 117 struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
117 struct cs_cpu_dbs_info_s, cdbs.work.work); 118 struct cs_cpu_dbs_info_s, cdbs.work.work);
118 unsigned int cpu = dbs_info->cdbs.cpu; 119 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
120 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
121 cpu);
119 int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); 122 int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
120 123
121 mutex_lock(&dbs_info->cdbs.timer_mutex); 124 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
125 if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
126 dbs_check_cpu(&cs_dbs_data, cpu);
122 127
123 dbs_check_cpu(&cs_dbs_data, cpu); 128 schedule_delayed_work_on(smp_processor_id(), dw, delay);
124 129 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
125 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
126 mutex_unlock(&dbs_info->cdbs.timer_mutex);
127} 130}
128 131
129static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 132static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
141 144
142 /* 145 /*
143 * we only care if our internally tracked freq moves outside the 'valid' 146 * we only care if our internally tracked freq moves outside the 'valid'
144 * ranges of freqency available to us otherwise we do not change it 147 * ranges of frequency available to us otherwise we do not change it
145 */ 148 */
146 if (dbs_info->requested_freq > policy->max 149 if (dbs_info->requested_freq > policy->max
147 || dbs_info->requested_freq < policy->min) 150 || dbs_info->requested_freq < policy->min)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cdc..5a76086ff09b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
161} 161}
162EXPORT_SYMBOL_GPL(dbs_check_cpu); 162EXPORT_SYMBOL_GPL(dbs_check_cpu);
163 163
164static inline void dbs_timer_init(struct dbs_data *dbs_data, 164static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
165 struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) 165 unsigned int sampling_rate)
166{ 166{
167 int delay = delay_for_sampling_rate(sampling_rate); 167 int delay = delay_for_sampling_rate(sampling_rate);
168 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
168 169
169 INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer); 170 schedule_delayed_work_on(cpu, &cdbs->work, delay);
170 schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
171} 171}
172 172
173static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs) 173static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
174{ 174{
175 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
176
175 cancel_delayed_work_sync(&cdbs->work); 177 cancel_delayed_work_sync(&cdbs->work);
176} 178}
177 179
180/* Will return if we need to evaluate cpu load again or not */
181bool need_load_eval(struct cpu_dbs_common_info *cdbs,
182 unsigned int sampling_rate)
183{
184 if (policy_is_shared(cdbs->cur_policy)) {
185 ktime_t time_now = ktime_get();
186 s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
187
188 /* Do nothing if we recently have sampled */
189 if (delta_us < (s64)(sampling_rate / 2))
190 return false;
191 else
192 cdbs->time_stamp = time_now;
193 }
194
195 return true;
196}
197EXPORT_SYMBOL_GPL(need_load_eval);
198
178int cpufreq_governor_dbs(struct dbs_data *dbs_data, 199int cpufreq_governor_dbs(struct dbs_data *dbs_data,
179 struct cpufreq_policy *policy, unsigned int event) 200 struct cpufreq_policy *policy, unsigned int event)
180{ 201{
181 struct od_cpu_dbs_info_s *od_dbs_info = NULL; 202 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
182 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; 203 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
204 struct cs_ops *cs_ops = NULL;
205 struct od_ops *od_ops = NULL;
183 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 206 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
184 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 207 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
185 struct cpu_dbs_common_info *cpu_cdbs; 208 struct cpu_dbs_common_info *cpu_cdbs;
@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
192 cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 215 cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
193 sampling_rate = &cs_tuners->sampling_rate; 216 sampling_rate = &cs_tuners->sampling_rate;
194 ignore_nice = cs_tuners->ignore_nice; 217 ignore_nice = cs_tuners->ignore_nice;
218 cs_ops = dbs_data->gov_ops;
195 } else { 219 } else {
196 od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 220 od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
197 sampling_rate = &od_tuners->sampling_rate; 221 sampling_rate = &od_tuners->sampling_rate;
198 ignore_nice = od_tuners->ignore_nice; 222 ignore_nice = od_tuners->ignore_nice;
223 od_ops = dbs_data->gov_ops;
199 } 224 }
200 225
201 switch (event) { 226 switch (event) {
202 case CPUFREQ_GOV_START: 227 case CPUFREQ_GOV_START:
203 if ((!cpu_online(cpu)) || (!policy->cur)) 228 if (!policy->cur)
204 return -EINVAL; 229 return -EINVAL;
205 230
206 mutex_lock(&dbs_data->mutex); 231 mutex_lock(&dbs_data->mutex);
207 232
208 dbs_data->enable++;
209 cpu_cdbs->cpu = cpu;
210 for_each_cpu(j, policy->cpus) { 233 for_each_cpu(j, policy->cpus) {
211 struct cpu_dbs_common_info *j_cdbs; 234 struct cpu_dbs_common_info *j_cdbs =
212 j_cdbs = dbs_data->get_cpu_cdbs(j); 235 dbs_data->get_cpu_cdbs(j);
213 236
237 j_cdbs->cpu = j;
214 j_cdbs->cur_policy = policy; 238 j_cdbs->cur_policy = policy;
215 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, 239 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
216 &j_cdbs->prev_cpu_wall); 240 &j_cdbs->prev_cpu_wall);
217 if (ignore_nice) 241 if (ignore_nice)
218 j_cdbs->prev_cpu_nice = 242 j_cdbs->prev_cpu_nice =
219 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 243 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
220 }
221 244
222 /* 245 mutex_init(&j_cdbs->timer_mutex);
223 * Start the timerschedule work, when this governor is used for 246 INIT_DEFERRABLE_WORK(&j_cdbs->work,
224 * first time 247 dbs_data->gov_dbs_timer);
225 */
226 if (dbs_data->enable != 1)
227 goto second_time;
228
229 rc = sysfs_create_group(cpufreq_global_kobject,
230 dbs_data->attr_group);
231 if (rc) {
232 mutex_unlock(&dbs_data->mutex);
233 return rc;
234 } 248 }
235 249
236 /* policy latency is in nS. Convert it to uS first */ 250 if (!policy->governor->initialized) {
237 latency = policy->cpuinfo.transition_latency / 1000; 251 rc = sysfs_create_group(cpufreq_global_kobject,
238 if (latency == 0) 252 dbs_data->attr_group);
239 latency = 1; 253 if (rc) {
254 mutex_unlock(&dbs_data->mutex);
255 return rc;
256 }
257 }
240 258
241 /* 259 /*
242 * conservative does not implement micro like ondemand 260 * conservative does not implement micro like ondemand
243 * governor, thus we are bound to jiffes/HZ 261 * governor, thus we are bound to jiffes/HZ
244 */ 262 */
245 if (dbs_data->governor == GOV_CONSERVATIVE) { 263 if (dbs_data->governor == GOV_CONSERVATIVE) {
246 struct cs_ops *ops = dbs_data->gov_ops; 264 cs_dbs_info->down_skip = 0;
265 cs_dbs_info->enable = 1;
266 cs_dbs_info->requested_freq = policy->cur;
247 267
248 cpufreq_register_notifier(ops->notifier_block, 268 if (!policy->governor->initialized) {
249 CPUFREQ_TRANSITION_NOTIFIER); 269 cpufreq_register_notifier(cs_ops->notifier_block,
270 CPUFREQ_TRANSITION_NOTIFIER);
250 271
251 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * 272 dbs_data->min_sampling_rate =
252 jiffies_to_usecs(10); 273 MIN_SAMPLING_RATE_RATIO *
274 jiffies_to_usecs(10);
275 }
253 } else { 276 } else {
254 struct od_ops *ops = dbs_data->gov_ops; 277 od_dbs_info->rate_mult = 1;
278 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
279 od_ops->powersave_bias_init_cpu(cpu);
255 280
256 od_tuners->io_is_busy = ops->io_busy(); 281 if (!policy->governor->initialized)
282 od_tuners->io_is_busy = od_ops->io_busy();
257 } 283 }
258 284
285 if (policy->governor->initialized)
286 goto unlock;
287
288 /* policy latency is in nS. Convert it to uS first */
289 latency = policy->cpuinfo.transition_latency / 1000;
290 if (latency == 0)
291 latency = 1;
292
259 /* Bring kernel and HW constraints together */ 293 /* Bring kernel and HW constraints together */
260 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, 294 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
261 MIN_LATENCY_MULTIPLIER * latency); 295 MIN_LATENCY_MULTIPLIER * latency);
262 *sampling_rate = max(dbs_data->min_sampling_rate, latency * 296 *sampling_rate = max(dbs_data->min_sampling_rate, latency *
263 LATENCY_MULTIPLIER); 297 LATENCY_MULTIPLIER);
264 298unlock:
265second_time:
266 if (dbs_data->governor == GOV_CONSERVATIVE) {
267 cs_dbs_info->down_skip = 0;
268 cs_dbs_info->enable = 1;
269 cs_dbs_info->requested_freq = policy->cur;
270 } else {
271 struct od_ops *ops = dbs_data->gov_ops;
272 od_dbs_info->rate_mult = 1;
273 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
274 ops->powersave_bias_init_cpu(cpu);
275 }
276 mutex_unlock(&dbs_data->mutex); 299 mutex_unlock(&dbs_data->mutex);
277 300
278 mutex_init(&cpu_cdbs->timer_mutex); 301 /* Initiate timer time stamp */
279 dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate); 302 cpu_cdbs->time_stamp = ktime_get();
303
304 for_each_cpu(j, policy->cpus)
305 dbs_timer_init(dbs_data, j, *sampling_rate);
280 break; 306 break;
281 307
282 case CPUFREQ_GOV_STOP: 308 case CPUFREQ_GOV_STOP:
283 if (dbs_data->governor == GOV_CONSERVATIVE) 309 if (dbs_data->governor == GOV_CONSERVATIVE)
284 cs_dbs_info->enable = 0; 310 cs_dbs_info->enable = 0;
285 311
286 dbs_timer_exit(cpu_cdbs); 312 for_each_cpu(j, policy->cpus)
313 dbs_timer_exit(dbs_data, j);
287 314
288 mutex_lock(&dbs_data->mutex); 315 mutex_lock(&dbs_data->mutex);
289 mutex_destroy(&cpu_cdbs->timer_mutex); 316 mutex_destroy(&cpu_cdbs->timer_mutex);
290 dbs_data->enable--;
291 if (!dbs_data->enable) {
292 struct cs_ops *ops = dbs_data->gov_ops;
293 317
318 if (policy->governor->initialized == 1) {
294 sysfs_remove_group(cpufreq_global_kobject, 319 sysfs_remove_group(cpufreq_global_kobject,
295 dbs_data->attr_group); 320 dbs_data->attr_group);
296 if (dbs_data->governor == GOV_CONSERVATIVE) 321 if (dbs_data->governor == GOV_CONSERVATIVE)
297 cpufreq_unregister_notifier(ops->notifier_block, 322 cpufreq_unregister_notifier(cs_ops->notifier_block,
298 CPUFREQ_TRANSITION_NOTIFIER); 323 CPUFREQ_TRANSITION_NOTIFIER);
299 } 324 }
300 mutex_unlock(&dbs_data->mutex); 325 mutex_unlock(&dbs_data->mutex);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index f6616540c53d..d2ac91150600 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
82 * the governor or limits. 82 * the governor or limits.
83 */ 83 */
84 struct mutex timer_mutex; 84 struct mutex timer_mutex;
85 ktime_t time_stamp;
85}; 86};
86 87
87struct od_cpu_dbs_info_s { 88struct od_cpu_dbs_info_s {
@@ -108,7 +109,7 @@ struct od_dbs_tuners {
108 unsigned int sampling_rate; 109 unsigned int sampling_rate;
109 unsigned int sampling_down_factor; 110 unsigned int sampling_down_factor;
110 unsigned int up_threshold; 111 unsigned int up_threshold;
111 unsigned int down_differential; 112 unsigned int adj_up_threshold;
112 unsigned int powersave_bias; 113 unsigned int powersave_bias;
113 unsigned int io_is_busy; 114 unsigned int io_is_busy;
114}; 115};
@@ -129,7 +130,6 @@ struct dbs_data {
129 #define GOV_CONSERVATIVE 1 130 #define GOV_CONSERVATIVE 1
130 int governor; 131 int governor;
131 unsigned int min_sampling_rate; 132 unsigned int min_sampling_rate;
132 unsigned int enable; /* number of CPUs using this policy */
133 struct attribute_group *attr_group; 133 struct attribute_group *attr_group;
134 void *tuners; 134 void *tuners;
135 135
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
171 171
172u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); 172u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
173void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); 173void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
174bool need_load_eval(struct cpu_dbs_common_info *cdbs,
175 unsigned int sampling_rate);
174int cpufreq_governor_dbs(struct dbs_data *dbs_data, 176int cpufreq_governor_dbs(struct dbs_data *dbs_data,
175 struct cpufreq_policy *policy, unsigned int event); 177 struct cpufreq_policy *policy, unsigned int event);
176#endif /* _CPUFREQ_GOVERNER_H */ 178#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7731f7c7e79a..f3eb26cd848f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -26,7 +26,7 @@
26 26
27#include "cpufreq_governor.h" 27#include "cpufreq_governor.h"
28 28
29/* On-demand governor macors */ 29/* On-demand governor macros */
30#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) 30#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
31#define DEF_FREQUENCY_UP_THRESHOLD (80) 31#define DEF_FREQUENCY_UP_THRESHOLD (80)
32#define DEF_SAMPLING_DOWN_FACTOR (1) 32#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand;
47static struct od_dbs_tuners od_tuners = { 47static struct od_dbs_tuners od_tuners = {
48 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 48 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
49 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 49 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
50 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, 50 .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
51 DEF_FREQUENCY_DOWN_DIFFERENTIAL,
51 .ignore_nice = 0, 52 .ignore_nice = 0,
52 .powersave_bias = 0, 53 .powersave_bias = 0,
53}; 54};
@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu)
65 * efficient idling at a higher frequency/voltage is. 66 * efficient idling at a higher frequency/voltage is.
66 * Pavel Machek says this is not so for various generations of AMD and old 67 * Pavel Machek says this is not so for various generations of AMD and old
67 * Intel systems. 68 * Intel systems.
68 * Mike Chan (androidlcom) calis this is also not true for ARM. 69 * Mike Chan (android.com) claims this is also not true for ARM.
69 * Because of this, whitelist specific known (series) of CPUs by default, and 70 * Because of this, whitelist specific known (series) of CPUs by default, and
70 * leave all others up to the user. 71 * leave all others up to the user.
71 */ 72 */
@@ -73,7 +74,7 @@ static int should_io_be_busy(void)
73{ 74{
74#if defined(CONFIG_X86) 75#if defined(CONFIG_X86)
75 /* 76 /*
76 * For Intel, Core 2 (model 15) andl later have an efficient idle. 77 * For Intel, Core 2 (model 15) and later have an efficient idle.
77 */ 78 */
78 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 79 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
79 boot_cpu_data.x86 == 6 && 80 boot_cpu_data.x86 == 6 &&
@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
158 159
159/* 160/*
160 * Every sampling_rate, we check, if current idle time is less than 20% 161 * Every sampling_rate, we check, if current idle time is less than 20%
161 * (default), then we try to increase frequency Every sampling_rate, we look for 162 * (default), then we try to increase frequency. Every sampling_rate, we look
162 * a the lowest frequency which can sustain the load while keeping idle time 163 * for the lowest frequency which can sustain the load while keeping idle time
163 * over 30%. If such a frequency exist, we try to decrease to this frequency. 164 * over 30%. If such a frequency exist, we try to decrease to this frequency.
164 * 165 *
165 * Any frequency increase takes it to the maximum frequency. Frequency reduction 166 * Any frequency increase takes it to the maximum frequency. Frequency reduction
@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
192 * support the current CPU usage without triggering the up policy. To be 193 * support the current CPU usage without triggering the up policy. To be
193 * safe, we focus 10 points under the threshold. 194 * safe, we focus 10 points under the threshold.
194 */ 195 */
195 if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * 196 if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
196 policy->cur) {
197 unsigned int freq_next; 197 unsigned int freq_next;
198 freq_next = load_freq / (od_tuners.up_threshold - 198 freq_next = load_freq / od_tuners.adj_up_threshold;
199 od_tuners.down_differential);
200 199
201 /* No longer fully busy, reset rate_mult */ 200 /* No longer fully busy, reset rate_mult */
202 dbs_info->rate_mult = 1; 201 dbs_info->rate_mult = 1;
@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
218 217
219static void od_dbs_timer(struct work_struct *work) 218static void od_dbs_timer(struct work_struct *work)
220{ 219{
220 struct delayed_work *dw = to_delayed_work(work);
221 struct od_cpu_dbs_info_s *dbs_info = 221 struct od_cpu_dbs_info_s *dbs_info =
222 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); 222 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
223 unsigned int cpu = dbs_info->cdbs.cpu; 223 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
224 int delay, sample_type = dbs_info->sample_type; 224 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
225 cpu);
226 int delay, sample_type = core_dbs_info->sample_type;
227 bool eval_load;
225 228
226 mutex_lock(&dbs_info->cdbs.timer_mutex); 229 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
230 eval_load = need_load_eval(&core_dbs_info->cdbs,
231 od_tuners.sampling_rate);
227 232
228 /* Common NORMAL_SAMPLE setup */ 233 /* Common NORMAL_SAMPLE setup */
229 dbs_info->sample_type = OD_NORMAL_SAMPLE; 234 core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
230 if (sample_type == OD_SUB_SAMPLE) { 235 if (sample_type == OD_SUB_SAMPLE) {
231 delay = dbs_info->freq_lo_jiffies; 236 delay = core_dbs_info->freq_lo_jiffies;
232 __cpufreq_driver_target(dbs_info->cdbs.cur_policy, 237 if (eval_load)
233 dbs_info->freq_lo, CPUFREQ_RELATION_H); 238 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
239 core_dbs_info->freq_lo,
240 CPUFREQ_RELATION_H);
234 } else { 241 } else {
235 dbs_check_cpu(&od_dbs_data, cpu); 242 if (eval_load)
236 if (dbs_info->freq_lo) { 243 dbs_check_cpu(&od_dbs_data, cpu);
244 if (core_dbs_info->freq_lo) {
237 /* Setup timer for SUB_SAMPLE */ 245 /* Setup timer for SUB_SAMPLE */
238 dbs_info->sample_type = OD_SUB_SAMPLE; 246 core_dbs_info->sample_type = OD_SUB_SAMPLE;
239 delay = dbs_info->freq_hi_jiffies; 247 delay = core_dbs_info->freq_hi_jiffies;
240 } else { 248 } else {
241 delay = delay_for_sampling_rate(od_tuners.sampling_rate 249 delay = delay_for_sampling_rate(od_tuners.sampling_rate
242 * dbs_info->rate_mult); 250 * core_dbs_info->rate_mult);
243 } 251 }
244 } 252 }
245 253
246 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); 254 schedule_delayed_work_on(smp_processor_id(), dw, delay);
247 mutex_unlock(&dbs_info->cdbs.timer_mutex); 255 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
248} 256}
249 257
250/************************** sysfs interface ************************/ 258/************************** sysfs interface ************************/
@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
259 * update_sampling_rate - update sampling rate effective immediately if needed. 267 * update_sampling_rate - update sampling rate effective immediately if needed.
260 * @new_rate: new sampling rate 268 * @new_rate: new sampling rate
261 * 269 *
262 * If new rate is smaller than the old, simply updaing 270 * If new rate is smaller than the old, simply updating
263 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the 271 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
264 * original sampling_rate was 1 second and the requested new sampling rate is 10 272 * original sampling_rate was 1 second and the requested new sampling rate is 10
265 * ms because the user needs immediate reaction from ondemand governor, but not 273 * ms because the user needs immediate reaction from ondemand governor, but not
@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate)
287 cpufreq_cpu_put(policy); 295 cpufreq_cpu_put(policy);
288 continue; 296 continue;
289 } 297 }
290 dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); 298 dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
291 cpufreq_cpu_put(policy); 299 cpufreq_cpu_put(policy);
292 300
293 mutex_lock(&dbs_info->cdbs.timer_mutex); 301 mutex_lock(&dbs_info->cdbs.timer_mutex);
@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate)
306 cancel_delayed_work_sync(&dbs_info->cdbs.work); 314 cancel_delayed_work_sync(&dbs_info->cdbs.work);
307 mutex_lock(&dbs_info->cdbs.timer_mutex); 315 mutex_lock(&dbs_info->cdbs.timer_mutex);
308 316
309 schedule_delayed_work_on(dbs_info->cdbs.cpu, 317 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
310 &dbs_info->cdbs.work,
311 usecs_to_jiffies(new_rate)); 318 usecs_to_jiffies(new_rate));
312 319
313 } 320 }
@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
351 input < MIN_FREQUENCY_UP_THRESHOLD) { 358 input < MIN_FREQUENCY_UP_THRESHOLD) {
352 return -EINVAL; 359 return -EINVAL;
353 } 360 }
361 /* Calculate the new adj_up_threshold */
362 od_tuners.adj_up_threshold += input;
363 od_tuners.adj_up_threshold -= od_tuners.up_threshold;
364
354 od_tuners.up_threshold = input; 365 od_tuners.up_threshold = input;
355 return count; 366 return count;
356} 367}
@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void)
507 if (idle_time != -1ULL) { 518 if (idle_time != -1ULL) {
508 /* Idle micro accounting is supported. Use finer thresholds */ 519 /* Idle micro accounting is supported. Use finer thresholds */
509 od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; 520 od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
510 od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; 521 od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
522 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
511 /* 523 /*
512 * In nohz/micro accounting case we set the minimum frequency 524 * In nohz/micro accounting case we set the minimum frequency
513 * not depending on HZ, but fixed (very low). The deferred 525 * not depending on HZ, but fixed (very low). The deferred
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 9d7732b81044..2fd779eb1ed1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -24,12 +24,6 @@
24 24
25static spinlock_t cpufreq_stats_lock; 25static spinlock_t cpufreq_stats_lock;
26 26
27#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
28static struct freq_attr _attr_##_name = {\
29 .attr = {.name = __stringify(_name), .mode = _mode, }, \
30 .show = _show,\
31};
32
33struct cpufreq_stats { 27struct cpufreq_stats {
34 unsigned int cpu; 28 unsigned int cpu;
35 unsigned int total_trans; 29 unsigned int total_trans;
@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
136 return PAGE_SIZE; 130 return PAGE_SIZE;
137 return len; 131 return len;
138} 132}
139CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table); 133cpufreq_freq_attr_ro(trans_table);
140#endif 134#endif
141 135
142CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans); 136cpufreq_freq_attr_ro(total_trans);
143CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state); 137cpufreq_freq_attr_ro(time_in_state);
144 138
145static struct attribute *default_attrs[] = { 139static struct attribute *default_attrs[] = {
146 &_attr_total_trans.attr, 140 &total_trans.attr,
147 &_attr_time_in_state.attr, 141 &time_in_state.attr,
148#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 142#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
149 &_attr_trans_table.attr, 143 &trans_table.attr,
150#endif 144#endif
151 NULL 145 NULL
152}; 146};
@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
170static void cpufreq_stats_free_table(unsigned int cpu) 164static void cpufreq_stats_free_table(unsigned int cpu)
171{ 165{
172 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); 166 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
167
173 if (stat) { 168 if (stat) {
169 pr_debug("%s: Free stat table\n", __func__);
174 kfree(stat->time_in_state); 170 kfree(stat->time_in_state);
175 kfree(stat); 171 kfree(stat);
172 per_cpu(cpufreq_stats_table, cpu) = NULL;
176 } 173 }
177 per_cpu(cpufreq_stats_table, cpu) = NULL;
178} 174}
179 175
180/* must be called early in the CPU removal sequence (before 176/* must be called early in the CPU removal sequence (before
@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu)
183static void cpufreq_stats_free_sysfs(unsigned int cpu) 179static void cpufreq_stats_free_sysfs(unsigned int cpu)
184{ 180{
185 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
186 if (policy && policy->cpu == cpu) 182
183 if (!cpufreq_frequency_get_table(cpu))
184 return;
185
186 if (policy && !policy_is_shared(policy)) {
187 pr_debug("%s: Free sysfs stat\n", __func__);
187 sysfs_remove_group(&policy->kobj, &stats_attr_group); 188 sysfs_remove_group(&policy->kobj, &stats_attr_group);
189 }
188 if (policy) 190 if (policy)
189 cpufreq_cpu_put(policy); 191 cpufreq_cpu_put(policy);
190} 192}
@@ -262,6 +264,19 @@ error_get_fail:
262 return ret; 264 return ret;
263} 265}
264 266
267static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
268{
269 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
270 policy->last_cpu);
271
272 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
273 policy->cpu, policy->last_cpu);
274 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
275 policy->last_cpu);
276 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
277 stat->cpu = policy->cpu;
278}
279
265static int cpufreq_stat_notifier_policy(struct notifier_block *nb, 280static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
266 unsigned long val, void *data) 281 unsigned long val, void *data)
267{ 282{
@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
269 struct cpufreq_policy *policy = data; 284 struct cpufreq_policy *policy = data;
270 struct cpufreq_frequency_table *table; 285 struct cpufreq_frequency_table *table;
271 unsigned int cpu = policy->cpu; 286 unsigned int cpu = policy->cpu;
287
288 if (val == CPUFREQ_UPDATE_POLICY_CPU) {
289 cpufreq_stats_update_policy_cpu(policy);
290 return 0;
291 }
292
272 if (val != CPUFREQ_NOTIFY) 293 if (val != CPUFREQ_NOTIFY)
273 return 0; 294 return 0;
274 table = cpufreq_frequency_get_table(cpu); 295 table = cpufreq_frequency_get_table(cpu);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index c8c3d293cc57..bbeb9c0720a6 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
118 118
119 switch (event) { 119 switch (event) {
120 case CPUFREQ_GOV_START: 120 case CPUFREQ_GOV_START:
121 if (!cpu_online(cpu))
122 return -EINVAL;
123 BUG_ON(!policy->cur); 121 BUG_ON(!policy->cur);
124 mutex_lock(&userspace_mutex); 122 mutex_lock(&userspace_mutex);
125 123
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index 4f154bc0ebe4..79a84860ea56 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -128,9 +128,7 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
128 policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ 128 policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
129 129
130 /* policy sharing between dual CPUs */ 130 /* policy sharing between dual CPUs */
131 cpumask_copy(policy->cpus, cpu_present_mask); 131 cpumask_setall(policy->cpus);
132
133 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
134 132
135 return 0; 133 return 0;
136} 134}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 7012ea8bf1e7..69b676dd3358 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -42,51 +42,56 @@ static unsigned int exynos_getspeed(unsigned int cpu)
42 return clk_get_rate(exynos_info->cpu_clk) / 1000; 42 return clk_get_rate(exynos_info->cpu_clk) / 1000;
43} 43}
44 44
45static int exynos_target(struct cpufreq_policy *policy, 45static int exynos_cpufreq_get_index(unsigned int freq)
46 unsigned int target_freq, 46{
47 unsigned int relation) 47 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
48 int index;
49
50 for (index = 0;
51 freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
52 if (freq_table[index].frequency == freq)
53 break;
54
55 if (freq_table[index].frequency == CPUFREQ_TABLE_END)
56 return -EINVAL;
57
58 return index;
59}
60
61static int exynos_cpufreq_scale(unsigned int target_freq)
48{ 62{
49 unsigned int index, old_index;
50 unsigned int arm_volt, safe_arm_volt = 0;
51 int ret = 0;
52 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; 63 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
53 unsigned int *volt_table = exynos_info->volt_table; 64 unsigned int *volt_table = exynos_info->volt_table;
65 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
66 unsigned int arm_volt, safe_arm_volt = 0;
54 unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; 67 unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
55 68 int index, old_index;
56 mutex_lock(&cpufreq_lock); 69 int ret = 0;
57 70
58 freqs.old = policy->cur; 71 freqs.old = policy->cur;
72 freqs.new = target_freq;
73 freqs.cpu = policy->cpu;
59 74
60 if (frequency_locked && target_freq != locking_frequency) { 75 if (freqs.new == freqs.old)
61 ret = -EAGAIN;
62 goto out; 76 goto out;
63 }
64 77
65 /* 78 /*
66 * The policy max have been changed so that we cannot get proper 79 * The policy max have been changed so that we cannot get proper
67 * old_index with cpufreq_frequency_table_target(). Thus, ignore 80 * old_index with cpufreq_frequency_table_target(). Thus, ignore
68 * policy and get the index from the raw freqeuncy table. 81 * policy and get the index from the raw freqeuncy table.
69 */ 82 */
70 for (old_index = 0; 83 old_index = exynos_cpufreq_get_index(freqs.old);
71 freq_table[old_index].frequency != CPUFREQ_TABLE_END; 84 if (old_index < 0) {
72 old_index++) 85 ret = old_index;
73 if (freq_table[old_index].frequency == freqs.old)
74 break;
75
76 if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
77 ret = -EINVAL;
78 goto out; 86 goto out;
79 } 87 }
80 88
81 if (cpufreq_frequency_table_target(policy, freq_table, 89 index = exynos_cpufreq_get_index(target_freq);
82 target_freq, relation, &index)) { 90 if (index < 0) {
83 ret = -EINVAL; 91 ret = index;
84 goto out; 92 goto out;
85 } 93 }
86 94
87 freqs.new = freq_table[index].frequency;
88 freqs.cpu = policy->cpu;
89
90 /* 95 /*
91 * ARM clock source will be changed APLL to MPLL temporary 96 * ARM clock source will be changed APLL to MPLL temporary
92 * To support this level, need to control regulator for 97 * To support this level, need to control regulator for
@@ -106,15 +111,25 @@ static int exynos_target(struct cpufreq_policy *policy,
106 /* When the new frequency is higher than current frequency */ 111 /* When the new frequency is higher than current frequency */
107 if ((freqs.new > freqs.old) && !safe_arm_volt) { 112 if ((freqs.new > freqs.old) && !safe_arm_volt) {
108 /* Firstly, voltage up to increase frequency */ 113 /* Firstly, voltage up to increase frequency */
109 regulator_set_voltage(arm_regulator, arm_volt, 114 ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
110 arm_volt); 115 if (ret) {
116 pr_err("%s: failed to set cpu voltage to %d\n",
117 __func__, arm_volt);
118 goto out;
119 }
111 } 120 }
112 121
113 if (safe_arm_volt) 122 if (safe_arm_volt) {
114 regulator_set_voltage(arm_regulator, safe_arm_volt, 123 ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
115 safe_arm_volt); 124 safe_arm_volt);
116 if (freqs.new != freqs.old) 125 if (ret) {
117 exynos_info->set_freq(old_index, index); 126 pr_err("%s: failed to set cpu voltage to %d\n",
127 __func__, safe_arm_volt);
128 goto out;
129 }
130 }
131
132 exynos_info->set_freq(old_index, index);
118 133
119 for_each_cpu(freqs.cpu, policy->cpus) 134 for_each_cpu(freqs.cpu, policy->cpus)
120 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 135 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
@@ -125,8 +140,44 @@ static int exynos_target(struct cpufreq_policy *policy,
125 /* down the voltage after frequency change */ 140 /* down the voltage after frequency change */
126 regulator_set_voltage(arm_regulator, arm_volt, 141 regulator_set_voltage(arm_regulator, arm_volt,
127 arm_volt); 142 arm_volt);
143 if (ret) {
144 pr_err("%s: failed to set cpu voltage to %d\n",
145 __func__, arm_volt);
146 goto out;
147 }
148 }
149
150out:
151
152 cpufreq_cpu_put(policy);
153
154 return ret;
155}
156
157static int exynos_target(struct cpufreq_policy *policy,
158 unsigned int target_freq,
159 unsigned int relation)
160{
161 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
162 unsigned int index;
163 unsigned int new_freq;
164 int ret = 0;
165
166 mutex_lock(&cpufreq_lock);
167
168 if (frequency_locked)
169 goto out;
170
171 if (cpufreq_frequency_table_target(policy, freq_table,
172 target_freq, relation, &index)) {
173 ret = -EINVAL;
174 goto out;
128 } 175 }
129 176
177 new_freq = freq_table[index].frequency;
178
179 ret = exynos_cpufreq_scale(new_freq);
180
130out: 181out:
131 mutex_unlock(&cpufreq_lock); 182 mutex_unlock(&cpufreq_lock);
132 183
@@ -163,51 +214,26 @@ static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
163static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, 214static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
164 unsigned long pm_event, void *v) 215 unsigned long pm_event, void *v)
165{ 216{
166 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ 217 int ret;
167 static unsigned int saved_frequency;
168 unsigned int temp;
169 218
170 mutex_lock(&cpufreq_lock);
171 switch (pm_event) { 219 switch (pm_event) {
172 case PM_SUSPEND_PREPARE: 220 case PM_SUSPEND_PREPARE:
173 if (frequency_locked) 221 mutex_lock(&cpufreq_lock);
174 goto out;
175
176 frequency_locked = true; 222 frequency_locked = true;
223 mutex_unlock(&cpufreq_lock);
177 224
178 if (locking_frequency) { 225 ret = exynos_cpufreq_scale(locking_frequency);
179 saved_frequency = exynos_getspeed(0); 226 if (ret < 0)
227 return NOTIFY_BAD;
180 228
181 mutex_unlock(&cpufreq_lock);
182 exynos_target(policy, locking_frequency,
183 CPUFREQ_RELATION_H);
184 mutex_lock(&cpufreq_lock);
185 }
186 break; 229 break;
187 230
188 case PM_POST_SUSPEND: 231 case PM_POST_SUSPEND:
189 if (saved_frequency) { 232 mutex_lock(&cpufreq_lock);
190 /*
191 * While frequency_locked, only locking_frequency
192 * is valid for target(). In order to use
193 * saved_frequency while keeping frequency_locked,
194 * we temporarly overwrite locking_frequency.
195 */
196 temp = locking_frequency;
197 locking_frequency = saved_frequency;
198
199 mutex_unlock(&cpufreq_lock);
200 exynos_target(policy, locking_frequency,
201 CPUFREQ_RELATION_H);
202 mutex_lock(&cpufreq_lock);
203
204 locking_frequency = temp;
205 }
206 frequency_locked = false; 233 frequency_locked = false;
234 mutex_unlock(&cpufreq_lock);
207 break; 235 break;
208 } 236 }
209out:
210 mutex_unlock(&cpufreq_lock);
211 237
212 return NOTIFY_OK; 238 return NOTIFY_OK;
213} 239}
@@ -222,35 +248,34 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
222 248
223 cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); 249 cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
224 250
225 locking_frequency = exynos_getspeed(0);
226
227 /* set the transition latency value */ 251 /* set the transition latency value */
228 policy->cpuinfo.transition_latency = 100000; 252 policy->cpuinfo.transition_latency = 100000;
229 253
230 /* 254 cpumask_setall(policy->cpus);
231 * EXYNOS4 multi-core processors has 2 cores
232 * that the frequency cannot be set independently.
233 * Each cpu is bound to the same speed.
234 * So the affected cpu is all of the cpus.
235 */
236 if (num_online_cpus() == 1) {
237 cpumask_copy(policy->related_cpus, cpu_possible_mask);
238 cpumask_copy(policy->cpus, cpu_online_mask);
239 } else {
240 policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
241 cpumask_setall(policy->cpus);
242 }
243 255
244 return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); 256 return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
245} 257}
246 258
259static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
260{
261 cpufreq_frequency_table_put_attr(policy->cpu);
262 return 0;
263}
264
265static struct freq_attr *exynos_cpufreq_attr[] = {
266 &cpufreq_freq_attr_scaling_available_freqs,
267 NULL,
268};
269
247static struct cpufreq_driver exynos_driver = { 270static struct cpufreq_driver exynos_driver = {
248 .flags = CPUFREQ_STICKY, 271 .flags = CPUFREQ_STICKY,
249 .verify = exynos_verify_speed, 272 .verify = exynos_verify_speed,
250 .target = exynos_target, 273 .target = exynos_target,
251 .get = exynos_getspeed, 274 .get = exynos_getspeed,
252 .init = exynos_cpufreq_cpu_init, 275 .init = exynos_cpufreq_cpu_init,
276 .exit = exynos_cpufreq_cpu_exit,
253 .name = "exynos_cpufreq", 277 .name = "exynos_cpufreq",
278 .attr = exynos_cpufreq_attr,
254#ifdef CONFIG_PM 279#ifdef CONFIG_PM
255 .suspend = exynos_cpufreq_suspend, 280 .suspend = exynos_cpufreq_suspend,
256 .resume = exynos_cpufreq_resume, 281 .resume = exynos_cpufreq_resume,
@@ -288,6 +313,8 @@ static int __init exynos_cpufreq_init(void)
288 goto err_vdd_arm; 313 goto err_vdd_arm;
289 } 314 }
290 315
316 locking_frequency = exynos_getspeed(0);
317
291 register_pm_notifier(&exynos_cpufreq_nb); 318 register_pm_notifier(&exynos_cpufreq_nb);
292 319
293 if (cpufreq_register_driver(&exynos_driver)) { 320 if (cpufreq_register_driver(&exynos_driver)) {
@@ -299,8 +326,7 @@ static int __init exynos_cpufreq_init(void)
299err_cpufreq: 326err_cpufreq:
300 unregister_pm_notifier(&exynos_cpufreq_nb); 327 unregister_pm_notifier(&exynos_cpufreq_nb);
301 328
302 if (!IS_ERR(arm_regulator)) 329 regulator_put(arm_regulator);
303 regulator_put(arm_regulator);
304err_vdd_arm: 330err_vdd_arm:
305 kfree(exynos_info); 331 kfree(exynos_info);
306 pr_debug("%s: failed initialization\n", __func__); 332 pr_debug("%s: failed initialization\n", __func__);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index fb148fa27678..de91755e2556 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -20,97 +20,37 @@
20#include <mach/regs-clock.h> 20#include <mach/regs-clock.h>
21#include <mach/cpufreq.h> 21#include <mach/cpufreq.h>
22 22
23#define CPUFREQ_LEVEL_END L5
24
25static int max_support_idx = L0;
26static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
27
28static struct clk *cpu_clk; 23static struct clk *cpu_clk;
29static struct clk *moutcore; 24static struct clk *moutcore;
30static struct clk *mout_mpll; 25static struct clk *mout_mpll;
31static struct clk *mout_apll; 26static struct clk *mout_apll;
32 27
33struct cpufreq_clkdiv { 28static unsigned int exynos4210_volt_table[] = {
34 unsigned int index;
35 unsigned int clkdiv;
36};
37
38static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
39 1250000, 1150000, 1050000, 975000, 950000, 29 1250000, 1150000, 1050000, 975000, 950000,
40}; 30};
41 31
42
43static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
44
45static struct cpufreq_frequency_table exynos4210_freq_table[] = { 32static struct cpufreq_frequency_table exynos4210_freq_table[] = {
46 {L0, 1200*1000}, 33 {L0, 1200 * 1000},
47 {L1, 1000*1000}, 34 {L1, 1000 * 1000},
48 {L2, 800*1000}, 35 {L2, 800 * 1000},
49 {L3, 500*1000}, 36 {L3, 500 * 1000},
50 {L4, 200*1000}, 37 {L4, 200 * 1000},
51 {0, CPUFREQ_TABLE_END}, 38 {0, CPUFREQ_TABLE_END},
52}; 39};
53 40
54static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = { 41static struct apll_freq apll_freq_4210[] = {
55 /* 42 /*
56 * Clock divider value for following 43 * values:
57 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH, 44 * freq
58 * DIVATB, DIVPCLK_DBG, DIVAPLL } 45 * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
46 * clock divider for COPY, HPM, RESERVED
47 * PLL M, P, S
59 */ 48 */
60 49 APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
61 /* ARM L0: 1200MHz */ 50 APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
62 { 0, 3, 7, 3, 4, 1, 7 }, 51 APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
63 52 APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
64 /* ARM L1: 1000MHz */ 53 APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
65 { 0, 3, 7, 3, 4, 1, 7 },
66
67 /* ARM L2: 800MHz */
68 { 0, 3, 7, 3, 3, 1, 7 },
69
70 /* ARM L3: 500MHz */
71 { 0, 3, 7, 3, 3, 1, 7 },
72
73 /* ARM L4: 200MHz */
74 { 0, 1, 3, 1, 3, 1, 0 },
75};
76
77static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
78 /*
79 * Clock divider value for following
80 * { DIVCOPY, DIVHPM }
81 */
82
83 /* ARM L0: 1200MHz */
84 { 5, 0 },
85
86 /* ARM L1: 1000MHz */
87 { 4, 0 },
88
89 /* ARM L2: 800MHz */
90 { 3, 0 },
91
92 /* ARM L3: 500MHz */
93 { 3, 0 },
94
95 /* ARM L4: 200MHz */
96 { 3, 0 },
97};
98
99static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
100 /* APLL FOUT L0: 1200MHz */
101 ((150 << 16) | (3 << 8) | 1),
102
103 /* APLL FOUT L1: 1000MHz */
104 ((250 << 16) | (6 << 8) | 1),
105
106 /* APLL FOUT L2: 800MHz */
107 ((200 << 16) | (6 << 8) | 1),
108
109 /* APLL FOUT L3: 500MHz */
110 ((250 << 16) | (6 << 8) | 2),
111
112 /* APLL FOUT L4: 200MHz */
113 ((200 << 16) | (6 << 8) | 3),
114}; 54};
115 55
116static void exynos4210_set_clkdiv(unsigned int div_index) 56static void exynos4210_set_clkdiv(unsigned int div_index)
@@ -119,7 +59,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
119 59
120 /* Change Divider - CPU0 */ 60 /* Change Divider - CPU0 */
121 61
122 tmp = exynos4210_clkdiv_table[div_index].clkdiv; 62 tmp = apll_freq_4210[div_index].clk_div_cpu0;
123 63
124 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); 64 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
125 65
@@ -129,12 +69,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
129 69
130 /* Change Divider - CPU1 */ 70 /* Change Divider - CPU1 */
131 71
132 tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1); 72 tmp = apll_freq_4210[div_index].clk_div_cpu1;
133
134 tmp &= ~((0x7 << 4) | 0x7);
135
136 tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
137 (clkdiv_cpu1[div_index][1] << 0));
138 73
139 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); 74 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
140 75
@@ -162,7 +97,7 @@ static void exynos4210_set_apll(unsigned int index)
162 /* 3. Change PLL PMS values */ 97 /* 3. Change PLL PMS values */
163 tmp = __raw_readl(EXYNOS4_APLL_CON0); 98 tmp = __raw_readl(EXYNOS4_APLL_CON0);
164 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); 99 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
165 tmp |= exynos4210_apll_pms_table[index]; 100 tmp |= apll_freq_4210[index].mps;
166 __raw_writel(tmp, EXYNOS4_APLL_CON0); 101 __raw_writel(tmp, EXYNOS4_APLL_CON0);
167 102
168 /* 4. wait_lock_time */ 103 /* 4. wait_lock_time */
@@ -179,10 +114,10 @@ static void exynos4210_set_apll(unsigned int index)
179 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); 114 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
180} 115}
181 116
182bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) 117static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
183{ 118{
184 unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8); 119 unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
185 unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8); 120 unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
186 121
187 return (old_pm == new_pm) ? 0 : 1; 122 return (old_pm == new_pm) ? 0 : 1;
188} 123}
@@ -200,7 +135,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
200 /* 2. Change just s value in apll m,p,s value */ 135 /* 2. Change just s value in apll m,p,s value */
201 tmp = __raw_readl(EXYNOS4_APLL_CON0); 136 tmp = __raw_readl(EXYNOS4_APLL_CON0);
202 tmp &= ~(0x7 << 0); 137 tmp &= ~(0x7 << 0);
203 tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); 138 tmp |= apll_freq_4210[new_index].mps & 0x7;
204 __raw_writel(tmp, EXYNOS4_APLL_CON0); 139 __raw_writel(tmp, EXYNOS4_APLL_CON0);
205 } else { 140 } else {
206 /* Clock Configuration Procedure */ 141 /* Clock Configuration Procedure */
@@ -214,7 +149,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
214 /* 1. Change just s value in apll m,p,s value */ 149 /* 1. Change just s value in apll m,p,s value */
215 tmp = __raw_readl(EXYNOS4_APLL_CON0); 150 tmp = __raw_readl(EXYNOS4_APLL_CON0);
216 tmp &= ~(0x7 << 0); 151 tmp &= ~(0x7 << 0);
217 tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); 152 tmp |= apll_freq_4210[new_index].mps & 0x7;
218 __raw_writel(tmp, EXYNOS4_APLL_CON0); 153 __raw_writel(tmp, EXYNOS4_APLL_CON0);
219 154
220 /* 2. Change the system clock divider values */ 155 /* 2. Change the system clock divider values */
@@ -231,8 +166,6 @@ static void exynos4210_set_frequency(unsigned int old_index,
231 166
232int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) 167int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
233{ 168{
234 int i;
235 unsigned int tmp;
236 unsigned long rate; 169 unsigned long rate;
237 170
238 cpu_clk = clk_get(NULL, "armclk"); 171 cpu_clk = clk_get(NULL, "armclk");
@@ -253,33 +186,9 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
253 if (IS_ERR(mout_apll)) 186 if (IS_ERR(mout_apll))
254 goto err_mout_apll; 187 goto err_mout_apll;
255 188
256 tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
257
258 for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
259 tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
260 EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
261 EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
262 EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
263 EXYNOS4_CLKDIV_CPU0_ATB_MASK |
264 EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
265 EXYNOS4_CLKDIV_CPU0_APLL_MASK);
266
267 tmp |= ((clkdiv_cpu0[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
268 (clkdiv_cpu0[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
269 (clkdiv_cpu0[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
270 (clkdiv_cpu0[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
271 (clkdiv_cpu0[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
272 (clkdiv_cpu0[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
273 (clkdiv_cpu0[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
274
275 exynos4210_clkdiv_table[i].clkdiv = tmp;
276 }
277
278 info->mpll_freq_khz = rate; 189 info->mpll_freq_khz = rate;
279 info->pm_lock_idx = L2; 190 /* 800Mhz */
280 info->pll_safe_idx = L2; 191 info->pll_safe_idx = L2;
281 info->max_support_idx = max_support_idx;
282 info->min_support_idx = min_support_idx;
283 info->cpu_clk = cpu_clk; 192 info->cpu_clk = cpu_clk;
284 info->volt_table = exynos4210_volt_table; 193 info->volt_table = exynos4210_volt_table;
285 info->freq_table = exynos4210_freq_table; 194 info->freq_table = exynos4210_freq_table;
@@ -289,14 +198,11 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
289 return 0; 198 return 0;
290 199
291err_mout_apll: 200err_mout_apll:
292 if (!IS_ERR(mout_mpll)) 201 clk_put(mout_mpll);
293 clk_put(mout_mpll);
294err_mout_mpll: 202err_mout_mpll:
295 if (!IS_ERR(moutcore)) 203 clk_put(moutcore);
296 clk_put(moutcore);
297err_moutcore: 204err_moutcore:
298 if (!IS_ERR(cpu_clk)) 205 clk_put(cpu_clk);
299 clk_put(cpu_clk);
300 206
301 pr_debug("%s: failed initialization\n", __func__); 207 pr_debug("%s: failed initialization\n", __func__);
302 return -EINVAL; 208 return -EINVAL;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8c5a7afa5b0b..0661039e5d4a 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -20,26 +20,18 @@
20#include <mach/regs-clock.h> 20#include <mach/regs-clock.h>
21#include <mach/cpufreq.h> 21#include <mach/cpufreq.h>
22 22
23#define CPUFREQ_LEVEL_END (L13 + 1)
24
25static int max_support_idx;
26static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
27
28static struct clk *cpu_clk; 23static struct clk *cpu_clk;
29static struct clk *moutcore; 24static struct clk *moutcore;
30static struct clk *mout_mpll; 25static struct clk *mout_mpll;
31static struct clk *mout_apll; 26static struct clk *mout_apll;
32 27
33struct cpufreq_clkdiv { 28static unsigned int exynos4x12_volt_table[] = {
34 unsigned int index; 29 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
35 unsigned int clkdiv; 30 1000000, 987500, 975000, 950000, 925000, 900000, 900000
36 unsigned int clkdiv1;
37}; 31};
38 32
39static unsigned int exynos4x12_volt_table[CPUFREQ_LEVEL_END];
40
41static struct cpufreq_frequency_table exynos4x12_freq_table[] = { 33static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
42 {L0, 1500 * 1000}, 34 {L0, CPUFREQ_ENTRY_INVALID},
43 {L1, 1400 * 1000}, 35 {L1, 1400 * 1000},
44 {L2, 1300 * 1000}, 36 {L2, 1300 * 1000},
45 {L3, 1200 * 1000}, 37 {L3, 1200 * 1000},
@@ -56,247 +48,54 @@ static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
56 {0, CPUFREQ_TABLE_END}, 48 {0, CPUFREQ_TABLE_END},
57}; 49};
58 50
59static struct cpufreq_clkdiv exynos4x12_clkdiv_table[CPUFREQ_LEVEL_END]; 51static struct apll_freq *apll_freq_4x12;
60 52
61static unsigned int clkdiv_cpu0_4212[CPUFREQ_LEVEL_END][8] = { 53static struct apll_freq apll_freq_4212[] = {
62 /* 54 /*
63 * Clock divider value for following 55 * values:
64 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH, 56 * freq
65 * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 } 57 * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
58 * clock divider for COPY, HPM, RESERVED
59 * PLL M, P, S
66 */ 60 */
67 /* ARM L0: 1500 MHz */ 61 APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
68 { 0, 3, 7, 0, 6, 1, 2, 0 }, 62 APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
69 63 APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
70 /* ARM L1: 1400 MHz */ 64 APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
71 { 0, 3, 7, 0, 6, 1, 2, 0 }, 65 APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
72 66 APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
73 /* ARM L2: 1300 MHz */ 67 APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
74 { 0, 3, 7, 0, 5, 1, 2, 0 }, 68 APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
75 69 APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
76 /* ARM L3: 1200 MHz */ 70 APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
77 { 0, 3, 7, 0, 5, 1, 2, 0 }, 71 APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
78 72 APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
79 /* ARM L4: 1100 MHz */ 73 APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
80 { 0, 3, 6, 0, 4, 1, 2, 0 }, 74 APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
81
82 /* ARM L5: 1000 MHz */
83 { 0, 2, 5, 0, 4, 1, 1, 0 },
84
85 /* ARM L6: 900 MHz */
86 { 0, 2, 5, 0, 3, 1, 1, 0 },
87
88 /* ARM L7: 800 MHz */
89 { 0, 2, 5, 0, 3, 1, 1, 0 },
90
91 /* ARM L8: 700 MHz */
92 { 0, 2, 4, 0, 3, 1, 1, 0 },
93
94 /* ARM L9: 600 MHz */
95 { 0, 2, 4, 0, 3, 1, 1, 0 },
96
97 /* ARM L10: 500 MHz */
98 { 0, 2, 4, 0, 3, 1, 1, 0 },
99
100 /* ARM L11: 400 MHz */
101 { 0, 2, 4, 0, 3, 1, 1, 0 },
102
103 /* ARM L12: 300 MHz */
104 { 0, 2, 4, 0, 2, 1, 1, 0 },
105
106 /* ARM L13: 200 MHz */
107 { 0, 1, 3, 0, 1, 1, 1, 0 },
108}; 75};
109 76
110static unsigned int clkdiv_cpu0_4412[CPUFREQ_LEVEL_END][8] = { 77static struct apll_freq apll_freq_4412[] = {
111 /* 78 /*
112 * Clock divider value for following 79 * values:
113 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH, 80 * freq
114 * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 } 81 * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
115 */ 82 * clock divider for COPY, HPM, CORES
116 /* ARM L0: 1500 MHz */ 83 * PLL M, P, S
117 { 0, 3, 7, 0, 6, 1, 2, 0 },
118
119 /* ARM L1: 1400 MHz */
120 { 0, 3, 7, 0, 6, 1, 2, 0 },
121
122 /* ARM L2: 1300 MHz */
123 { 0, 3, 7, 0, 5, 1, 2, 0 },
124
125 /* ARM L3: 1200 MHz */
126 { 0, 3, 7, 0, 5, 1, 2, 0 },
127
128 /* ARM L4: 1100 MHz */
129 { 0, 3, 6, 0, 4, 1, 2, 0 },
130
131 /* ARM L5: 1000 MHz */
132 { 0, 2, 5, 0, 4, 1, 1, 0 },
133
134 /* ARM L6: 900 MHz */
135 { 0, 2, 5, 0, 3, 1, 1, 0 },
136
137 /* ARM L7: 800 MHz */
138 { 0, 2, 5, 0, 3, 1, 1, 0 },
139
140 /* ARM L8: 700 MHz */
141 { 0, 2, 4, 0, 3, 1, 1, 0 },
142
143 /* ARM L9: 600 MHz */
144 { 0, 2, 4, 0, 3, 1, 1, 0 },
145
146 /* ARM L10: 500 MHz */
147 { 0, 2, 4, 0, 3, 1, 1, 0 },
148
149 /* ARM L11: 400 MHz */
150 { 0, 2, 4, 0, 3, 1, 1, 0 },
151
152 /* ARM L12: 300 MHz */
153 { 0, 2, 4, 0, 2, 1, 1, 0 },
154
155 /* ARM L13: 200 MHz */
156 { 0, 1, 3, 0, 1, 1, 1, 0 },
157};
158
159static unsigned int clkdiv_cpu1_4212[CPUFREQ_LEVEL_END][2] = {
160 /* Clock divider value for following
161 * { DIVCOPY, DIVHPM }
162 */
163 /* ARM L0: 1500 MHz */
164 { 6, 0 },
165
166 /* ARM L1: 1400 MHz */
167 { 6, 0 },
168
169 /* ARM L2: 1300 MHz */
170 { 5, 0 },
171
172 /* ARM L3: 1200 MHz */
173 { 5, 0 },
174
175 /* ARM L4: 1100 MHz */
176 { 4, 0 },
177
178 /* ARM L5: 1000 MHz */
179 { 4, 0 },
180
181 /* ARM L6: 900 MHz */
182 { 3, 0 },
183
184 /* ARM L7: 800 MHz */
185 { 3, 0 },
186
187 /* ARM L8: 700 MHz */
188 { 3, 0 },
189
190 /* ARM L9: 600 MHz */
191 { 3, 0 },
192
193 /* ARM L10: 500 MHz */
194 { 3, 0 },
195
196 /* ARM L11: 400 MHz */
197 { 3, 0 },
198
199 /* ARM L12: 300 MHz */
200 { 3, 0 },
201
202 /* ARM L13: 200 MHz */
203 { 3, 0 },
204};
205
206static unsigned int clkdiv_cpu1_4412[CPUFREQ_LEVEL_END][3] = {
207 /* Clock divider value for following
208 * { DIVCOPY, DIVHPM, DIVCORES }
209 */ 84 */
210 /* ARM L0: 1500 MHz */ 85 APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
211 { 6, 0, 7 }, 86 APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
212 87 APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
213 /* ARM L1: 1400 MHz */ 88 APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
214 { 6, 0, 6 }, 89 APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
215 90 APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
216 /* ARM L2: 1300 MHz */ 91 APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
217 { 5, 0, 6 }, 92 APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
218 93 APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
219 /* ARM L3: 1200 MHz */ 94 APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
220 { 5, 0, 5 }, 95 APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
221 96 APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
222 /* ARM L4: 1100 MHz */ 97 APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
223 { 4, 0, 5 }, 98 APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
224
225 /* ARM L5: 1000 MHz */
226 { 4, 0, 4 },
227
228 /* ARM L6: 900 MHz */
229 { 3, 0, 4 },
230
231 /* ARM L7: 800 MHz */
232 { 3, 0, 3 },
233
234 /* ARM L8: 700 MHz */
235 { 3, 0, 3 },
236
237 /* ARM L9: 600 MHz */
238 { 3, 0, 2 },
239
240 /* ARM L10: 500 MHz */
241 { 3, 0, 2 },
242
243 /* ARM L11: 400 MHz */
244 { 3, 0, 1 },
245
246 /* ARM L12: 300 MHz */
247 { 3, 0, 1 },
248
249 /* ARM L13: 200 MHz */
250 { 3, 0, 0 },
251};
252
253static unsigned int exynos4x12_apll_pms_table[CPUFREQ_LEVEL_END] = {
254 /* APLL FOUT L0: 1500 MHz */
255 ((250 << 16) | (4 << 8) | (0x0)),
256
257 /* APLL FOUT L1: 1400 MHz */
258 ((175 << 16) | (3 << 8) | (0x0)),
259
260 /* APLL FOUT L2: 1300 MHz */
261 ((325 << 16) | (6 << 8) | (0x0)),
262
263 /* APLL FOUT L3: 1200 MHz */
264 ((200 << 16) | (4 << 8) | (0x0)),
265
266 /* APLL FOUT L4: 1100 MHz */
267 ((275 << 16) | (6 << 8) | (0x0)),
268
269 /* APLL FOUT L5: 1000 MHz */
270 ((125 << 16) | (3 << 8) | (0x0)),
271
272 /* APLL FOUT L6: 900 MHz */
273 ((150 << 16) | (4 << 8) | (0x0)),
274
275 /* APLL FOUT L7: 800 MHz */
276 ((100 << 16) | (3 << 8) | (0x0)),
277
278 /* APLL FOUT L8: 700 MHz */
279 ((175 << 16) | (3 << 8) | (0x1)),
280
281 /* APLL FOUT L9: 600 MHz */
282 ((200 << 16) | (4 << 8) | (0x1)),
283
284 /* APLL FOUT L10: 500 MHz */
285 ((125 << 16) | (3 << 8) | (0x1)),
286
287 /* APLL FOUT L11 400 MHz */
288 ((100 << 16) | (3 << 8) | (0x1)),
289
290 /* APLL FOUT L12: 300 MHz */
291 ((200 << 16) | (4 << 8) | (0x2)),
292
293 /* APLL FOUT L13: 200 MHz */
294 ((100 << 16) | (3 << 8) | (0x2)),
295};
296
297static const unsigned int asv_voltage_4x12[CPUFREQ_LEVEL_END] = {
298 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
299 1000000, 987500, 975000, 950000, 925000, 900000, 900000
300}; 99};
301 100
302static void exynos4x12_set_clkdiv(unsigned int div_index) 101static void exynos4x12_set_clkdiv(unsigned int div_index)
@@ -306,7 +105,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
306 105
307 /* Change Divider - CPU0 */ 106 /* Change Divider - CPU0 */
308 107
309 tmp = exynos4x12_clkdiv_table[div_index].clkdiv; 108 tmp = apll_freq_4x12[div_index].clk_div_cpu0;
310 109
311 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); 110 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
312 111
@@ -314,7 +113,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
314 cpu_relax(); 113 cpu_relax();
315 114
316 /* Change Divider - CPU1 */ 115 /* Change Divider - CPU1 */
317 tmp = exynos4x12_clkdiv_table[div_index].clkdiv1; 116 tmp = apll_freq_4x12[div_index].clk_div_cpu1;
318 117
319 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); 118 __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
320 if (soc_is_exynos4212()) 119 if (soc_is_exynos4212())
@@ -341,14 +140,14 @@ static void exynos4x12_set_apll(unsigned int index)
341 } while (tmp != 0x2); 140 } while (tmp != 0x2);
342 141
343 /* 2. Set APLL Lock time */ 142 /* 2. Set APLL Lock time */
344 pdiv = ((exynos4x12_apll_pms_table[index] >> 8) & 0x3f); 143 pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
345 144
346 __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK); 145 __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
347 146
348 /* 3. Change PLL PMS values */ 147 /* 3. Change PLL PMS values */
349 tmp = __raw_readl(EXYNOS4_APLL_CON0); 148 tmp = __raw_readl(EXYNOS4_APLL_CON0);
350 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); 149 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
351 tmp |= exynos4x12_apll_pms_table[index]; 150 tmp |= apll_freq_4x12[index].mps;
352 __raw_writel(tmp, EXYNOS4_APLL_CON0); 151 __raw_writel(tmp, EXYNOS4_APLL_CON0);
353 152
354 /* 4. wait_lock_time */ 153 /* 4. wait_lock_time */
@@ -367,10 +166,10 @@ static void exynos4x12_set_apll(unsigned int index)
367 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); 166 } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
368} 167}
369 168
370bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index) 169static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
371{ 170{
372 unsigned int old_pm = exynos4x12_apll_pms_table[old_index] >> 8; 171 unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
373 unsigned int new_pm = exynos4x12_apll_pms_table[new_index] >> 8; 172 unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
374 173
375 return (old_pm == new_pm) ? 0 : 1; 174 return (old_pm == new_pm) ? 0 : 1;
376} 175}
@@ -387,7 +186,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
387 /* 2. Change just s value in apll m,p,s value */ 186 /* 2. Change just s value in apll m,p,s value */
388 tmp = __raw_readl(EXYNOS4_APLL_CON0); 187 tmp = __raw_readl(EXYNOS4_APLL_CON0);
389 tmp &= ~(0x7 << 0); 188 tmp &= ~(0x7 << 0);
390 tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7); 189 tmp |= apll_freq_4x12[new_index].mps & 0x7;
391 __raw_writel(tmp, EXYNOS4_APLL_CON0); 190 __raw_writel(tmp, EXYNOS4_APLL_CON0);
392 191
393 } else { 192 } else {
@@ -402,7 +201,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
402 /* 1. Change just s value in apll m,p,s value */ 201 /* 1. Change just s value in apll m,p,s value */
403 tmp = __raw_readl(EXYNOS4_APLL_CON0); 202 tmp = __raw_readl(EXYNOS4_APLL_CON0);
404 tmp &= ~(0x7 << 0); 203 tmp &= ~(0x7 << 0);
405 tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7); 204 tmp |= apll_freq_4x12[new_index].mps & 0x7;
406 __raw_writel(tmp, EXYNOS4_APLL_CON0); 205 __raw_writel(tmp, EXYNOS4_APLL_CON0);
407 /* 2. Change the system clock divider values */ 206 /* 2. Change the system clock divider values */
408 exynos4x12_set_clkdiv(new_index); 207 exynos4x12_set_clkdiv(new_index);
@@ -416,27 +215,10 @@ static void exynos4x12_set_frequency(unsigned int old_index,
416 } 215 }
417} 216}
418 217
419static void __init set_volt_table(void)
420{
421 unsigned int i;
422
423 max_support_idx = L1;
424
425 /* Not supported */
426 exynos4x12_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
427
428 for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
429 exynos4x12_volt_table[i] = asv_voltage_4x12[i];
430}
431
432int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) 218int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
433{ 219{
434 int i;
435 unsigned int tmp;
436 unsigned long rate; 220 unsigned long rate;
437 221
438 set_volt_table();
439
440 cpu_clk = clk_get(NULL, "armclk"); 222 cpu_clk = clk_get(NULL, "armclk");
441 if (IS_ERR(cpu_clk)) 223 if (IS_ERR(cpu_clk))
442 return PTR_ERR(cpu_clk); 224 return PTR_ERR(cpu_clk);
@@ -455,66 +237,14 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
455 if (IS_ERR(mout_apll)) 237 if (IS_ERR(mout_apll))
456 goto err_mout_apll; 238 goto err_mout_apll;
457 239
458 for (i = L0; i < CPUFREQ_LEVEL_END; i++) { 240 if (soc_is_exynos4212())
459 241 apll_freq_4x12 = apll_freq_4212;
460 exynos4x12_clkdiv_table[i].index = i; 242 else
461 243 apll_freq_4x12 = apll_freq_4412;
462 tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
463
464 tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
465 EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
466 EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
467 EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
468 EXYNOS4_CLKDIV_CPU0_ATB_MASK |
469 EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
470 EXYNOS4_CLKDIV_CPU0_APLL_MASK);
471
472 if (soc_is_exynos4212()) {
473 tmp |= ((clkdiv_cpu0_4212[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
474 (clkdiv_cpu0_4212[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
475 (clkdiv_cpu0_4212[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
476 (clkdiv_cpu0_4212[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
477 (clkdiv_cpu0_4212[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
478 (clkdiv_cpu0_4212[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
479 (clkdiv_cpu0_4212[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
480 } else {
481 tmp &= ~EXYNOS4_CLKDIV_CPU0_CORE2_MASK;
482
483 tmp |= ((clkdiv_cpu0_4412[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
484 (clkdiv_cpu0_4412[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
485 (clkdiv_cpu0_4412[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
486 (clkdiv_cpu0_4412[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
487 (clkdiv_cpu0_4412[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
488 (clkdiv_cpu0_4412[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
489 (clkdiv_cpu0_4412[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT) |
490 (clkdiv_cpu0_4412[i][7] << EXYNOS4_CLKDIV_CPU0_CORE2_SHIFT));
491 }
492
493 exynos4x12_clkdiv_table[i].clkdiv = tmp;
494
495 tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
496
497 if (soc_is_exynos4212()) {
498 tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
499 EXYNOS4_CLKDIV_CPU1_HPM_MASK);
500 tmp |= ((clkdiv_cpu1_4212[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
501 (clkdiv_cpu1_4212[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT));
502 } else {
503 tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
504 EXYNOS4_CLKDIV_CPU1_HPM_MASK |
505 EXYNOS4_CLKDIV_CPU1_CORES_MASK);
506 tmp |= ((clkdiv_cpu1_4412[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
507 (clkdiv_cpu1_4412[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT) |
508 (clkdiv_cpu1_4412[i][2] << EXYNOS4_CLKDIV_CPU1_CORES_SHIFT));
509 }
510 exynos4x12_clkdiv_table[i].clkdiv1 = tmp;
511 }
512 244
513 info->mpll_freq_khz = rate; 245 info->mpll_freq_khz = rate;
514 info->pm_lock_idx = L5; 246 /* 800Mhz */
515 info->pll_safe_idx = L7; 247 info->pll_safe_idx = L7;
516 info->max_support_idx = max_support_idx;
517 info->min_support_idx = min_support_idx;
518 info->cpu_clk = cpu_clk; 248 info->cpu_clk = cpu_clk;
519 info->volt_table = exynos4x12_volt_table; 249 info->volt_table = exynos4x12_volt_table;
520 info->freq_table = exynos4x12_freq_table; 250 info->freq_table = exynos4x12_freq_table;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index e64c253cb169..b9344869f822 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -21,23 +21,18 @@
21#include <mach/regs-clock.h> 21#include <mach/regs-clock.h>
22#include <mach/cpufreq.h> 22#include <mach/cpufreq.h>
23 23
24#define CPUFREQ_LEVEL_END (L15 + 1)
25
26static int max_support_idx;
27static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
28static struct clk *cpu_clk; 24static struct clk *cpu_clk;
29static struct clk *moutcore; 25static struct clk *moutcore;
30static struct clk *mout_mpll; 26static struct clk *mout_mpll;
31static struct clk *mout_apll; 27static struct clk *mout_apll;
32 28
33struct cpufreq_clkdiv { 29static unsigned int exynos5250_volt_table[] = {
34 unsigned int index; 30 1300000, 1250000, 1225000, 1200000, 1150000,
35 unsigned int clkdiv; 31 1125000, 1100000, 1075000, 1050000, 1025000,
36 unsigned int clkdiv1; 32 1012500, 1000000, 975000, 950000, 937500,
33 925000
37}; 34};
38 35
39static unsigned int exynos5250_volt_table[CPUFREQ_LEVEL_END];
40
41static struct cpufreq_frequency_table exynos5250_freq_table[] = { 36static struct cpufreq_frequency_table exynos5250_freq_table[] = {
42 {L0, 1700 * 1000}, 37 {L0, 1700 * 1000},
43 {L1, 1600 * 1000}, 38 {L1, 1600 * 1000},
@@ -47,8 +42,8 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
47 {L5, 1200 * 1000}, 42 {L5, 1200 * 1000},
48 {L6, 1100 * 1000}, 43 {L6, 1100 * 1000},
49 {L7, 1000 * 1000}, 44 {L7, 1000 * 1000},
50 {L8, 900 * 1000}, 45 {L8, 900 * 1000},
51 {L9, 800 * 1000}, 46 {L9, 800 * 1000},
52 {L10, 700 * 1000}, 47 {L10, 700 * 1000},
53 {L11, 600 * 1000}, 48 {L11, 600 * 1000},
54 {L12, 500 * 1000}, 49 {L12, 500 * 1000},
@@ -58,78 +53,30 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
58 {0, CPUFREQ_TABLE_END}, 53 {0, CPUFREQ_TABLE_END},
59}; 54};
60 55
61static struct cpufreq_clkdiv exynos5250_clkdiv_table[CPUFREQ_LEVEL_END]; 56static struct apll_freq apll_freq_5250[] = {
62
63static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
64 /* 57 /*
65 * Clock divider value for following 58 * values:
66 * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 } 59 * freq
67 */ 60 * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
68 { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */ 61 * clock divider for COPY, HPM, RESERVED
69 { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */ 62 * PLL M, P, S
70 { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
71 { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
72 { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
73 { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
74 { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
75 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
76 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
77 { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
78 { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
79 { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
80 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
81 { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
82 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
83 { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
84};
85
86static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
87 /* Clock divider value for following
88 * { COPY, HPM }
89 */ 63 */
90 { 0, 2 }, /* 1700 MHz */ 64 APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
91 { 0, 2 }, /* 1600 MHz */ 65 APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
92 { 0, 2 }, /* 1500 MHz */ 66 APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
93 { 0, 2 }, /* 1400 MHz */ 67 APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
94 { 0, 2 }, /* 1300 MHz */ 68 APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
95 { 0, 2 }, /* 1200 MHz */ 69 APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
96 { 0, 2 }, /* 1100 MHz */ 70 APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
97 { 0, 2 }, /* 1000 MHz */ 71 APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
98 { 0, 2 }, /* 900 MHz */ 72 APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
99 { 0, 2 }, /* 800 MHz */ 73 APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
100 { 0, 2 }, /* 700 MHz */ 74 APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
101 { 0, 2 }, /* 600 MHz */ 75 APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
102 { 0, 2 }, /* 500 MHz */ 76 APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
103 { 0, 2 }, /* 400 MHz */ 77 APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
104 { 0, 2 }, /* 300 MHz */ 78 APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
105 { 0, 2 }, /* 200 MHz */ 79 APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
106};
107
108static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
109 ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
110 ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
111 ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
112 ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
113 ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
114 ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
115 ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
116 ((125 << 16) | (3 << 8) | 0), /* 1000 MHz */
117 ((150 << 16) | (4 << 8) | 0), /* 900 MHz */
118 ((100 << 16) | (3 << 8) | 0), /* 800 MHz */
119 ((175 << 16) | (3 << 8) | 1), /* 700 MHz */
120 ((200 << 16) | (4 << 8) | 1), /* 600 MHz */
121 ((125 << 16) | (3 << 8) | 1), /* 500 MHz */
122 ((100 << 16) | (3 << 8) | 1), /* 400 MHz */
123 ((200 << 16) | (4 << 8) | 2), /* 300 MHz */
124 ((100 << 16) | (3 << 8) | 2), /* 200 MHz */
125};
126
127/* ASV group voltage table */
128static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
129 1300000, 1250000, 1225000, 1200000, 1150000,
130 1125000, 1100000, 1075000, 1050000, 1025000,
131 1012500, 1000000, 975000, 950000, 937500,
132 925000
133}; 80};
134 81
135static void set_clkdiv(unsigned int div_index) 82static void set_clkdiv(unsigned int div_index)
@@ -138,7 +85,7 @@ static void set_clkdiv(unsigned int div_index)
138 85
139 /* Change Divider - CPU0 */ 86 /* Change Divider - CPU0 */
140 87
141 tmp = exynos5250_clkdiv_table[div_index].clkdiv; 88 tmp = apll_freq_5250[div_index].clk_div_cpu0;
142 89
143 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); 90 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
144 91
@@ -146,7 +93,7 @@ static void set_clkdiv(unsigned int div_index)
146 cpu_relax(); 93 cpu_relax();
147 94
148 /* Change Divider - CPU1 */ 95 /* Change Divider - CPU1 */
149 tmp = exynos5250_clkdiv_table[div_index].clkdiv1; 96 tmp = apll_freq_5250[div_index].clk_div_cpu1;
150 97
151 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); 98 __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
152 99
@@ -169,14 +116,14 @@ static void set_apll(unsigned int new_index,
169 } while (tmp != 0x2); 116 } while (tmp != 0x2);
170 117
171 /* 2. Set APLL Lock time */ 118 /* 2. Set APLL Lock time */
172 pdiv = ((exynos5_apll_pms_table[new_index] >> 8) & 0x3f); 119 pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
173 120
174 __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK); 121 __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
175 122
176 /* 3. Change PLL PMS values */ 123 /* 3. Change PLL PMS values */
177 tmp = __raw_readl(EXYNOS5_APLL_CON0); 124 tmp = __raw_readl(EXYNOS5_APLL_CON0);
178 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); 125 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
179 tmp |= exynos5_apll_pms_table[new_index]; 126 tmp |= apll_freq_5250[new_index].mps;
180 __raw_writel(tmp, EXYNOS5_APLL_CON0); 127 __raw_writel(tmp, EXYNOS5_APLL_CON0);
181 128
182 /* 4. wait_lock_time */ 129 /* 4. wait_lock_time */
@@ -196,10 +143,10 @@ static void set_apll(unsigned int new_index,
196 143
197} 144}
198 145
199bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index) 146static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
200{ 147{
201 unsigned int old_pm = (exynos5_apll_pms_table[old_index] >> 8); 148 unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
202 unsigned int new_pm = (exynos5_apll_pms_table[new_index] >> 8); 149 unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
203 150
204 return (old_pm == new_pm) ? 0 : 1; 151 return (old_pm == new_pm) ? 0 : 1;
205} 152}
@@ -216,7 +163,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
216 /* 2. Change just s value in apll m,p,s value */ 163 /* 2. Change just s value in apll m,p,s value */
217 tmp = __raw_readl(EXYNOS5_APLL_CON0); 164 tmp = __raw_readl(EXYNOS5_APLL_CON0);
218 tmp &= ~(0x7 << 0); 165 tmp &= ~(0x7 << 0);
219 tmp |= (exynos5_apll_pms_table[new_index] & 0x7); 166 tmp |= apll_freq_5250[new_index].mps & 0x7;
220 __raw_writel(tmp, EXYNOS5_APLL_CON0); 167 __raw_writel(tmp, EXYNOS5_APLL_CON0);
221 168
222 } else { 169 } else {
@@ -231,7 +178,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
231 /* 1. Change just s value in apll m,p,s value */ 178 /* 1. Change just s value in apll m,p,s value */
232 tmp = __raw_readl(EXYNOS5_APLL_CON0); 179 tmp = __raw_readl(EXYNOS5_APLL_CON0);
233 tmp &= ~(0x7 << 0); 180 tmp &= ~(0x7 << 0);
234 tmp |= (exynos5_apll_pms_table[new_index] & 0x7); 181 tmp |= apll_freq_5250[new_index].mps & 0x7;
235 __raw_writel(tmp, EXYNOS5_APLL_CON0); 182 __raw_writel(tmp, EXYNOS5_APLL_CON0);
236 /* 2. Change the system clock divider values */ 183 /* 2. Change the system clock divider values */
237 set_clkdiv(new_index); 184 set_clkdiv(new_index);
@@ -245,24 +192,10 @@ static void exynos5250_set_frequency(unsigned int old_index,
245 } 192 }
246} 193}
247 194
248static void __init set_volt_table(void)
249{
250 unsigned int i;
251
252 max_support_idx = L0;
253
254 for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
255 exynos5250_volt_table[i] = asv_voltage_5250[i];
256}
257
258int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) 195int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
259{ 196{
260 int i;
261 unsigned int tmp;
262 unsigned long rate; 197 unsigned long rate;
263 198
264 set_volt_table();
265
266 cpu_clk = clk_get(NULL, "armclk"); 199 cpu_clk = clk_get(NULL, "armclk");
267 if (IS_ERR(cpu_clk)) 200 if (IS_ERR(cpu_clk))
268 return PTR_ERR(cpu_clk); 201 return PTR_ERR(cpu_clk);
@@ -281,44 +214,9 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
281 if (IS_ERR(mout_apll)) 214 if (IS_ERR(mout_apll))
282 goto err_mout_apll; 215 goto err_mout_apll;
283 216
284 for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
285
286 exynos5250_clkdiv_table[i].index = i;
287
288 tmp = __raw_readl(EXYNOS5_CLKDIV_CPU0);
289
290 tmp &= ~((0x7 << 0) | (0x7 << 4) | (0x7 << 8) |
291 (0x7 << 12) | (0x7 << 16) | (0x7 << 20) |
292 (0x7 << 24) | (0x7 << 28));
293
294 tmp |= ((clkdiv_cpu0_5250[i][0] << 0) |
295 (clkdiv_cpu0_5250[i][1] << 4) |
296 (clkdiv_cpu0_5250[i][2] << 8) |
297 (clkdiv_cpu0_5250[i][3] << 12) |
298 (clkdiv_cpu0_5250[i][4] << 16) |
299 (clkdiv_cpu0_5250[i][5] << 20) |
300 (clkdiv_cpu0_5250[i][6] << 24) |
301 (clkdiv_cpu0_5250[i][7] << 28));
302
303 exynos5250_clkdiv_table[i].clkdiv = tmp;
304
305 tmp = __raw_readl(EXYNOS5_CLKDIV_CPU1);
306
307 tmp &= ~((0x7 << 0) | (0x7 << 4));
308
309 tmp |= ((clkdiv_cpu1_5250[i][0] << 0) |
310 (clkdiv_cpu1_5250[i][1] << 4));
311
312 exynos5250_clkdiv_table[i].clkdiv1 = tmp;
313 }
314
315 info->mpll_freq_khz = rate; 217 info->mpll_freq_khz = rate;
316 /* 1000Mhz */
317 info->pm_lock_idx = L7;
318 /* 800Mhz */ 218 /* 800Mhz */
319 info->pll_safe_idx = L9; 219 info->pll_safe_idx = L9;
320 info->max_support_idx = max_support_idx;
321 info->min_support_idx = min_support_idx;
322 info->cpu_clk = cpu_clk; 220 info->cpu_clk = cpu_clk;
323 info->volt_table = exynos5250_volt_table; 221 info->volt_table = exynos5250_volt_table;
324 info->freq_table = exynos5250_freq_table; 222 info->freq_table = exynos5250_freq_table;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..d7a79662e24c 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
63 pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", 63 pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
64 policy->min, policy->max, policy->cpu); 64 policy->min, policy->max, policy->cpu);
65 65
66 if (!cpu_online(policy->cpu))
67 return -EINVAL;
68
69 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 66 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
70 policy->cpuinfo.max_freq); 67 policy->cpuinfo.max_freq);
71 68
@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
121 break; 118 break;
122 } 119 }
123 120
124 if (!cpu_online(policy->cpu))
125 return -EINVAL;
126
127 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 121 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
128 unsigned int freq = table[i].frequency; 122 unsigned int freq = table[i].frequency;
129 if (freq == CPUFREQ_ENTRY_INVALID) 123 if (freq == CPUFREQ_ENTRY_INVALID)
@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
227} 221}
228EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); 222EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
229 223
224void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
225{
226 pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
227 policy->cpu, policy->last_cpu);
228 per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
229 policy->last_cpu);
230 per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
231}
232
230struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 233struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
231{ 234{
232 return per_cpu(cpufreq_show_table, cpu); 235 return per_cpu(cpufreq_show_table, cpu);
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000000..66e3a71b81a3
--- /dev/null
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (C) 2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This driver provides the clk notifier callbacks that are used when
9 * the cpufreq-cpu0 driver changes to frequency to alert the highbank
10 * EnergyCore Management Engine (ECME) about the need to change
11 * voltage. The ECME interfaces with the actual voltage regulators.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/clk.h>
19#include <linux/cpu.h>
20#include <linux/err.h>
21#include <linux/of.h>
22#include <linux/mailbox.h>
23#include <linux/platform_device.h>
24
25#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
26#define HB_CPUFREQ_IPC_LEN 7
27#define HB_CPUFREQ_VOLT_RETRIES 15
28
29static int hb_voltage_change(unsigned int freq)
30{
31 int i;
32 u32 msg[HB_CPUFREQ_IPC_LEN];
33
34 msg[0] = HB_CPUFREQ_CHANGE_NOTE;
35 msg[1] = freq / 1000000;
36 for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
37 msg[i] = 0;
38
39 return pl320_ipc_transmit(msg);
40}
41
42static int hb_cpufreq_clk_notify(struct notifier_block *nb,
43 unsigned long action, void *hclk)
44{
45 struct clk_notifier_data *clk_data = hclk;
46 int i = 0;
47
48 if (action == PRE_RATE_CHANGE) {
49 if (clk_data->new_rate > clk_data->old_rate)
50 while (hb_voltage_change(clk_data->new_rate))
51 if (i++ > HB_CPUFREQ_VOLT_RETRIES)
52 return NOTIFY_BAD;
53 } else if (action == POST_RATE_CHANGE) {
54 if (clk_data->new_rate < clk_data->old_rate)
55 while (hb_voltage_change(clk_data->new_rate))
56 if (i++ > HB_CPUFREQ_VOLT_RETRIES)
57 return NOTIFY_BAD;
58 }
59
60 return NOTIFY_DONE;
61}
62
63static struct notifier_block hb_cpufreq_clk_nb = {
64 .notifier_call = hb_cpufreq_clk_notify,
65};
66
67static int hb_cpufreq_driver_init(void)
68{
69 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
70 struct device *cpu_dev;
71 struct clk *cpu_clk;
72 struct device_node *np;
73 int ret;
74
75 if (!of_machine_is_compatible("calxeda,highbank"))
76 return -ENODEV;
77
78 for_each_child_of_node(of_find_node_by_path("/cpus"), np)
79 if (of_get_property(np, "operating-points", NULL))
80 break;
81
82 if (!np) {
83 pr_err("failed to find highbank cpufreq node\n");
84 return -ENOENT;
85 }
86
87 cpu_dev = get_cpu_device(0);
88 if (!cpu_dev) {
89 pr_err("failed to get highbank cpufreq device\n");
90 ret = -ENODEV;
91 goto out_put_node;
92 }
93
94 cpu_dev->of_node = np;
95
96 cpu_clk = clk_get(cpu_dev, NULL);
97 if (IS_ERR(cpu_clk)) {
98 ret = PTR_ERR(cpu_clk);
99 pr_err("failed to get cpu0 clock: %d\n", ret);
100 goto out_put_node;
101 }
102
103 ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
104 if (ret) {
105 pr_err("failed to register clk notifier: %d\n", ret);
106 goto out_put_node;
107 }
108
109 /* Instantiate cpufreq-cpu0 */
110 platform_device_register_full(&devinfo);
111
112out_put_node:
113 of_node_put(np);
114 return ret;
115}
116module_init(hb_cpufreq_driver_init);
117
118MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
119MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
120MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000000..d6b6ef350cb6
--- /dev/null
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,336 @@
1/*
2 * Copyright (C) 2013 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/clk.h>
10#include <linux/cpufreq.h>
11#include <linux/delay.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/opp.h>
16#include <linux/platform_device.h>
17#include <linux/regulator/consumer.h>
18
19#define PU_SOC_VOLTAGE_NORMAL 1250000
20#define PU_SOC_VOLTAGE_HIGH 1275000
21#define FREQ_1P2_GHZ 1200000000
22
/* Supply regulators scaled alongside CPU frequency changes. */
static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;

/* Clocks involved in re-programming PLL1 (see imx6q_set_target()). */
static struct clk *arm_clk;
static struct clk *pll1_sys_clk;
static struct clk *pll1_sw_clk;
static struct clk *step_clk;
static struct clk *pll2_pfd2_396m_clk;

static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
/* worst-case transition latency reported to the cpufreq core */
static unsigned int transition_latency;
36
/* cpufreq ->verify: clamp the policy to frequencies in our table. */
static int imx6q_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}
41
/* cpufreq ->get: current ARM clock rate in kHz. */
static unsigned int imx6q_get_speed(unsigned int cpu)
{
	return clk_get_rate(arm_clk) / 1000;
}
46
/*
 * cpufreq ->target: move the ARM core to the table entry matching
 * target_freq, scaling vddarm (and, for the 1.2 GHz set-point, also
 * vddpu/vddsoc) and re-programming PLL1 around the rate change.
 */
static int imx6q_set_target(struct cpufreq_policy *policy,
			    unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct opp *opp;
	unsigned long freq_hz, volt, volt_old;
	unsigned int index, cpu;
	int ret;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
					     relation, &index);
	if (ret) {
		dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
			target_freq, ret);
		return ret;
	}

	freqs.new = freq_table[index].frequency;
	freq_hz = freqs.new * 1000;
	freqs.old = clk_get_rate(arm_clk) / 1000;

	if (freqs.old == freqs.new)
		return 0;

	/* All cores share one clock, so notify every online CPU. */
	for_each_online_cpu(cpu) {
		freqs.cpu = cpu;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	/* OPP lookups must happen under the RCU read lock. */
	rcu_read_lock();
	opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
		return PTR_ERR(opp);
	}

	volt = opp_get_voltage(opp);
	rcu_read_unlock();
	volt_old = regulator_get_voltage(arm_reg);

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		freqs.old / 1000, volt_old / 1000,
		freqs.new / 1000, volt / 1000);

	/* scaling up?  scale voltage before frequency */
	if (freqs.new > freqs.old) {
		ret = regulator_set_voltage_tol(arm_reg, volt, 0);
		if (ret) {
			/*
			 * NOTE(review): error paths here return after
			 * CPUFREQ_PRECHANGE was already sent, with no
			 * matching POSTCHANGE — confirm whether the core
			 * needs a cancel notification.
			 */
			dev_err(cpu_dev,
				"failed to scale vddarm up: %d\n", ret);
			return ret;
		}

		/*
		 * Need to increase vddpu and vddsoc for safety
		 * if we are about to run at 1.2 GHz.
		 */
		if (freqs.new == FREQ_1P2_GHZ / 1000) {
			regulator_set_voltage_tol(pu_reg,
						  PU_SOC_VOLTAGE_HIGH, 0);
			regulator_set_voltage_tol(soc_reg,
						  PU_SOC_VOLTAGE_HIGH, 0);
		}
	}

	/*
	 * The setpoints are selected per PLL/PDF frequencies, so we need to
	 * reprogram PLL for frequency scaling.  The procedure of reprogramming
	 * PLL1 is as below.
	 *
	 *  - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
	 *  - Disable pll2_pfd2_396m_clk
	 */
	clk_prepare_enable(pll2_pfd2_396m_clk);
	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
	clk_set_parent(pll1_sw_clk, step_clk);
	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
		clk_set_rate(pll1_sys_clk, freqs.new * 1000);
		/*
		 * If we are leaving 396 MHz set-point, we need to enable
		 * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
		 * their use count correct.
		 */
		if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
			clk_prepare_enable(pll1_sys_clk);
			clk_disable_unprepare(pll2_pfd2_396m_clk);
		}
		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
		/* balances the clk_prepare_enable() above */
		clk_disable_unprepare(pll2_pfd2_396m_clk);
	} else {
		/*
		 * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
		 * to provide the frequency.
		 */
		clk_disable_unprepare(pll1_sys_clk);
	}

	/* Ensure the arm clock divider is what we expect */
	ret = clk_set_rate(arm_clk, freqs.new * 1000);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		regulator_set_voltage_tol(arm_reg, volt_old, 0);
		return ret;
	}

	/* scaling down?  scale voltage after frequency */
	if (freqs.new < freqs.old) {
		ret = regulator_set_voltage_tol(arm_reg, volt, 0);
		if (ret)
			dev_warn(cpu_dev,
				 "failed to scale vddarm down: %d\n", ret);

		if (freqs.old == FREQ_1P2_GHZ / 1000) {
			regulator_set_voltage_tol(pu_reg,
						  PU_SOC_VOLTAGE_NORMAL, 0);
			regulator_set_voltage_tol(soc_reg,
						  PU_SOC_VOLTAGE_NORMAL, 0);
		}
	}

	for_each_online_cpu(cpu) {
		freqs.cpu = cpu;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}
176
177static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
178{
179 int ret;
180
181 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
182 if (ret) {
183 dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
184 return ret;
185 }
186
187 policy->cpuinfo.transition_latency = transition_latency;
188 policy->cur = clk_get_rate(arm_clk) / 1000;
189 cpumask_setall(policy->cpus);
190 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
191
192 return 0;
193}
194
/* cpufreq ->exit: drop the sysfs frequency-table attribute. */
static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}
200
/* sysfs attributes: expose scaling_available_frequencies. */
static struct freq_attr *imx6q_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
205
/* cpufreq driver callbacks for the i.MX6Q. */
static struct cpufreq_driver imx6q_cpufreq_driver = {
	.verify = imx6q_verify_speed,
	.target = imx6q_set_target,
	.get = imx6q_get_speed,
	.init = imx6q_cpufreq_init,
	.exit = imx6q_cpufreq_exit,
	.name = "imx6q-cpufreq",
	.attr = imx6q_cpufreq_attr,
};
215
216static int imx6q_cpufreq_probe(struct platform_device *pdev)
217{
218 struct device_node *np;
219 struct opp *opp;
220 unsigned long min_volt, max_volt;
221 int num, ret;
222
223 cpu_dev = &pdev->dev;
224
225 np = of_find_node_by_path("/cpus/cpu@0");
226 if (!np) {
227 dev_err(cpu_dev, "failed to find cpu0 node\n");
228 return -ENOENT;
229 }
230
231 cpu_dev->of_node = np;
232
233 arm_clk = devm_clk_get(cpu_dev, "arm");
234 pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
235 pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
236 step_clk = devm_clk_get(cpu_dev, "step");
237 pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
238 if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
239 IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
240 dev_err(cpu_dev, "failed to get clocks\n");
241 ret = -ENOENT;
242 goto put_node;
243 }
244
245 arm_reg = devm_regulator_get(cpu_dev, "arm");
246 pu_reg = devm_regulator_get(cpu_dev, "pu");
247 soc_reg = devm_regulator_get(cpu_dev, "soc");
248 if (!arm_reg || !pu_reg || !soc_reg) {
249 dev_err(cpu_dev, "failed to get regulators\n");
250 ret = -ENOENT;
251 goto put_node;
252 }
253
254 /* We expect an OPP table supplied by platform */
255 num = opp_get_opp_count(cpu_dev);
256 if (num < 0) {
257 ret = num;
258 dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
259 goto put_node;
260 }
261
262 ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
263 if (ret) {
264 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
265 goto put_node;
266 }
267
268 if (of_property_read_u32(np, "clock-latency", &transition_latency))
269 transition_latency = CPUFREQ_ETERNAL;
270
271 /*
272 * OPP is maintained in order of increasing frequency, and
273 * freq_table initialised from OPP is therefore sorted in the
274 * same order.
275 */
276 rcu_read_lock();
277 opp = opp_find_freq_exact(cpu_dev,
278 freq_table[0].frequency * 1000, true);
279 min_volt = opp_get_voltage(opp);
280 opp = opp_find_freq_exact(cpu_dev,
281 freq_table[--num].frequency * 1000, true);
282 max_volt = opp_get_voltage(opp);
283 rcu_read_unlock();
284 ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
285 if (ret > 0)
286 transition_latency += ret * 1000;
287
288 /* Count vddpu and vddsoc latency in for 1.2 GHz support */
289 if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
290 ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
291 PU_SOC_VOLTAGE_HIGH);
292 if (ret > 0)
293 transition_latency += ret * 1000;
294 ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
295 PU_SOC_VOLTAGE_HIGH);
296 if (ret > 0)
297 transition_latency += ret * 1000;
298 }
299
300 ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
301 if (ret) {
302 dev_err(cpu_dev, "failed register driver: %d\n", ret);
303 goto free_freq_table;
304 }
305
306 of_node_put(np);
307 return 0;
308
309free_freq_table:
310 opp_free_cpufreq_table(cpu_dev, &freq_table);
311put_node:
312 of_node_put(np);
313 return ret;
314}
315
/* Remove: unregister the driver and release the OPP-derived table. */
static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&imx6q_cpufreq_driver);
	opp_free_cpufreq_table(cpu_dev, &freq_table);

	return 0;
}
323
/* Platform driver; the matching device is registered by platform code. */
static struct platform_driver imx6q_cpufreq_platdrv = {
	.driver = {
		.name	= "imx6q-cpufreq",
		.owner	= THIS_MODULE,
	},
	.probe		= imx6q_cpufreq_probe,
	.remove		= imx6q_cpufreq_remove,
};
332module_platform_driver(imx6q_cpufreq_platdrv);
333
334MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
335MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
336MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000000..096fde0ebcb5
--- /dev/null
+++ b/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,823 @@
1/*
2 * cpufreq_snb.c: Native P state management for Intel processors
3 *
4 * (C) Copyright 2012 Intel Corporation
5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13#include <linux/kernel.h>
14#include <linux/kernel_stat.h>
15#include <linux/module.h>
16#include <linux/ktime.h>
17#include <linux/hrtimer.h>
18#include <linux/tick.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21#include <linux/list.h>
22#include <linux/cpu.h>
23#include <linux/cpufreq.h>
24#include <linux/sysfs.h>
25#include <linux/types.h>
26#include <linux/fs.h>
27#include <linux/debugfs.h>
28#include <trace/events/power.h>
29
30#include <asm/div64.h>
31#include <asm/msr.h>
32#include <asm/cpu_device_id.h>
33
34#define SAMPLE_COUNT 3
35
36#define FRAC_BITS 8
37#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
38#define fp_toint(X) ((X) >> FRAC_BITS)
39
/* Fixed-point helpers: values carry FRAC_BITS (8) fractional bits. */
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
49
/* One measurement interval worth of utilization data. */
struct sample {
	ktime_t start_time;
	ktime_t end_time;
	int core_pct_busy;	/* busy % scaled by APERF/MPERF ratio */
	int pstate_pct_busy;	/* non-idle % of the sample interval */
	u64 duration_us;
	u64 idletime_us;
	u64 aperf;		/* MSR_IA32_APERF delta over the interval */
	u64 mperf;		/* MSR_IA32_MPERF delta over the interval */
	int freq;		/* estimated effective frequency */
};

/* Hardware P-state limits plus the currently requested P-state. */
struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int turbo_pstate;
};

/* PID controller state; gains and integral use FRAC_BITS fixed point. */
struct _pid {
	int setpoint;		/* target busy value the controller tracks */
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;		/* |error| at or below this is ignored */
	int last_err;
};

/* Per-CPU driver state, allocated in intel_pstate_init_cpu(). */
struct cpudata {
	int cpu;

	char name[64];

	struct timer_list timer;	/* periodic sampling timer */

	struct pstate_adjust_policy *pstate_policy;
	struct pstate_data pstate;
	struct _pid pid;		/* controller for normal mode */
	struct _pid idle_pid;		/* controller for idle mode */

	int min_pstate_count;
	int idle_mode;

	ktime_t prev_sample;
	u64 prev_idle_time_us;
	u64 prev_aperf;
	u64 prev_mperf;
	int sample_ptr;			/* index of the most recent sample */
	struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;
/* PID tunables; individual fields are writable via debugfs. */
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
111
/* Default PID tuning used for all supported CPU models. */
static struct pstate_adjust_policy default_policy = {
	.sample_rate_ms = 10,
	.deadband = 0,
	.setpoint = 109,
	.p_gain_pct = 17,
	.d_gain_pct = 0,
	.i_gain_pct = 4,
};

/* User-visible performance limits (exposed under sysfs intel_pstate/). */
struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;	/* max_perf_pct as a FRAC_BITS fraction */
	int32_t min_perf;	/* min_perf_pct as a FRAC_BITS fraction */
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
};
136
/*
 * Re-initialise a PID controller: new setpoint/deadband, integral term
 * preloaded, last_err primed from the current busy value.
 */
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral) {
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = setpoint - busy;
}

/* Gains are supplied in percent and stored as FRAC_BITS fractions. */
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{

	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
160
/*
 * One PID iteration: returns the control output for the given busy
 * value, or 0 when the error is within the deadband.  The integral
 * term is clamped to +/-30 to prevent wind-up.
 */
static signed int pid_calc(struct _pid *pid, int busy)
{
	signed int err, result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	err = pid->setpoint - busy;
	fp_error = int_tofp(err);

	if (abs(err) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, (err - pid->last_err));
	pid->last_err = err;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

	return (signed int)fp_toint(result);
}
191
/* Load the tunable gains and reset the busy-mode controller. */
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
	pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
	pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);

	pid_reset(&cpu->pid,
		cpu->pstate_policy->setpoint,
		100,
		cpu->pstate_policy->deadband,
		0);
}

/* Idle-mode controller uses a fixed setpoint of 75, initial busy 50. */
static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
	pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
	pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);

	pid_reset(&cpu->idle_pid,
		75,
		50,
		cpu->pstate_policy->deadband,
		0);
}
217
/* Re-apply (possibly changed) PID parameters on every online CPU. */
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
226
227/************************** debugfs begin ************************/
/* debugfs write: store a new tunable and restart all PID controllers. */
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}
/* debugfs read: report the current tunable value. */
static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");

/* Name/value pair published as one debugfs file. */
struct pid_param {
	char *name;
	void *value;
};

/* NULL-terminated list of tunables exposed in debugfs. */
static struct pid_param pid_files[] = {
	{"sample_rate_ms", &default_policy.sample_rate_ms},
	{"d_gain_pct", &default_policy.d_gain_pct},
	{"i_gain_pct", &default_policy.i_gain_pct},
	{"deadband", &default_policy.deadband},
	{"setpoint", &default_policy.setpoint},
	{"p_gain_pct", &default_policy.p_gain_pct},
	{NULL, NULL}
};
256
257static struct dentry *debugfs_parent;
258static void intel_pstate_debug_expose_params(void)
259{
260 int i = 0;
261
262 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
263 if (IS_ERR_OR_NULL(debugfs_parent))
264 return;
265 while (pid_files[i].name) {
266 debugfs_create_file(pid_files[i].name, 0660,
267 debugfs_parent, pid_files[i].value,
268 &fops_pid_param);
269 i++;
270 }
271}
272
273/************************** debugfs end ************************/
274
275/************************** sysfs begin ************************/
/* Generate a sysfs show handler for one field of 'limits'. */
#define show_one(file_name, object) \
	static ssize_t show_##file_name \
	(struct kobject *kobj, struct attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%u\n", limits.object); \
	}

/* sysfs store: 0/1 flag forbidding turbo P-states. */
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0 , 1);

	return count;
}

/* sysfs store: upper performance limit in percent (also cached as fp). */
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_perf_pct = clamp_t(int, input, 0 , 100);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
	return count;
}

/* sysfs store: lower performance limit in percent (also cached as fp). */
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.min_perf_pct = clamp_t(int, input, 0 , 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}
323
/* Instantiate the show handlers and bundle everything into one group. */
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;
343
/*
 * Create /sys/devices/system/cpu/intel_pstate and its limit attributes.
 * Failure here indicates a broken core setup, hence the BUG_ON()s.
 */
static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}
355
356/************************** sysfs end ************************/
357
/* Minimum P-state, read from MSR 0xCE (MSR_PLATFORM_INFO) bits 47:40. */
static int intel_pstate_min_pstate(void)
{
	u64 value;
	rdmsrl(0xCE, value);
	return (value >> 40) & 0xFF;
}

/* Maximum non-turbo P-state, MSR_PLATFORM_INFO bits 15:8. */
static int intel_pstate_max_pstate(void)
{
	u64 value;
	rdmsrl(0x1AD, value);
	nont = intel_pstate_max_pstate();
	ret = ((value) & 255);
	if (ret <= nont)
		ret = nont;
	return ret;
}
383
/*
 * Compute the allowed P-state range after applying the user limits.
 * Note: min_perf is taken as a fraction of the (already limited)
 * max_perf, not of the turbo P-state.
 */
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int min_perf;
	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}
399
/*
 * Request a new P-state via MSR_IA32_PERF_CTL after clamping it into
 * the currently allowed range; no-op if the P-state is unchanged.
 */
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	/* frequency tracing is compiled out for module builds */
#ifndef MODULE
	trace_cpu_frequency(pstate * 100000, cpu->cpu);
#endif
	cpu->pstate.current_pstate = pstate;
	/* the requested ratio lives in bits 15:8 of PERF_CTL */
	wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);

}
418
/* Step the requested P-state up by 'steps'; clamped in set_pstate(). */
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}

/* Step the requested P-state down by 'steps'; clamped in set_pstate(). */
static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}
433
/* Read the hardware P-state limits and start at the max P-state. */
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = intel_pstate_min_pstate();
	cpu->pstate.max_pstate = intel_pstate_max_pstate();
	cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();

	/*
	 * goto max pstate so we don't slow up boot if we are built-in if we are
	 * a module we will take care of it during normal operation
	 */
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
448
/*
 * Derive percentages from a raw sample: pstate_pct_busy is the
 * non-idle share of the interval, core_pct the APERF/MPERF ratio,
 * freq the estimated running frequency, and core_pct_busy the busy
 * share scaled by that ratio.
 */
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					struct sample *sample)
{
	u64 core_pct;
	sample->pstate_pct_busy = 100 - div64_u64(
					sample->idletime_us * 100,
					sample->duration_us);
	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
	sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;

	sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
					100);
}
462
/*
 * Take one utilization sample: read cumulative idle time and the
 * APERF/MPERF counters, and record deltas versus the previous reading
 * in the circular samples[] buffer.
 */
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	ktime_t now;
	u64 idle_time_us;
	u64 aperf, mperf;

	now = ktime_get();
	idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	/* for the first sample, don't actually record a sample, just
	 * set the baseline */
	if (cpu->prev_idle_time_us > 0) {
		cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
		cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
		cpu->samples[cpu->sample_ptr].end_time = now;
		cpu->samples[cpu->sample_ptr].duration_us =
			ktime_us_delta(now, cpu->prev_sample);
		cpu->samples[cpu->sample_ptr].idletime_us =
			idle_time_us - cpu->prev_idle_time_us;

		cpu->samples[cpu->sample_ptr].aperf = aperf;
		cpu->samples[cpu->sample_ptr].mperf = mperf;
		cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
		cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;

		intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
	}

	cpu->prev_sample = now;
	cpu->prev_idle_time_us = idle_time_us;
	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}
498
/*
 * Re-arm the per-CPU sampling timer, aligned so it fires on the next
 * sample_rate_ms boundary in jiffies.
 */
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = cpu->pstate_policy->sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	delay -= jiffies % delay;
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
508
/* Switch the timer handler to the idle-mode PID controller. */
static inline void intel_pstate_idle_mode(struct cpudata *cpu)
{
	cpu->idle_mode = 1;
}

/* Switch the timer handler back to the busy-mode PID controller. */
static inline void intel_pstate_normal_mode(struct cpudata *cpu)
{
	cpu->idle_mode = 0;
}
518
/*
 * Scale the measured busy percentage by turbo/current P-state so the
 * value is comparable against the setpoint regardless of the current
 * operating frequency.
 */
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t busy_scaled;
	int32_t core_busy, turbo_pstate, current_pstate;

	core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
	turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));

	return fp_toint(busy_scaled);
}
531
532static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
533{
534 int busy_scaled;
535 struct _pid *pid;
536 signed int ctl = 0;
537 int steps;
538
539 pid = &cpu->pid;
540 busy_scaled = intel_pstate_get_scaled_busy(cpu);
541
542 ctl = pid_calc(pid, busy_scaled);
543
544 steps = abs(ctl);
545 if (ctl < 0)
546 intel_pstate_pstate_increase(cpu, steps);
547 else
548 intel_pstate_pstate_decrease(cpu, steps);
549}
550
/*
 * Idle-mode control: inverted sense relative to the busy controller
 * (input is 100 - busy, and a negative output decreases the P-state).
 * Leaves idle mode once the minimum P-state is reached.
 */
static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
{
	int busy_scaled;
	struct _pid *pid;
	int ctl = 0;
	int steps;

	pid = &cpu->idle_pid;

	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, 100 - busy_scaled);

	steps = abs(ctl);
	if (ctl < 0)
		intel_pstate_pstate_decrease(cpu, steps);
	else
		intel_pstate_pstate_increase(cpu, steps);

	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
		intel_pstate_normal_mode(cpu);
}
573
/*
 * Periodic per-CPU timer: take a sample, run whichever controller is
 * active, then re-arm.  The XPERF_FIX block (disabled unless defined)
 * kicks the CPU to max and enters idle mode after lingering at the
 * minimum P-state for five consecutive samples.
 */
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	if (!cpu->idle_mode)
		intel_pstate_adjust_busy_pstate(cpu);
	else
		intel_pstate_adjust_idle_pstate(cpu);

#if defined(XPERF_FIX)
	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
		cpu->min_pstate_count++;
		if (!(cpu->min_pstate_count % 5)) {
			intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
			intel_pstate_idle_mode(cpu);
		}
	} else
		cpu->min_pstate_count = 0;
#endif
	intel_pstate_set_sample_time(cpu);
}
597
/* Build an x86_cpu_id entry for a family-6 model with a tuning policy. */
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }

/* Supported CPUs: family 6 models 0x2a and 0x2d. */
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, default_policy),
	ICPU(0x2d, default_policy),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
607
/*
 * Allocate and initialise per-CPU state, take a baseline sample, and
 * start the sampling timer pinned to that CPU.
 * Returns 0 or a negative errno.
 */
static int intel_pstate_init_cpu(unsigned int cpunum)
{

	const struct x86_cpu_id *id;
	struct cpudata *cpu;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	intel_pstate_get_cpu_pstates(cpu);

	cpu->cpu = cpunum;
	/* per-model tuning is stashed in the match-table driver_data */
	cpu->pstate_policy =
		(struct pstate_adjust_policy *)id->driver_data;
	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data =
		(unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_idle_pid_reset(cpu);
	intel_pstate_sample(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
645
/* cpufreq ->get: last estimated frequency, or 0 if not initialised. */
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->samples[cpu->sample_ptr];
	return sample->freq;
}
657
658static int intel_pstate_set_policy(struct cpufreq_policy *policy)
659{
660 struct cpudata *cpu;
661 int min, max;
662
663 cpu = all_cpu_data[policy->cpu];
664
665 intel_pstate_get_min_max(cpu, &min, &max);
666
667 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
668 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
669 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
670
671 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
672 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
673 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
674
675 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
676 limits.min_perf_pct = 100;
677 limits.min_perf = int_tofp(1);
678 limits.max_perf_pct = 100;
679 limits.max_perf = int_tofp(1);
680 limits.no_turbo = 0;
681 }
682
683 return 0;
684}
685
/* cpufreq ->verify: clamp to cpuinfo range; only the two setpolicy
 * governors (POWERSAVE/PERFORMANCE) are accepted. */
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy,
				policy->cpuinfo.min_freq,
				policy->cpuinfo.max_freq);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
		(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}
698
699static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
700{
701 int cpu = policy->cpu;
702
703 del_timer(&all_cpu_data[cpu]->timer);
704 kfree(all_cpu_data[cpu]);
705 all_cpu_data[cpu] = NULL;
706 return 0;
707}
708
/*
 * cpufreq ->init (CPU coming up): create per-CPU state and report the
 * frequency range derived from the hardware P-states (frequencies are
 * reported as pstate * 100000 kHz throughout this driver).
 */
static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int rc, min_pstate, max_pstate;
	struct cpudata *cpu;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
		limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	policy->min = min_pstate * 100000;
	policy->max = max_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
738
/* setpolicy-style cpufreq driver; P-state selection is done internally. */
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.name		= "intel_pstate",
	.owner		= THIS_MODULE,
};
749
/*
 * Tear down: sysfs/debugfs, then the driver, then per-CPU timers and
 * state.  Also called from the init error path, so it tolerates a
 * partially completed setup (all_cpu_data may be NULL).
 */
static void intel_pstate_exit(void)
{
	int cpu;

	sysfs_remove_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	debugfs_remove_recursive(debugfs_parent);

	cpufreq_unregister_driver(&intel_pstate_driver);

	if (!all_cpu_data)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
}
774module_exit(intel_pstate_exit);
775
776static int __initdata no_load;
777
778static int __init intel_pstate_init(void)
779{
780 int rc = 0;
781 const struct x86_cpu_id *id;
782
783 if (no_load)
784 return -ENODEV;
785
786 id = x86_match_cpu(intel_pstate_cpu_ids);
787 if (!id)
788 return -ENODEV;
789
790 pr_info("Intel P-state driver initializing.\n");
791
792 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
793 if (!all_cpu_data)
794 return -ENOMEM;
795 memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
796
797 rc = cpufreq_register_driver(&intel_pstate_driver);
798 if (rc)
799 goto out;
800
801 intel_pstate_debug_expose_params();
802 intel_pstate_sysfs_expose_params();
803 return rc;
804out:
805 intel_pstate_exit();
806 return -ENODEV;
807}
808device_initcall(intel_pstate_init);
809
/* Early parameter: "intel_pstate=disable" prevents the driver loading. */
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
819early_param("intel_pstate", intel_pstate_setup);
820
821MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
822MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
823MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000000..0e83e3c24f5b
--- /dev/null
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,259 @@
1/*
2 * kirkwood_freq.c: cpufreq driver for the Marvell kirkwood
3 *
4 * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/clk.h>
15#include <linux/clk-provider.h>
16#include <linux/cpufreq.h>
17#include <linux/of.h>
18#include <linux/platform_device.h>
19#include <linux/io.h>
20#include <asm/proc-fns.h>
21
22#define CPU_SW_INT_BLK BIT(28)
23
/*
 * Driver-global state.  Kirkwood is uniprocessor (see the "Kirkwood is
 * UP" note in the state-switch path), so one static instance suffices.
 */
static struct priv
{
	struct clk *cpu_clk;		/* "cpu_clk" from the cpu@0 DT node */
	struct clk *ddr_clk;		/* "ddrclk" from the cpu@0 DT node */
	struct clk *powersave_clk;	/* when enabled, CPU runs at ddr_clk rate */
	struct device *dev;
	void __iomem *base;		/* register holding CPU_SW_INT_BLK (bit 28) */
} priv;
32
#define STATE_CPU_FREQ 0x01	/* CPU clocked from cpu_clk */
#define STATE_DDR_FREQ 0x02	/* CPU clocked from ddr_clk */

/*
 * Kirkwood can swap the clock to the CPU between two clocks:
 *
 * - cpu clk
 * - ddr clk
 *
 * The frequencies are filled in at probe time, before this table is
 * registered with the cpufreq core.
 */
static struct cpufreq_frequency_table kirkwood_freq_table[] = {
	{STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
	{STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
	{0, CPUFREQ_TABLE_END},
};
50
51static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
52{
53 if (__clk_is_enabled(priv.powersave_clk))
54 return kirkwood_freq_table[1].frequency;
55 return kirkwood_freq_table[0].frequency;
56}
57
58static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
59{
60 struct cpufreq_freqs freqs;
61 unsigned int state = kirkwood_freq_table[index].index;
62 unsigned long reg;
63
64 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
65 freqs.new = kirkwood_freq_table[index].frequency;
66 freqs.cpu = 0; /* Kirkwood is UP */
67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
69
70 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
71 kirkwood_freq_table[index].frequency);
72 dev_dbg(priv.dev, "old frequency was %i KHz\n",
73 kirkwood_cpufreq_get_cpu_frequency(0));
74
75 if (freqs.old != freqs.new) {
76 local_irq_disable();
77
78 /* Disable interrupts to the CPU */
79 reg = readl_relaxed(priv.base);
80 reg |= CPU_SW_INT_BLK;
81 writel_relaxed(reg, priv.base);
82
83 switch (state) {
84 case STATE_CPU_FREQ:
85 clk_disable(priv.powersave_clk);
86 break;
87 case STATE_DDR_FREQ:
88 clk_enable(priv.powersave_clk);
89 break;
90 }
91
92 /* Wait-for-Interrupt, while the hardware changes frequency */
93 cpu_do_idle();
94
95 /* Enable interrupts to the CPU */
96 reg = readl_relaxed(priv.base);
97 reg &= ~CPU_SW_INT_BLK;
98 writel_relaxed(reg, priv.base);
99
100 local_irq_enable();
101 }
102 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
103};
104
105static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
106{
107 return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
108}
109
110static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
111 unsigned int target_freq,
112 unsigned int relation)
113{
114 unsigned int index = 0;
115
116 if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
117 target_freq, relation, &index))
118 return -EINVAL;
119
120 kirkwood_cpufreq_set_cpu_state(index);
121
122 return 0;
123}
124
125/* Module init and exit code */
126static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
127{
128 int result;
129
130 /* cpuinfo and default policy values */
131 policy->cpuinfo.transition_latency = 5000; /* 5uS */
132 policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
133
134 result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
135 if (result)
136 return result;
137
138 cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
139
140 return 0;
141}
142
143static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
144{
145 cpufreq_frequency_table_put_attr(policy->cpu);
146 return 0;
147}
148
/* sysfs attributes exported for each policy (scaling_available_frequencies). */
static struct freq_attr *kirkwood_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
153
/* cpufreq core callbacks for the two-state kirkwood clock switch. */
static struct cpufreq_driver kirkwood_cpufreq_driver = {
	.get	= kirkwood_cpufreq_get_cpu_frequency,
	.verify	= kirkwood_cpufreq_verify,
	.target	= kirkwood_cpufreq_target,
	.init	= kirkwood_cpufreq_cpu_init,
	.exit	= kirkwood_cpufreq_cpu_exit,
	.name	= "kirkwood-cpufreq",
	.owner	= THIS_MODULE,
	.attr	= kirkwood_cpufreq_attr,
};
164
165static int kirkwood_cpufreq_probe(struct platform_device *pdev)
166{
167 struct device_node *np;
168 struct resource *res;
169 int err;
170
171 priv.dev = &pdev->dev;
172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 if (!res) {
175 dev_err(&pdev->dev, "Cannot get memory resource\n");
176 return -ENODEV;
177 }
178 priv.base = devm_request_and_ioremap(&pdev->dev, res);
179 if (!priv.base) {
180 dev_err(&pdev->dev, "Cannot ioremap\n");
181 return -EADDRNOTAVAIL;
182 }
183
184 np = of_find_node_by_path("/cpus/cpu@0");
185 if (!np)
186 return -ENODEV;
187
188 priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
189 if (IS_ERR(priv.cpu_clk)) {
190 dev_err(priv.dev, "Unable to get cpuclk");
191 return PTR_ERR(priv.cpu_clk);
192 }
193
194 clk_prepare_enable(priv.cpu_clk);
195 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
196
197 priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
198 if (IS_ERR(priv.ddr_clk)) {
199 dev_err(priv.dev, "Unable to get ddrclk");
200 err = PTR_ERR(priv.ddr_clk);
201 goto out_cpu;
202 }
203
204 clk_prepare_enable(priv.ddr_clk);
205 kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
206
207 priv.powersave_clk = of_clk_get_by_name(np, "powersave");
208 if (IS_ERR(priv.powersave_clk)) {
209 dev_err(priv.dev, "Unable to get powersave");
210 err = PTR_ERR(priv.powersave_clk);
211 goto out_ddr;
212 }
213 clk_prepare(priv.powersave_clk);
214
215 of_node_put(np);
216 np = NULL;
217
218 err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
219 if (!err)
220 return 0;
221
222 dev_err(priv.dev, "Failed to register cpufreq driver");
223
224 clk_disable_unprepare(priv.powersave_clk);
225out_ddr:
226 clk_disable_unprepare(priv.ddr_clk);
227out_cpu:
228 clk_disable_unprepare(priv.cpu_clk);
229 of_node_put(np);
230
231 return err;
232}
233
/* Unregister the cpufreq driver and release the three clocks. */
static int kirkwood_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&kirkwood_cpufreq_driver);

	/*
	 * NOTE(review): powersave_clk is only clk_enable()d while the CPU is
	 * in the DDR-clock state; if we are in the cpu-clk state at remove
	 * time this clk_disable_unprepare() disables a clock that was never
	 * enabled — confirm against the clk core's expectations.
	 */
	clk_disable_unprepare(priv.powersave_clk);
	clk_disable_unprepare(priv.ddr_clk);
	clk_disable_unprepare(priv.cpu_clk);

	return 0;
}
244
/* Platform glue: binds to the "kirkwood-cpufreq" platform device. */
static struct platform_driver kirkwood_cpufreq_platform_driver = {
	.probe = kirkwood_cpufreq_probe,
	.remove = kirkwood_cpufreq_remove,
	.driver = {
		.name = "kirkwood-cpufreq",
		.owner = THIS_MODULE,
	},
};
253
254module_platform_driver(kirkwood_cpufreq_platform_driver);
255
256MODULE_LICENSE("GPL v2");
257MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
258MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
259MODULE_ALIAS("platform:kirkwood-cpufreq");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 89b178a3f849..d4c4989823dc 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
181 /* secondary CPUs are tied to the primary one by the 181 /* secondary CPUs are tied to the primary one by the
182 * cpufreq core if in the secondary policy we tell it that 182 * cpufreq core if in the secondary policy we tell it that
183 * it actually must be one policy together with all others. */ 183 * it actually must be one policy together with all others. */
184 cpumask_copy(policy->cpus, cpu_online_mask); 184 cpumask_setall(policy->cpus);
185 cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); 185 cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
186 186
187 return cpufreq_frequency_table_cpuinfo(policy, 187 return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1f3417a8322d..9128c07bafba 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
110 freq = ret; 110 freq = ret;
111 111
112 if (mpu_reg) { 112 if (mpu_reg) {
113 rcu_read_lock();
113 opp = opp_find_freq_ceil(mpu_dev, &freq); 114 opp = opp_find_freq_ceil(mpu_dev, &freq);
114 if (IS_ERR(opp)) { 115 if (IS_ERR(opp)) {
116 rcu_read_unlock();
115 dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", 117 dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
116 __func__, freqs.new); 118 __func__, freqs.new);
117 return -EINVAL; 119 return -EINVAL;
118 } 120 }
119 volt = opp_get_voltage(opp); 121 volt = opp_get_voltage(opp);
122 rcu_read_unlock();
120 tol = volt * OPP_TOLERANCE / 100; 123 tol = volt * OPP_TOLERANCE / 100;
121 volt_old = regulator_get_voltage(mpu_reg); 124 volt_old = regulator_get_voltage(mpu_reg);
122 } 125 }
@@ -211,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
211 * interface to handle this scenario. Additional is_smp() check 214 * interface to handle this scenario. Additional is_smp() check
212 * is to keep SMP_ON_UP build working. 215 * is to keep SMP_ON_UP build working.
213 */ 216 */
214 if (is_smp()) { 217 if (is_smp())
215 policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
216 cpumask_setall(policy->cpus); 218 cpumask_setall(policy->cpus);
217 }
218 219
219 /* FIXME: what's the actual transition time? */ 220 /* FIXME: what's the actual transition time? */
220 policy->cpuinfo.transition_latency = 300 * 1000; 221 policy->cpuinfo.transition_latency = 300 * 1000;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 056faf6af1a9..d13a13678b5f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
1249 .attr = powernow_k8_attr, 1249 .attr = powernow_k8_attr,
1250}; 1250};
1251 1251
1252static void __request_acpi_cpufreq(void)
1253{
1254 const char *cur_drv, *drv = "acpi-cpufreq";
1255
1256 cur_drv = cpufreq_get_current_driver();
1257 if (!cur_drv)
1258 goto request;
1259
1260 if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
1261 pr_warn(PFX "WTF driver: %s\n", cur_drv);
1262
1263 return;
1264
1265 request:
1266 pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
1267 request_module(drv);
1268}
1269
1252/* driver entry point for init */ 1270/* driver entry point for init */
1253static int __cpuinit powernowk8_init(void) 1271static int __cpuinit powernowk8_init(void)
1254{ 1272{
1255 unsigned int i, supported_cpus = 0; 1273 unsigned int i, supported_cpus = 0;
1256 int rv; 1274 int ret;
1257 1275
1258 if (static_cpu_has(X86_FEATURE_HW_PSTATE)) { 1276 if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
1259 pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n"); 1277 __request_acpi_cpufreq();
1260 request_module("acpi-cpufreq");
1261 return -ENODEV; 1278 return -ENODEV;
1262 } 1279 }
1263 1280
1264 if (!x86_match_cpu(powernow_k8_ids)) 1281 if (!x86_match_cpu(powernow_k8_ids))
1265 return -ENODEV; 1282 return -ENODEV;
1266 1283
1284 get_online_cpus();
1267 for_each_online_cpu(i) { 1285 for_each_online_cpu(i) {
1268 int rc; 1286 smp_call_function_single(i, check_supported_cpu, &ret, 1);
1269 smp_call_function_single(i, check_supported_cpu, &rc, 1); 1287 if (!ret)
1270 if (rc == 0)
1271 supported_cpus++; 1288 supported_cpus++;
1272 } 1289 }
1273 1290
1274 if (supported_cpus != num_online_cpus()) 1291 if (supported_cpus != num_online_cpus()) {
1292 put_online_cpus();
1275 return -ENODEV; 1293 return -ENODEV;
1294 }
1295 put_online_cpus();
1276 1296
1277 rv = cpufreq_register_driver(&cpufreq_amd64_driver); 1297 ret = cpufreq_register_driver(&cpufreq_amd64_driver);
1298 if (ret)
1299 return ret;
1278 1300
1279 if (!rv) 1301 pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
1280 pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", 1302 num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
1281 num_online_nodes(), boot_cpu_data.x86_model_id,
1282 supported_cpus);
1283 1303
1284 return rv; 1304 return ret;
1285} 1305}
1286 1306
1287/* driver entry point for term */ 1307/* driver entry point for term */
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4575cfe41755..7e4d77327957 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,7 +30,7 @@ static struct {
30 u32 cnt; 30 u32 cnt;
31} spear_cpufreq; 31} spear_cpufreq;
32 32
33int spear_cpufreq_verify(struct cpufreq_policy *policy) 33static int spear_cpufreq_verify(struct cpufreq_policy *policy)
34{ 34{
35 return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); 35 return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
36} 36}
@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
157 157
158 freqs.new = newfreq / 1000; 158 freqs.new = newfreq / 1000;
159 freqs.new /= mult; 159 freqs.new /= mult;
160 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 160
161 for_each_cpu(freqs.cpu, policy->cpus)
162 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
161 163
162 if (mult == 2) 164 if (mult == 2)
163 ret = spear1340_set_cpu_rate(srcclk, newfreq); 165 ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
170 freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; 172 freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
171 } 173 }
172 174
173 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 175 for_each_cpu(freqs.cpu, policy->cpus)
176 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
174 return ret; 177 return ret;
175} 178}
176 179
@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
188 policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; 191 policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
189 policy->cur = spear_cpufreq_get(0); 192 policy->cur = spear_cpufreq_get(0);
190 193
191 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); 194 cpumask_setall(policy->cpus);
192 cpumask_copy(policy->related_cpus, policy->cpus);
193 195
194 return 0; 196 return 0;
195} 197}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e1f6860e069c..eba69290e074 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -144,7 +144,6 @@ int cpuidle_idle_call(void)
144 return 0; 144 return 0;
145 } 145 }
146 146
147 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
148 trace_cpu_idle_rcuidle(next_state, dev->cpu); 147 trace_cpu_idle_rcuidle(next_state, dev->cpu);
149 148
150 if (cpuidle_state_is_coupled(dev, drv, next_state)) 149 if (cpuidle_state_is_coupled(dev, drv, next_state))
@@ -153,7 +152,6 @@ int cpuidle_idle_call(void)
153 else 152 else
154 entered_state = cpuidle_enter_state(dev, drv, next_state); 153 entered_state = cpuidle_enter_state(dev, drv, next_state);
155 154
156 trace_power_end_rcuidle(dev->cpu);
157 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 155 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
158 156
159 /* give the governor an opportunity to reflect on the outcome */ 157 /* give the governor an opportunity to reflect on the outcome */
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 53766f39aadd..3b367973a802 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -994,6 +994,11 @@ module_exit(devfreq_exit);
994 * @freq: The frequency given to target function 994 * @freq: The frequency given to target function
995 * @flags: Flags handed from devfreq framework. 995 * @flags: Flags handed from devfreq framework.
996 * 996 *
997 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
998 * protected pointer. The reason for the same is that the opp pointer which is
999 * returned will remain valid for use with opp_get_{voltage, freq} only while
1000 * under the locked area. The pointer returned must be used prior to unlocking
1001 * with rcu_read_unlock() to maintain the integrity of the pointer.
997 */ 1002 */
998struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, 1003struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
999 u32 flags) 1004 u32 flags)
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 80c745e83082..46d94e9e95b5 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@ enum busclk_level_idx {
73#define EX4210_LV_NUM (LV_2 + 1) 73#define EX4210_LV_NUM (LV_2 + 1)
74#define EX4x12_LV_NUM (LV_4 + 1) 74#define EX4x12_LV_NUM (LV_4 + 1)
75 75
76/**
77 * struct busfreq_opp_info - opp information for bus
78 * @rate: Frequency in hertz
79 * @volt: Voltage in microvolts corresponding to this OPP
80 */
81struct busfreq_opp_info {
82 unsigned long rate;
83 unsigned long volt;
84};
85
76struct busfreq_data { 86struct busfreq_data {
77 enum exynos4_busf_type type; 87 enum exynos4_busf_type type;
78 struct device *dev; 88 struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
80 bool disabled; 90 bool disabled;
81 struct regulator *vdd_int; 91 struct regulator *vdd_int;
82 struct regulator *vdd_mif; /* Exynos4412/4212 only */ 92 struct regulator *vdd_mif; /* Exynos4412/4212 only */
83 struct opp *curr_opp; 93 struct busfreq_opp_info curr_oppinfo;
84 struct exynos4_ppmu dmc[2]; 94 struct exynos4_ppmu dmc[2];
85 95
86 struct notifier_block pm_notifier; 96 struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
296}; 306};
297 307
298 308
299static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp) 309static int exynos4210_set_busclk(struct busfreq_data *data,
310 struct busfreq_opp_info *oppi)
300{ 311{
301 unsigned int index; 312 unsigned int index;
302 unsigned int tmp; 313 unsigned int tmp;
303 314
304 for (index = LV_0; index < EX4210_LV_NUM; index++) 315 for (index = LV_0; index < EX4210_LV_NUM; index++)
305 if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk) 316 if (oppi->rate == exynos4210_busclk_table[index].clk)
306 break; 317 break;
307 318
308 if (index == EX4210_LV_NUM) 319 if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
361 return 0; 372 return 0;
362} 373}
363 374
364static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp) 375static int exynos4x12_set_busclk(struct busfreq_data *data,
376 struct busfreq_opp_info *oppi)
365{ 377{
366 unsigned int index; 378 unsigned int index;
367 unsigned int tmp; 379 unsigned int tmp;
368 380
369 for (index = LV_0; index < EX4x12_LV_NUM; index++) 381 for (index = LV_0; index < EX4x12_LV_NUM; index++)
370 if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk) 382 if (oppi->rate == exynos4x12_mifclk_table[index].clk)
371 break; 383 break;
372 384
373 if (index == EX4x12_LV_NUM) 385 if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
576 return -EINVAL; 588 return -EINVAL;
577} 589}
578 590
579static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp, 591static int exynos4_bus_setvolt(struct busfreq_data *data,
580 struct opp *oldopp) 592 struct busfreq_opp_info *oppi,
593 struct busfreq_opp_info *oldoppi)
581{ 594{
582 int err = 0, tmp; 595 int err = 0, tmp;
583 unsigned long volt = opp_get_voltage(opp); 596 unsigned long volt = oppi->volt;
584 597
585 switch (data->type) { 598 switch (data->type) {
586 case TYPE_BUSF_EXYNOS4210: 599 case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
595 if (err) 608 if (err)
596 break; 609 break;
597 610
598 tmp = exynos4x12_get_intspec(opp_get_freq(opp)); 611 tmp = exynos4x12_get_intspec(oppi->rate);
599 if (tmp < 0) { 612 if (tmp < 0) {
600 err = tmp; 613 err = tmp;
601 regulator_set_voltage(data->vdd_mif, 614 regulator_set_voltage(data->vdd_mif,
602 opp_get_voltage(oldopp), 615 oldoppi->volt,
603 MAX_SAFEVOLT); 616 MAX_SAFEVOLT);
604 break; 617 break;
605 } 618 }
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
609 /* Try to recover */ 622 /* Try to recover */
610 if (err) 623 if (err)
611 regulator_set_voltage(data->vdd_mif, 624 regulator_set_voltage(data->vdd_mif,
612 opp_get_voltage(oldopp), 625 oldoppi->volt,
613 MAX_SAFEVOLT); 626 MAX_SAFEVOLT);
614 break; 627 break;
615 default: 628 default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
626 struct platform_device *pdev = container_of(dev, struct platform_device, 639 struct platform_device *pdev = container_of(dev, struct platform_device,
627 dev); 640 dev);
628 struct busfreq_data *data = platform_get_drvdata(pdev); 641 struct busfreq_data *data = platform_get_drvdata(pdev);
629 struct opp *opp = devfreq_recommended_opp(dev, _freq, flags); 642 struct opp *opp;
630 unsigned long freq = opp_get_freq(opp); 643 unsigned long freq;
631 unsigned long old_freq = opp_get_freq(data->curr_opp); 644 unsigned long old_freq = data->curr_oppinfo.rate;
645 struct busfreq_opp_info new_oppinfo;
632 646
633 if (IS_ERR(opp)) 647 rcu_read_lock();
648 opp = devfreq_recommended_opp(dev, _freq, flags);
649 if (IS_ERR(opp)) {
650 rcu_read_unlock();
634 return PTR_ERR(opp); 651 return PTR_ERR(opp);
652 }
653 new_oppinfo.rate = opp_get_freq(opp);
654 new_oppinfo.volt = opp_get_voltage(opp);
655 rcu_read_unlock();
656 freq = new_oppinfo.rate;
635 657
636 if (old_freq == freq) 658 if (old_freq == freq)
637 return 0; 659 return 0;
638 660
639 dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp)); 661 dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
640 662
641 mutex_lock(&data->lock); 663 mutex_lock(&data->lock);
642 664
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
644 goto out; 666 goto out;
645 667
646 if (old_freq < freq) 668 if (old_freq < freq)
647 err = exynos4_bus_setvolt(data, opp, data->curr_opp); 669 err = exynos4_bus_setvolt(data, &new_oppinfo,
670 &data->curr_oppinfo);
648 if (err) 671 if (err)
649 goto out; 672 goto out;
650 673
651 if (old_freq != freq) { 674 if (old_freq != freq) {
652 switch (data->type) { 675 switch (data->type) {
653 case TYPE_BUSF_EXYNOS4210: 676 case TYPE_BUSF_EXYNOS4210:
654 err = exynos4210_set_busclk(data, opp); 677 err = exynos4210_set_busclk(data, &new_oppinfo);
655 break; 678 break;
656 case TYPE_BUSF_EXYNOS4x12: 679 case TYPE_BUSF_EXYNOS4x12:
657 err = exynos4x12_set_busclk(data, opp); 680 err = exynos4x12_set_busclk(data, &new_oppinfo);
658 break; 681 break;
659 default: 682 default:
660 err = -EINVAL; 683 err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
664 goto out; 687 goto out;
665 688
666 if (old_freq > freq) 689 if (old_freq > freq)
667 err = exynos4_bus_setvolt(data, opp, data->curr_opp); 690 err = exynos4_bus_setvolt(data, &new_oppinfo,
691 &data->curr_oppinfo);
668 if (err) 692 if (err)
669 goto out; 693 goto out;
670 694
671 data->curr_opp = opp; 695 data->curr_oppinfo = new_oppinfo;
672out: 696out:
673 mutex_unlock(&data->lock); 697 mutex_unlock(&data->lock);
674 return err; 698 return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
702 726
703 exynos4_read_ppmu(data); 727 exynos4_read_ppmu(data);
704 busier_dmc = exynos4_get_busier_dmc(data); 728 busier_dmc = exynos4_get_busier_dmc(data);
705 stat->current_frequency = opp_get_freq(data->curr_opp); 729 stat->current_frequency = data->curr_oppinfo.rate;
706 730
707 if (busier_dmc) 731 if (busier_dmc)
708 addr = S5P_VA_DMC1; 732 addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
933 struct busfreq_data *data = container_of(this, struct busfreq_data, 957 struct busfreq_data *data = container_of(this, struct busfreq_data,
934 pm_notifier); 958 pm_notifier);
935 struct opp *opp; 959 struct opp *opp;
960 struct busfreq_opp_info new_oppinfo;
936 unsigned long maxfreq = ULONG_MAX; 961 unsigned long maxfreq = ULONG_MAX;
937 int err = 0; 962 int err = 0;
938 963
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
943 968
944 data->disabled = true; 969 data->disabled = true;
945 970
971 rcu_read_lock();
946 opp = opp_find_freq_floor(data->dev, &maxfreq); 972 opp = opp_find_freq_floor(data->dev, &maxfreq);
973 if (IS_ERR(opp)) {
974 rcu_read_unlock();
975 dev_err(data->dev, "%s: unable to find a min freq\n",
976 __func__);
977 return PTR_ERR(opp);
978 }
979 new_oppinfo.rate = opp_get_freq(opp);
980 new_oppinfo.volt = opp_get_voltage(opp);
981 rcu_read_unlock();
947 982
948 err = exynos4_bus_setvolt(data, opp, data->curr_opp); 983 err = exynos4_bus_setvolt(data, &new_oppinfo,
984 &data->curr_oppinfo);
949 if (err) 985 if (err)
950 goto unlock; 986 goto unlock;
951 987
952 switch (data->type) { 988 switch (data->type) {
953 case TYPE_BUSF_EXYNOS4210: 989 case TYPE_BUSF_EXYNOS4210:
954 err = exynos4210_set_busclk(data, opp); 990 err = exynos4210_set_busclk(data, &new_oppinfo);
955 break; 991 break;
956 case TYPE_BUSF_EXYNOS4x12: 992 case TYPE_BUSF_EXYNOS4x12:
957 err = exynos4x12_set_busclk(data, opp); 993 err = exynos4x12_set_busclk(data, &new_oppinfo);
958 break; 994 break;
959 default: 995 default:
960 err = -EINVAL; 996 err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
962 if (err) 998 if (err)
963 goto unlock; 999 goto unlock;
964 1000
965 data->curr_opp = opp; 1001 data->curr_oppinfo = new_oppinfo;
966unlock: 1002unlock:
967 mutex_unlock(&data->lock); 1003 mutex_unlock(&data->lock);
968 if (err) 1004 if (err)
@@ -1027,13 +1063,17 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
1027 } 1063 }
1028 } 1064 }
1029 1065
1066 rcu_read_lock();
1030 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); 1067 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
1031 if (IS_ERR(opp)) { 1068 if (IS_ERR(opp)) {
1069 rcu_read_unlock();
1032 dev_err(dev, "Invalid initial frequency %lu kHz.\n", 1070 dev_err(dev, "Invalid initial frequency %lu kHz.\n",
1033 exynos4_devfreq_profile.initial_freq); 1071 exynos4_devfreq_profile.initial_freq);
1034 return PTR_ERR(opp); 1072 return PTR_ERR(opp);
1035 } 1073 }
1036 data->curr_opp = opp; 1074 data->curr_oppinfo.rate = opp_get_freq(opp);
1075 data->curr_oppinfo.volt = opp_get_voltage(opp);
1076 rcu_read_unlock();
1037 1077
1038 platform_set_drvdata(pdev, data); 1078 platform_set_drvdata(pdev, data);
1039 1079
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index dbf0e6f8de8a..a7dcf78b1ff8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -684,9 +684,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
684 break; 684 break;
685 } 685 }
686 686
687 imxdmac->hw_chaining = 1; 687 imxdmac->hw_chaining = 0;
688 if (!imxdma_hw_chain(imxdmac)) 688
689 return -EINVAL;
690 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | 689 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
691 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | 690 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
692 CCR_REN; 691 CCR_REN;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e5fc944de1f0..3e9d66920eb3 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -951,7 +951,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
951 goto free_resources; 951 goto free_resources;
952 } 952 }
953 } 953 }
954 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); 954 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
955 955
956 /* skip validate if the capability is not present */ 956 /* skip validate if the capability is not present */
957 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) 957 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index c39e61bc8172..3cad856fe67f 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -266,6 +266,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
266 if (async_tx_test_ack(&dma_desc->txd)) { 266 if (async_tx_test_ack(&dma_desc->txd)) {
267 list_del(&dma_desc->node); 267 list_del(&dma_desc->node);
268 spin_unlock_irqrestore(&tdc->lock, flags); 268 spin_unlock_irqrestore(&tdc->lock, flags);
269 dma_desc->txd.flags = 0;
269 return dma_desc; 270 return dma_desc;
270 } 271 }
271 } 272 }
@@ -1050,7 +1051,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1050 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1051 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1051 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1052 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1052 1053
1053 csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; 1054 csr |= TEGRA_APBDMA_CSR_FLOW;
1055 if (flags & DMA_PREP_INTERRUPT)
1056 csr |= TEGRA_APBDMA_CSR_IE_EOC;
1054 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1057 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1055 1058
1056 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 1059 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1098,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1095 mem += len; 1098 mem += len;
1096 } 1099 }
1097 sg_req->last_sg = true; 1100 sg_req->last_sg = true;
1098 dma_desc->txd.flags = 0; 1101 if (flags & DMA_CTRL_ACK)
1102 dma_desc->txd.flags = DMA_CTRL_ACK;
1099 1103
1100 /* 1104 /*
1101 * Make sure that mode should not be conflicting with currently 1105 * Make sure that mode should not be conflicting with currently
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ad8bf2aa629d..910b0116c128 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
31 * 31 *
32 *FIXME: Produce a better mapping/linearisation. 32 *FIXME: Produce a better mapping/linearisation.
33 */ 33 */
34struct scrubrate { 34static const struct scrubrate {
35 u32 scrubval; /* bit pattern for scrub rate */ 35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */ 36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
37} scrubrates[] = { 37} scrubrates[] = {
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
239 * DRAM base/limit associated with node_id 239 * DRAM base/limit associated with node_id
240 */ 240 */
241static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, 241static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
242 unsigned nid) 242 u8 nid)
243{ 243{
244 u64 addr; 244 u64 addr;
245 245
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
265 u64 sys_addr) 265 u64 sys_addr)
266{ 266{
267 struct amd64_pvt *pvt; 267 struct amd64_pvt *pvt;
268 unsigned node_id; 268 u8 node_id;
269 u32 intlv_en, bits; 269 u32 intlv_en, bits;
270 270
271 /* 271 /*
@@ -602,111 +602,6 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
602 return input_addr; 602 return input_addr;
603} 603}
604 604
605
606/*
607 * @input_addr is an InputAddr associated with the node represented by mci.
608 * Translate @input_addr to a DramAddr and return the result.
609 */
610static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
611{
612 struct amd64_pvt *pvt;
613 unsigned node_id, intlv_shift;
614 u64 bits, dram_addr;
615 u32 intlv_sel;
616
617 /*
618 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
619 * shows how to translate a DramAddr to an InputAddr. Here we reverse
620 * this procedure. When translating from a DramAddr to an InputAddr, the
621 * bits used for node interleaving are discarded. Here we recover these
622 * bits from the IntlvSel field of the DRAM Limit register (section
623 * 3.4.4.2) for the node that input_addr is associated with.
624 */
625 pvt = mci->pvt_info;
626 node_id = pvt->mc_node_id;
627
628 BUG_ON(node_id > 7);
629
630 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
631 if (intlv_shift == 0) {
632 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
633 (unsigned long)input_addr);
634
635 return input_addr;
636 }
637
638 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
639 (input_addr & 0xfff);
640
641 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
642 dram_addr = bits + (intlv_sel << 12);
643
644 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
645 (unsigned long)input_addr,
646 (unsigned long)dram_addr, intlv_shift);
647
648 return dram_addr;
649}
650
651/*
652 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
653 * @dram_addr to a SysAddr.
654 */
655static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
656{
657 struct amd64_pvt *pvt = mci->pvt_info;
658 u64 hole_base, hole_offset, hole_size, base, sys_addr;
659 int ret = 0;
660
661 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
662 &hole_size);
663 if (!ret) {
664 if ((dram_addr >= hole_base) &&
665 (dram_addr < (hole_base + hole_size))) {
666 sys_addr = dram_addr + hole_offset;
667
668 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
669 (unsigned long)dram_addr,
670 (unsigned long)sys_addr);
671
672 return sys_addr;
673 }
674 }
675
676 base = get_dram_base(pvt, pvt->mc_node_id);
677 sys_addr = dram_addr + base;
678
679 /*
680 * The sys_addr we have computed up to this point is a 40-bit value
681 * because the k8 deals with 40-bit values. However, the value we are
682 * supposed to return is a full 64-bit physical address. The AMD
683 * x86-64 architecture specifies that the most significant implemented
684 * address bit through bit 63 of a physical address must be either all
685 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
686 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
687 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
688 * Programming.
689 */
690 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
691
692 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
693 pvt->mc_node_id, (unsigned long)dram_addr,
694 (unsigned long)sys_addr);
695
696 return sys_addr;
697}
698
699/*
700 * @input_addr is an InputAddr associated with the node given by mci. Translate
701 * @input_addr to a SysAddr.
702 */
703static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
704 u64 input_addr)
705{
706 return dram_addr_to_sys_addr(mci,
707 input_addr_to_dram_addr(mci, input_addr));
708}
709
710/* Map the Error address to a PAGE and PAGE OFFSET. */ 605/* Map the Error address to a PAGE and PAGE OFFSET. */
711static inline void error_address_to_page_and_offset(u64 error_address, 606static inline void error_address_to_page_and_offset(u64 error_address,
712 struct err_info *err) 607 struct err_info *err)
@@ -939,7 +834,8 @@ static u64 get_error_address(struct mce *m)
939 struct amd64_pvt *pvt; 834 struct amd64_pvt *pvt;
940 u64 cc6_base, tmp_addr; 835 u64 cc6_base, tmp_addr;
941 u32 tmp; 836 u32 tmp;
942 u8 mce_nid, intlv_en; 837 u16 mce_nid;
838 u8 intlv_en;
943 839
944 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) 840 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
945 return addr; 841 return addr;
@@ -979,10 +875,29 @@ static u64 get_error_address(struct mce *m)
979 return addr; 875 return addr;
980} 876}
981 877
878static struct pci_dev *pci_get_related_function(unsigned int vendor,
879 unsigned int device,
880 struct pci_dev *related)
881{
882 struct pci_dev *dev = NULL;
883
884 while ((dev = pci_get_device(vendor, device, dev))) {
885 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
886 (dev->bus->number == related->bus->number) &&
887 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
888 break;
889 }
890
891 return dev;
892}
893
982static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) 894static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
983{ 895{
896 struct amd_northbridge *nb;
897 struct pci_dev *misc, *f1 = NULL;
984 struct cpuinfo_x86 *c = &boot_cpu_data; 898 struct cpuinfo_x86 *c = &boot_cpu_data;
985 int off = range << 3; 899 int off = range << 3;
900 u32 llim;
986 901
987 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); 902 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
988 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); 903 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -996,30 +911,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
996 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); 911 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
997 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); 912 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
998 913
999 /* Factor in CC6 save area by reading dst node's limit reg */ 914 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1000 if (c->x86 == 0x15) { 915 if (c->x86 != 0x15)
1001 struct pci_dev *f1 = NULL; 916 return;
1002 u8 nid = dram_dst_node(pvt, range);
1003 u32 llim;
1004 917
1005 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); 918 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1006 if (WARN_ON(!f1)) 919 if (WARN_ON(!nb))
1007 return; 920 return;
921
922 misc = nb->misc;
923 f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
924 if (WARN_ON(!f1))
925 return;
1008 926
1009 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); 927 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1010 928
1011 pvt->ranges[range].lim.lo &= GENMASK(0, 15); 929 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1012 930
1013 /* {[39:27],111b} */ 931 /* {[39:27],111b} */
1014 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; 932 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1015 933
1016 pvt->ranges[range].lim.hi &= GENMASK(0, 7); 934 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1017 935
1018 /* [47:40] */ 936 /* [47:40] */
1019 pvt->ranges[range].lim.hi |= llim >> 13; 937 pvt->ranges[range].lim.hi |= llim >> 13;
1020 938
1021 pci_dev_put(f1); 939 pci_dev_put(f1);
1022 }
1023} 940}
1024 941
1025static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, 942static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1305,7 +1222,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1305} 1222}
1306 1223
1307/* Convert the sys_addr to the normalized DCT address */ 1224/* Convert the sys_addr to the normalized DCT address */
1308static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, 1225static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1309 u64 sys_addr, bool hi_rng, 1226 u64 sys_addr, bool hi_rng,
1310 u32 dct_sel_base_addr) 1227 u32 dct_sel_base_addr)
1311{ 1228{
@@ -1381,7 +1298,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1381 * -EINVAL: NOT FOUND 1298 * -EINVAL: NOT FOUND
1382 * 0..csrow = Chip-Select Row 1299 * 0..csrow = Chip-Select Row
1383 */ 1300 */
1384static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) 1301static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1385{ 1302{
1386 struct mem_ctl_info *mci; 1303 struct mem_ctl_info *mci;
1387 struct amd64_pvt *pvt; 1304 struct amd64_pvt *pvt;
@@ -1672,23 +1589,6 @@ static struct amd64_family_type amd64_family_types[] = {
1672 }, 1589 },
1673}; 1590};
1674 1591
1675static struct pci_dev *pci_get_related_function(unsigned int vendor,
1676 unsigned int device,
1677 struct pci_dev *related)
1678{
1679 struct pci_dev *dev = NULL;
1680
1681 dev = pci_get_device(vendor, device, dev);
1682 while (dev) {
1683 if ((dev->bus->number == related->bus->number) &&
1684 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1685 break;
1686 dev = pci_get_device(vendor, device, dev);
1687 }
1688
1689 return dev;
1690}
1691
1692/* 1592/*
1693 * These are tables of eigenvectors (one per line) which can be used for the 1593 * These are tables of eigenvectors (one per line) which can be used for the
1694 * construction of the syndrome tables. The modified syndrome search algorithm 1594 * construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1596,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
1696 * 1596 *
1697 * Algorithm courtesy of Ross LaFetra from AMD. 1597 * Algorithm courtesy of Ross LaFetra from AMD.
1698 */ 1598 */
1699static u16 x4_vectors[] = { 1599static const u16 x4_vectors[] = {
1700 0x2f57, 0x1afe, 0x66cc, 0xdd88, 1600 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1701 0x11eb, 0x3396, 0x7f4c, 0xeac8, 1601 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1702 0x0001, 0x0002, 0x0004, 0x0008, 1602 0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1635,7 @@ static u16 x4_vectors[] = {
1735 0x19a9, 0x2efe, 0xb5cc, 0x6f88, 1635 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1736}; 1636};
1737 1637
1738static u16 x8_vectors[] = { 1638static const u16 x8_vectors[] = {
1739 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 1639 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1740 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 1640 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1741 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 1641 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1657,7 @@ static u16 x8_vectors[] = {
1757 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 1657 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1758}; 1658};
1759 1659
1760static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, 1660static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1761 unsigned v_dim) 1661 unsigned v_dim)
1762{ 1662{
1763 unsigned int i, err_sym; 1663 unsigned int i, err_sym;
@@ -2181,7 +2081,7 @@ static int init_csrows(struct mem_ctl_info *mci)
2181} 2081}
2182 2082
2183/* get all cores on this DCT */ 2083/* get all cores on this DCT */
2184static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) 2084static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2185{ 2085{
2186 int cpu; 2086 int cpu;
2187 2087
@@ -2191,7 +2091,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2191} 2091}
2192 2092
2193/* check MCG_CTL on all the cpus on this node */ 2093/* check MCG_CTL on all the cpus on this node */
2194static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) 2094static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
2195{ 2095{
2196 cpumask_var_t mask; 2096 cpumask_var_t mask;
2197 int cpu, nbe; 2097 int cpu, nbe;
@@ -2224,7 +2124,7 @@ out:
2224 return ret; 2124 return ret;
2225} 2125}
2226 2126
2227static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) 2127static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2228{ 2128{
2229 cpumask_var_t cmask; 2129 cpumask_var_t cmask;
2230 int cpu; 2130 int cpu;
@@ -2262,7 +2162,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2262 return 0; 2162 return 0;
2263} 2163}
2264 2164
2265static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2165static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2266 struct pci_dev *F3) 2166 struct pci_dev *F3)
2267{ 2167{
2268 bool ret = true; 2168 bool ret = true;
@@ -2314,7 +2214,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2314 return ret; 2214 return ret;
2315} 2215}
2316 2216
2317static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2217static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2318 struct pci_dev *F3) 2218 struct pci_dev *F3)
2319{ 2219{
2320 u32 value, mask = 0x3; /* UECC/CECC enable */ 2220 u32 value, mask = 0x3; /* UECC/CECC enable */
@@ -2353,7 +2253,7 @@ static const char *ecc_msg =
2353 "'ecc_enable_override'.\n" 2253 "'ecc_enable_override'.\n"
2354 " (Note that use of the override may cause unknown side effects.)\n"; 2254 " (Note that use of the override may cause unknown side effects.)\n";
2355 2255
2356static bool ecc_enabled(struct pci_dev *F3, u8 nid) 2256static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2357{ 2257{
2358 u32 value; 2258 u32 value;
2359 u8 ecc_en = 0; 2259 u8 ecc_en = 0;
@@ -2474,7 +2374,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2474 struct mem_ctl_info *mci = NULL; 2374 struct mem_ctl_info *mci = NULL;
2475 struct edac_mc_layer layers[2]; 2375 struct edac_mc_layer layers[2];
2476 int err = 0, ret; 2376 int err = 0, ret;
2477 u8 nid = get_node_id(F2); 2377 u16 nid = amd_get_node_id(F2);
2478 2378
2479 ret = -ENOMEM; 2379 ret = -ENOMEM;
2480 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); 2380 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2466,7 @@ err_ret:
2566static int amd64_probe_one_instance(struct pci_dev *pdev, 2466static int amd64_probe_one_instance(struct pci_dev *pdev,
2567 const struct pci_device_id *mc_type) 2467 const struct pci_device_id *mc_type)
2568{ 2468{
2569 u8 nid = get_node_id(pdev); 2469 u16 nid = amd_get_node_id(pdev);
2570 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2470 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2571 struct ecc_settings *s; 2471 struct ecc_settings *s;
2572 int ret = 0; 2472 int ret = 0;
@@ -2616,7 +2516,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
2616{ 2516{
2617 struct mem_ctl_info *mci; 2517 struct mem_ctl_info *mci;
2618 struct amd64_pvt *pvt; 2518 struct amd64_pvt *pvt;
2619 u8 nid = get_node_id(pdev); 2519 u16 nid = amd_get_node_id(pdev);
2620 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2520 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2621 struct ecc_settings *s = ecc_stngs[nid]; 2521 struct ecc_settings *s = ecc_stngs[nid];
2622 2522
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e864f407806c..35637d83f235 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -292,12 +292,6 @@
292/* MSRs */ 292/* MSRs */
293#define MSR_MCGCTL_NBE BIT(4) 293#define MSR_MCGCTL_NBE BIT(4)
294 294
295/* AMD sets the first MC device at device ID 0x18. */
296static inline u8 get_node_id(struct pci_dev *pdev)
297{
298 return PCI_SLOT(pdev->devfn) - 0x18;
299}
300
301enum amd_families { 295enum amd_families {
302 K8_CPUS = 0, 296 K8_CPUS = 0,
303 F10_CPUS, 297 F10_CPUS,
@@ -340,7 +334,7 @@ struct amd64_pvt {
340 /* pci_device handles which we utilize */ 334 /* pci_device handles which we utilize */
341 struct pci_dev *F1, *F2, *F3; 335 struct pci_dev *F1, *F2, *F3;
342 336
343 unsigned mc_node_id; /* MC index of this MC node */ 337 u16 mc_node_id; /* MC index of this MC node */
344 int ext_model; /* extended model value of this node */ 338 int ext_model; /* extended model value of this node */
345 int channel_count; 339 int channel_count;
346 340
@@ -393,7 +387,7 @@ struct err_info {
393 u32 offset; 387 u32 offset;
394}; 388};
395 389
396static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) 390static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
397{ 391{
398 u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; 392 u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
399 393
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
403 return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr; 397 return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
404} 398}
405 399
406static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i) 400static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
407{ 401{
408 u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff; 402 u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
409 403
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 281f566a5513..d1e9eb191f2b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
340 /* 340 /*
341 * Alocate and fill the csrow/channels structs 341 * Alocate and fill the csrow/channels structs
342 */ 342 */
343 mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL); 343 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
344 if (!mci->csrows) 344 if (!mci->csrows)
345 goto error; 345 goto error;
346 for (row = 0; row < tot_csrows; row++) { 346 for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
351 csr->csrow_idx = row; 351 csr->csrow_idx = row;
352 csr->mci = mci; 352 csr->mci = mci;
353 csr->nr_channels = tot_channels; 353 csr->nr_channels = tot_channels;
354 csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, 354 csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
355 GFP_KERNEL); 355 GFP_KERNEL);
356 if (!csr->channels) 356 if (!csr->channels)
357 goto error; 357 goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
369 /* 369 /*
370 * Allocate and fill the dimm structs 370 * Allocate and fill the dimm structs
371 */ 371 */
372 mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); 372 mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
373 if (!mci->dimms) 373 if (!mci->dimms)
374 goto error; 374 goto error;
375 375
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index dc6e905ee1a5..0056c4dae9d5 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
256 struct edac_pci_dev_attribute *edac_pci_dev; 256 struct edac_pci_dev_attribute *edac_pci_dev;
257 edac_pci_dev = (struct edac_pci_dev_attribute *)attr; 257 edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
258 258
259 if (edac_pci_dev->show) 259 if (edac_pci_dev->store)
260 return edac_pci_dev->store(edac_pci_dev->value, buffer, count); 260 return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
261 return -EIO; 261 return -EIO;
262} 262}
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ad637572d8c7..f3f0c930d550 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -39,30 +39,28 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
39 */ 39 */
40 40
41/* transaction type */ 41/* transaction type */
42const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; 42static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
43EXPORT_SYMBOL_GPL(tt_msgs);
44 43
45/* cache level */ 44/* cache level */
46const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; 45static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
47EXPORT_SYMBOL_GPL(ll_msgs);
48 46
49/* memory transaction type */ 47/* memory transaction type */
50const char * const rrrr_msgs[] = { 48static const char * const rrrr_msgs[] = {
51 "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" 49 "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
52}; 50};
53EXPORT_SYMBOL_GPL(rrrr_msgs);
54 51
55/* participating processor */ 52/* participating processor */
56const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; 53const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
57EXPORT_SYMBOL_GPL(pp_msgs); 54EXPORT_SYMBOL_GPL(pp_msgs);
58 55
59/* request timeout */ 56/* request timeout */
60const char * const to_msgs[] = { "no timeout", "timed out" }; 57static const char * const to_msgs[] = { "no timeout", "timed out" };
61EXPORT_SYMBOL_GPL(to_msgs);
62 58
63/* memory or i/o */ 59/* memory or i/o */
64const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; 60static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
65EXPORT_SYMBOL_GPL(ii_msgs); 61
62/* internal error type */
63static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
66 64
67static const char * const f15h_mc1_mce_desc[] = { 65static const char * const f15h_mc1_mce_desc[] = {
68 "UC during a demand linefill from L2", 66 "UC during a demand linefill from L2",
@@ -176,7 +174,7 @@ static bool k8_mc0_mce(u16 ec, u8 xec)
176 return f10h_mc0_mce(ec, xec); 174 return f10h_mc0_mce(ec, xec);
177} 175}
178 176
179static bool f14h_mc0_mce(u16 ec, u8 xec) 177static bool cat_mc0_mce(u16 ec, u8 xec)
180{ 178{
181 u8 r4 = R4(ec); 179 u8 r4 = R4(ec);
182 bool ret = true; 180 bool ret = true;
@@ -330,22 +328,28 @@ static bool k8_mc1_mce(u16 ec, u8 xec)
330 return ret; 328 return ret;
331} 329}
332 330
333static bool f14h_mc1_mce(u16 ec, u8 xec) 331static bool cat_mc1_mce(u16 ec, u8 xec)
334{ 332{
335 u8 r4 = R4(ec); 333 u8 r4 = R4(ec);
336 bool ret = true; 334 bool ret = true;
337 335
338 if (MEM_ERROR(ec)) { 336 if (!MEM_ERROR(ec))
339 if (TT(ec) != 0 || LL(ec) != 1) 337 return false;
340 ret = false; 338
339 if (TT(ec) != TT_INSTR)
340 return false;
341
342 if (r4 == R4_IRD)
343 pr_cont("Data/tag array parity error for a tag hit.\n");
344 else if (r4 == R4_SNOOP)
345 pr_cont("Tag error during snoop/victimization.\n");
346 else if (xec == 0x0)
347 pr_cont("Tag parity error from victim castout.\n");
348 else if (xec == 0x2)
349 pr_cont("Microcode patch RAM parity error.\n");
350 else
351 ret = false;
341 352
342 if (r4 == R4_IRD)
343 pr_cont("Data/tag array parity error for a tag hit.\n");
344 else if (r4 == R4_SNOOP)
345 pr_cont("Tag error during snoop/victimization.\n");
346 else
347 ret = false;
348 }
349 return ret; 353 return ret;
350} 354}
351 355
@@ -399,12 +403,9 @@ static void decode_mc1_mce(struct mce *m)
399 pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n"); 403 pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
400} 404}
401 405
402static void decode_mc2_mce(struct mce *m) 406static bool k8_mc2_mce(u16 ec, u8 xec)
403{ 407{
404 u16 ec = EC(m->status); 408 bool ret = true;
405 u8 xec = XEC(m->status, xec_mask);
406
407 pr_emerg(HW_ERR "MC2 Error");
408 409
409 if (xec == 0x1) 410 if (xec == 0x1)
410 pr_cont(" in the write data buffers.\n"); 411 pr_cont(" in the write data buffers.\n");
@@ -429,24 +430,18 @@ static void decode_mc2_mce(struct mce *m)
429 pr_cont(": %s parity/ECC error during data " 430 pr_cont(": %s parity/ECC error during data "
430 "access from L2.\n", R4_MSG(ec)); 431 "access from L2.\n", R4_MSG(ec));
431 else 432 else
432 goto wrong_mc2_mce; 433 ret = false;
433 } else 434 } else
434 goto wrong_mc2_mce; 435 ret = false;
435 } else 436 } else
436 goto wrong_mc2_mce; 437 ret = false;
437
438 return;
439 438
440 wrong_mc2_mce: 439 return ret;
441 pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
442} 440}
443 441
444static void decode_f15_mc2_mce(struct mce *m) 442static bool f15h_mc2_mce(u16 ec, u8 xec)
445{ 443{
446 u16 ec = EC(m->status); 444 bool ret = true;
447 u8 xec = XEC(m->status, xec_mask);
448
449 pr_emerg(HW_ERR "MC2 Error: ");
450 445
451 if (TLB_ERROR(ec)) { 446 if (TLB_ERROR(ec)) {
452 if (xec == 0x0) 447 if (xec == 0x0)
@@ -454,10 +449,10 @@ static void decode_f15_mc2_mce(struct mce *m)
454 else if (xec == 0x1) 449 else if (xec == 0x1)
455 pr_cont("Poison data provided for TLB fill.\n"); 450 pr_cont("Poison data provided for TLB fill.\n");
456 else 451 else
457 goto wrong_f15_mc2_mce; 452 ret = false;
458 } else if (BUS_ERROR(ec)) { 453 } else if (BUS_ERROR(ec)) {
459 if (xec > 2) 454 if (xec > 2)
460 goto wrong_f15_mc2_mce; 455 ret = false;
461 456
462 pr_cont("Error during attempted NB data read.\n"); 457 pr_cont("Error during attempted NB data read.\n");
463 } else if (MEM_ERROR(ec)) { 458 } else if (MEM_ERROR(ec)) {
@@ -471,14 +466,63 @@ static void decode_f15_mc2_mce(struct mce *m)
471 break; 466 break;
472 467
473 default: 468 default:
474 goto wrong_f15_mc2_mce; 469 ret = false;
475 } 470 }
476 } 471 }
477 472
478 return; 473 return ret;
474}
479 475
480 wrong_f15_mc2_mce: 476static bool f16h_mc2_mce(u16 ec, u8 xec)
481 pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n"); 477{
478 u8 r4 = R4(ec);
479
480 if (!MEM_ERROR(ec))
481 return false;
482
483 switch (xec) {
484 case 0x04 ... 0x05:
485 pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
486 break;
487
488 case 0x09 ... 0x0b:
489 case 0x0d ... 0x0f:
490 pr_cont("ECC error in L2 tag (%s).\n",
491 ((r4 == R4_GEN) ? "BankReq" :
492 ((r4 == R4_SNOOP) ? "Prb" : "Fill")));
493 break;
494
495 case 0x10 ... 0x19:
496 case 0x1b:
497 pr_cont("ECC error in L2 data array (%s).\n",
498 (((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
499 ((r4 == R4_GEN) ? "Attr" :
500 ((r4 == R4_EVICT) ? "Vict" : "Fill"))));
501 break;
502
503 case 0x1c ... 0x1d:
504 case 0x1f:
505 pr_cont("Parity error in L2 attribute bits (%s).\n",
506 ((r4 == R4_RD) ? "Hit" :
507 ((r4 == R4_GEN) ? "Attr" : "Fill")));
508 break;
509
510 default:
511 return false;
512 }
513
514 return true;
515}
516
517static void decode_mc2_mce(struct mce *m)
518{
519 u16 ec = EC(m->status);
520 u8 xec = XEC(m->status, xec_mask);
521
522 pr_emerg(HW_ERR "MC2 Error: ");
523
524 if (!fam_ops->mc2_mce(ec, xec))
525 pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
482} 526}
483 527
484static void decode_mc3_mce(struct mce *m) 528static void decode_mc3_mce(struct mce *m)
@@ -547,7 +591,7 @@ static void decode_mc4_mce(struct mce *m)
547 return; 591 return;
548 592
549 case 0x19: 593 case 0x19:
550 if (boot_cpu_data.x86 == 0x15) 594 if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
551 pr_cont("Compute Unit Data Error.\n"); 595 pr_cont("Compute Unit Data Error.\n");
552 else 596 else
553 goto wrong_mc4_mce; 597 goto wrong_mc4_mce;
@@ -633,6 +677,10 @@ static void decode_mc6_mce(struct mce *m)
633 677
634static inline void amd_decode_err_code(u16 ec) 678static inline void amd_decode_err_code(u16 ec)
635{ 679{
680 if (INT_ERROR(ec)) {
681 pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
682 return;
683 }
636 684
637 pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec)); 685 pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
638 686
@@ -702,10 +750,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
702 break; 750 break;
703 751
704 case 2: 752 case 2:
705 if (c->x86 == 0x15) 753 decode_mc2_mce(m);
706 decode_f15_mc2_mce(m);
707 else
708 decode_mc2_mce(m);
709 break; 754 break;
710 755
711 case 3: 756 case 3:
@@ -740,7 +785,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
740 ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), 785 ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
741 ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); 786 ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
742 787
743 if (c->x86 == 0x15) 788 if (c->x86 == 0x15 || c->x86 == 0x16)
744 pr_cont("|%s|%s", 789 pr_cont("|%s|%s",
745 ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"), 790 ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
746 ((m->status & MCI_STATUS_POISON) ? "Poison" : "-")); 791 ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
@@ -772,7 +817,7 @@ static int __init mce_amd_init(void)
772 if (c->x86_vendor != X86_VENDOR_AMD) 817 if (c->x86_vendor != X86_VENDOR_AMD)
773 return 0; 818 return 0;
774 819
775 if (c->x86 < 0xf || c->x86 > 0x15) 820 if (c->x86 < 0xf || c->x86 > 0x16)
776 return 0; 821 return 0;
777 822
778 fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); 823 fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -783,33 +828,46 @@ static int __init mce_amd_init(void)
783 case 0xf: 828 case 0xf:
784 fam_ops->mc0_mce = k8_mc0_mce; 829 fam_ops->mc0_mce = k8_mc0_mce;
785 fam_ops->mc1_mce = k8_mc1_mce; 830 fam_ops->mc1_mce = k8_mc1_mce;
831 fam_ops->mc2_mce = k8_mc2_mce;
786 break; 832 break;
787 833
788 case 0x10: 834 case 0x10:
789 fam_ops->mc0_mce = f10h_mc0_mce; 835 fam_ops->mc0_mce = f10h_mc0_mce;
790 fam_ops->mc1_mce = k8_mc1_mce; 836 fam_ops->mc1_mce = k8_mc1_mce;
837 fam_ops->mc2_mce = k8_mc2_mce;
791 break; 838 break;
792 839
793 case 0x11: 840 case 0x11:
794 fam_ops->mc0_mce = k8_mc0_mce; 841 fam_ops->mc0_mce = k8_mc0_mce;
795 fam_ops->mc1_mce = k8_mc1_mce; 842 fam_ops->mc1_mce = k8_mc1_mce;
843 fam_ops->mc2_mce = k8_mc2_mce;
796 break; 844 break;
797 845
798 case 0x12: 846 case 0x12:
799 fam_ops->mc0_mce = f12h_mc0_mce; 847 fam_ops->mc0_mce = f12h_mc0_mce;
800 fam_ops->mc1_mce = k8_mc1_mce; 848 fam_ops->mc1_mce = k8_mc1_mce;
849 fam_ops->mc2_mce = k8_mc2_mce;
801 break; 850 break;
802 851
803 case 0x14: 852 case 0x14:
804 nb_err_cpumask = 0x3; 853 nb_err_cpumask = 0x3;
805 fam_ops->mc0_mce = f14h_mc0_mce; 854 fam_ops->mc0_mce = cat_mc0_mce;
806 fam_ops->mc1_mce = f14h_mc1_mce; 855 fam_ops->mc1_mce = cat_mc1_mce;
856 fam_ops->mc2_mce = k8_mc2_mce;
807 break; 857 break;
808 858
809 case 0x15: 859 case 0x15:
810 xec_mask = 0x1f; 860 xec_mask = 0x1f;
811 fam_ops->mc0_mce = f15h_mc0_mce; 861 fam_ops->mc0_mce = f15h_mc0_mce;
812 fam_ops->mc1_mce = f15h_mc1_mce; 862 fam_ops->mc1_mce = f15h_mc1_mce;
863 fam_ops->mc2_mce = f15h_mc2_mce;
864 break;
865
866 case 0x16:
867 xec_mask = 0x1f;
868 fam_ops->mc0_mce = cat_mc0_mce;
869 fam_ops->mc1_mce = cat_mc1_mce;
870 fam_ops->mc2_mce = f16h_mc2_mce;
813 break; 871 break;
814 872
815 default: 873 default:
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 679679951e23..51b7e3a36e37 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -14,6 +14,7 @@
14#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010) 14#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
15#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100) 15#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
16#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800) 16#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
17#define INT_ERROR(x) (((x) & 0xF4FF) == 0x0400)
17 18
18#define TT(x) (((x) >> 2) & 0x3) 19#define TT(x) (((x) >> 2) & 0x3)
19#define TT_MSG(x) tt_msgs[TT(x)] 20#define TT_MSG(x) tt_msgs[TT(x)]
@@ -25,6 +26,8 @@
25#define TO_MSG(x) to_msgs[TO(x)] 26#define TO_MSG(x) to_msgs[TO(x)]
26#define PP(x) (((x) >> 9) & 0x3) 27#define PP(x) (((x) >> 9) & 0x3)
27#define PP_MSG(x) pp_msgs[PP(x)] 28#define PP_MSG(x) pp_msgs[PP(x)]
29#define UU(x) (((x) >> 8) & 0x3)
30#define UU_MSG(x) uu_msgs[UU(x)]
28 31
29#define R4(x) (((x) >> 4) & 0xf) 32#define R4(x) (((x) >> 4) & 0xf)
30#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") 33#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
@@ -32,6 +35,8 @@
32#define MCI_STATUS_DEFERRED BIT_64(44) 35#define MCI_STATUS_DEFERRED BIT_64(44)
33#define MCI_STATUS_POISON BIT_64(43) 36#define MCI_STATUS_POISON BIT_64(43)
34 37
38extern const char * const pp_msgs[];
39
35enum tt_ids { 40enum tt_ids {
36 TT_INSTR = 0, 41 TT_INSTR = 0,
37 TT_DATA, 42 TT_DATA,
@@ -65,19 +70,13 @@ enum rrrr_ids {
65 R4_SNOOP, 70 R4_SNOOP,
66}; 71};
67 72
68extern const char * const tt_msgs[];
69extern const char * const ll_msgs[];
70extern const char * const rrrr_msgs[];
71extern const char * const pp_msgs[];
72extern const char * const to_msgs[];
73extern const char * const ii_msgs[];
74
75/* 73/*
76 * per-family decoder ops 74 * per-family decoder ops
77 */ 75 */
78struct amd_decoder_ops { 76struct amd_decoder_ops {
79 bool (*mc0_mce)(u16, u8); 77 bool (*mc0_mce)(u16, u8);
80 bool (*mc1_mce)(u16, u8); 78 bool (*mc1_mce)(u16, u8);
79 bool (*mc2_mce)(u16, u8);
81}; 80};
82 81
83void amd_report_gart_errors(bool); 82void amd_report_gart_errors(bool);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 42a840d530a5..3eb32f62d72a 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -301,7 +301,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
301 "[EDAC] PCI err", pci); 301 "[EDAC] PCI err", pci);
302 if (res < 0) { 302 if (res < 0) {
303 printk(KERN_ERR 303 printk(KERN_ERR
304 "%s: Unable to requiest irq %d for " 304 "%s: Unable to request irq %d for "
305 "MPC85xx PCI err\n", __func__, pdata->irq); 305 "MPC85xx PCI err\n", __func__, pdata->irq);
306 irq_dispose_mapping(pdata->irq); 306 irq_dispose_mapping(pdata->irq);
307 res = -ENODEV; 307 res = -ENODEV;
@@ -583,7 +583,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
583 "[EDAC] L2 err", edac_dev); 583 "[EDAC] L2 err", edac_dev);
584 if (res < 0) { 584 if (res < 0) {
585 printk(KERN_ERR 585 printk(KERN_ERR
586 "%s: Unable to requiest irq %d for " 586 "%s: Unable to request irq %d for "
587 "MPC85xx L2 err\n", __func__, pdata->irq); 587 "MPC85xx L2 err\n", __func__, pdata->irq);
588 irq_dispose_mapping(pdata->irq); 588 irq_dispose_mapping(pdata->irq);
589 res = -ENODEV; 589 res = -ENODEV;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fd3ae6290d71..982f1f5f5742 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)
471 char __iomem *p, *q; 471 char __iomem *p, *q;
472 int rc; 472 int rc;
473 473
474 if (efi_enabled) { 474 if (efi_enabled(EFI_CONFIG_TABLES)) {
475 if (efi.smbios == EFI_INVALID_TABLE_ADDR) 475 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
476 goto error; 476 goto error;
477 477
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7b1c37497c9a..f5596db0cf58 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -674,7 +674,7 @@ static int efi_status_to_err(efi_status_t status)
674 err = -EACCES; 674 err = -EACCES;
675 break; 675 break;
676 case EFI_NOT_FOUND: 676 case EFI_NOT_FOUND:
677 err = -ENOENT; 677 err = -EIO;
678 break; 678 break;
679 default: 679 default:
680 err = -EINVAL; 680 err = -EINVAL;
@@ -793,6 +793,7 @@ static ssize_t efivarfs_file_write(struct file *file,
793 spin_unlock(&efivars->lock); 793 spin_unlock(&efivars->lock);
794 efivar_unregister(var); 794 efivar_unregister(var);
795 drop_nlink(inode); 795 drop_nlink(inode);
796 d_delete(file->f_dentry);
796 dput(file->f_dentry); 797 dput(file->f_dentry);
797 798
798 } else { 799 } else {
@@ -994,7 +995,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
994 list_del(&var->list); 995 list_del(&var->list);
995 spin_unlock(&efivars->lock); 996 spin_unlock(&efivars->lock);
996 efivar_unregister(var); 997 efivar_unregister(var);
997 drop_nlink(dir); 998 drop_nlink(dentry->d_inode);
998 dput(dentry); 999 dput(dentry);
999 return 0; 1000 return 0;
1000 } 1001 }
@@ -1782,7 +1783,7 @@ efivars_init(void)
1782 printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, 1783 printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
1783 EFIVARS_DATE); 1784 EFIVARS_DATE);
1784 1785
1785 if (!efi_enabled) 1786 if (!efi_enabled(EFI_RUNTIME_SERVICES))
1786 return 0; 1787 return 0;
1787 1788
1788 /* For now we'll register the efi directory at /sys/firmware/efi */ 1789 /* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1822,7 +1823,7 @@ err_put:
1822static void __exit 1823static void __exit
1823efivars_exit(void) 1824efivars_exit(void)
1824{ 1825{
1825 if (efi_enabled) { 1826 if (efi_enabled(EFI_RUNTIME_SERVICES)) {
1826 unregister_efivars(&__efivars); 1827 unregister_efivars(&__efivars);
1827 kobject_put(efi_kobj); 1828 kobject_put(efi_kobj);
1828 } 1829 }
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 4da4eb9ae926..2224f1dc074b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
99 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will 99 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
100 * only use ACPI for this */ 100 * only use ACPI for this */
101 101
102 if (!efi_enabled) 102 if (!efi_enabled(EFI_BOOT))
103 find_ibft_in_mem(); 103 find_ibft_in_mem();
104 104
105 if (ibft_addr) { 105 if (ibft_addr) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 682de754d63f..e5116fa85140 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -657,12 +657,6 @@ config GPIO_JANZ_TTL
657 This driver provides support for driving the pins in output 657 This driver provides support for driving the pins in output
658 mode only. Input mode is not supported. 658 mode only. Input mode is not supported.
659 659
660config GPIO_AB8500
661 bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
662 depends on AB8500_CORE && BROKEN
663 help
664 Select this to enable the AB8500 IC GPIO driver
665
666config GPIO_TPS6586X 660config GPIO_TPS6586X
667 bool "TPS6586X GPIO" 661 bool "TPS6586X GPIO"
668 depends on MFD_TPS6586X 662 depends on MFD_TPS6586X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index c5aebd008dde..45a388c21d04 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
10obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o 10obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
11 11
12obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o 12obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
13obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
14obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o 13obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
15obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o 14obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
16obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o 15obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
deleted file mode 100644
index 983ad425f0ac..000000000000
--- a/drivers/gpio/gpio-ab8500.c
+++ /dev/null
@@ -1,520 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2011
3 *
4 * Author: BIBEK BASU <bibek.basu@stericsson.com>
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/gpio.h>
19#include <linux/irq.h>
20#include <linux/interrupt.h>
21#include <linux/mfd/ab8500.h>
22#include <linux/mfd/abx500.h>
23#include <linux/mfd/ab8500/gpio.h>
24
25/*
26 * GPIO registers offset
27 * Bank: 0x10
28 */
29#define AB8500_GPIO_SEL1_REG 0x00
30#define AB8500_GPIO_SEL2_REG 0x01
31#define AB8500_GPIO_SEL3_REG 0x02
32#define AB8500_GPIO_SEL4_REG 0x03
33#define AB8500_GPIO_SEL5_REG 0x04
34#define AB8500_GPIO_SEL6_REG 0x05
35
36#define AB8500_GPIO_DIR1_REG 0x10
37#define AB8500_GPIO_DIR2_REG 0x11
38#define AB8500_GPIO_DIR3_REG 0x12
39#define AB8500_GPIO_DIR4_REG 0x13
40#define AB8500_GPIO_DIR5_REG 0x14
41#define AB8500_GPIO_DIR6_REG 0x15
42
43#define AB8500_GPIO_OUT1_REG 0x20
44#define AB8500_GPIO_OUT2_REG 0x21
45#define AB8500_GPIO_OUT3_REG 0x22
46#define AB8500_GPIO_OUT4_REG 0x23
47#define AB8500_GPIO_OUT5_REG 0x24
48#define AB8500_GPIO_OUT6_REG 0x25
49
50#define AB8500_GPIO_PUD1_REG 0x30
51#define AB8500_GPIO_PUD2_REG 0x31
52#define AB8500_GPIO_PUD3_REG 0x32
53#define AB8500_GPIO_PUD4_REG 0x33
54#define AB8500_GPIO_PUD5_REG 0x34
55#define AB8500_GPIO_PUD6_REG 0x35
56
57#define AB8500_GPIO_IN1_REG 0x40
58#define AB8500_GPIO_IN2_REG 0x41
59#define AB8500_GPIO_IN3_REG 0x42
60#define AB8500_GPIO_IN4_REG 0x43
61#define AB8500_GPIO_IN5_REG 0x44
62#define AB8500_GPIO_IN6_REG 0x45
63#define AB8500_GPIO_ALTFUN_REG 0x45
64#define ALTFUN_REG_INDEX 6
65#define AB8500_NUM_GPIO 42
66#define AB8500_NUM_VIR_GPIO_IRQ 16
67
68enum ab8500_gpio_action {
69 NONE,
70 STARTUP,
71 SHUTDOWN,
72 MASK,
73 UNMASK
74};
75
76struct ab8500_gpio {
77 struct gpio_chip chip;
78 struct ab8500 *parent;
79 struct device *dev;
80 struct mutex lock;
81 u32 irq_base;
82 enum ab8500_gpio_action irq_action;
83 u16 rising;
84 u16 falling;
85};
86/**
87 * to_ab8500_gpio() - get the pointer to ab8500_gpio
88 * @chip: Member of the structure ab8500_gpio
89 */
90static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
91{
92 return container_of(chip, struct ab8500_gpio, chip);
93}
94
95static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
96 unsigned offset, int val)
97{
98 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
99 u8 pos = offset % 8;
100 int ret;
101
102 reg = reg + (offset / 8);
103 ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
104 AB8500_MISC, reg, 1 << pos, val << pos);
105 if (ret < 0)
106 dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
107 return ret;
108}
109/**
110 * ab8500_gpio_get() - Get the particular GPIO value
111 * @chip: Gpio device
112 * @offset: GPIO number to read
113 */
114static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
115{
116 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
117 u8 mask = 1 << (offset % 8);
118 u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
119 int ret;
120 u8 data;
121 ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
122 reg, &data);
123 if (ret < 0) {
124 dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
125 return ret;
126 }
127 return (data & mask) >> (offset % 8);
128}
129
130static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
131{
132 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
133 int ret;
134 /* Write the data */
135 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
136 if (ret < 0)
137 dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
138}
139
140static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
141 int val)
142{
143 int ret;
144 /* set direction as output */
145 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
146 if (ret < 0)
147 return ret;
148 /* disable pull down */
149 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
150 if (ret < 0)
151 return ret;
152 /* set the output as 1 or 0 */
153 return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
154
155}
156
157static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
158{
159 /* set the register as input */
160 return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
161}
162
163static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
164{
165 /*
166 * Only some GPIOs are interrupt capable, and they are
167 * organized in discontiguous clusters:
168 *
169 * GPIO6 to GPIO13
170 * GPIO24 and GPIO25
171 * GPIO36 to GPIO41
172 */
173 static struct ab8500_gpio_irq_cluster {
174 int start;
175 int end;
176 } clusters[] = {
177 {.start = 6, .end = 13},
178 {.start = 24, .end = 25},
179 {.start = 36, .end = 41},
180 };
181 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
182 int base = ab8500_gpio->irq_base;
183 int i;
184
185 for (i = 0; i < ARRAY_SIZE(clusters); i++) {
186 struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
187
188 if (offset >= cluster->start && offset <= cluster->end)
189 return base + offset - cluster->start;
190
191 /* Advance by the number of gpios in this cluster */
192 base += cluster->end - cluster->start + 1;
193 }
194
195 return -EINVAL;
196}
197
198static struct gpio_chip ab8500gpio_chip = {
199 .label = "ab8500_gpio",
200 .owner = THIS_MODULE,
201 .direction_input = ab8500_gpio_direction_input,
202 .get = ab8500_gpio_get,
203 .direction_output = ab8500_gpio_direction_output,
204 .set = ab8500_gpio_set,
205 .to_irq = ab8500_gpio_to_irq,
206};
207
208static unsigned int irq_to_rising(unsigned int irq)
209{
210 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
211 int offset = irq - ab8500_gpio->irq_base;
212 int new_irq = offset + AB8500_INT_GPIO6R
213 + ab8500_gpio->parent->irq_base;
214 return new_irq;
215}
216
217static unsigned int irq_to_falling(unsigned int irq)
218{
219 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
220 int offset = irq - ab8500_gpio->irq_base;
221 int new_irq = offset + AB8500_INT_GPIO6F
222 + ab8500_gpio->parent->irq_base;
223 return new_irq;
224
225}
226
227static unsigned int rising_to_irq(unsigned int irq, void *dev)
228{
229 struct ab8500_gpio *ab8500_gpio = dev;
230 int offset = irq - AB8500_INT_GPIO6R
231 - ab8500_gpio->parent->irq_base ;
232 int new_irq = offset + ab8500_gpio->irq_base;
233 return new_irq;
234}
235
236static unsigned int falling_to_irq(unsigned int irq, void *dev)
237{
238 struct ab8500_gpio *ab8500_gpio = dev;
239 int offset = irq - AB8500_INT_GPIO6F
240 - ab8500_gpio->parent->irq_base ;
241 int new_irq = offset + ab8500_gpio->irq_base;
242 return new_irq;
243
244}
245
246/*
247 * IRQ handler
248 */
249
250static irqreturn_t handle_rising(int irq, void *dev)
251{
252
253 handle_nested_irq(rising_to_irq(irq , dev));
254 return IRQ_HANDLED;
255}
256
257static irqreturn_t handle_falling(int irq, void *dev)
258{
259
260 handle_nested_irq(falling_to_irq(irq, dev));
261 return IRQ_HANDLED;
262}
263
264static void ab8500_gpio_irq_lock(unsigned int irq)
265{
266 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
267 mutex_lock(&ab8500_gpio->lock);
268}
269
270static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
271{
272 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
273 int offset = irq - ab8500_gpio->irq_base;
274 bool rising = ab8500_gpio->rising & BIT(offset);
275 bool falling = ab8500_gpio->falling & BIT(offset);
276 int ret;
277
278 switch (ab8500_gpio->irq_action) {
279 case STARTUP:
280 if (rising)
281 ret = request_threaded_irq(irq_to_rising(irq),
282 NULL, handle_rising,
283 IRQF_TRIGGER_RISING,
284 "ab8500-gpio-r", ab8500_gpio);
285 if (falling)
286 ret = request_threaded_irq(irq_to_falling(irq),
287 NULL, handle_falling,
288 IRQF_TRIGGER_FALLING,
289 "ab8500-gpio-f", ab8500_gpio);
290 break;
291 case SHUTDOWN:
292 if (rising)
293 free_irq(irq_to_rising(irq), ab8500_gpio);
294 if (falling)
295 free_irq(irq_to_falling(irq), ab8500_gpio);
296 break;
297 case MASK:
298 if (rising)
299 disable_irq(irq_to_rising(irq));
300 if (falling)
301 disable_irq(irq_to_falling(irq));
302 break;
303 case UNMASK:
304 if (rising)
305 enable_irq(irq_to_rising(irq));
306 if (falling)
307 enable_irq(irq_to_falling(irq));
308 break;
309 case NONE:
310 break;
311 }
312 ab8500_gpio->irq_action = NONE;
313 ab8500_gpio->rising &= ~(BIT(offset));
314 ab8500_gpio->falling &= ~(BIT(offset));
315 mutex_unlock(&ab8500_gpio->lock);
316}
317
318
319static void ab8500_gpio_irq_mask(unsigned int irq)
320{
321 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
322 ab8500_gpio->irq_action = MASK;
323}
324
325static void ab8500_gpio_irq_unmask(unsigned int irq)
326{
327 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
328 ab8500_gpio->irq_action = UNMASK;
329}
330
331static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
332{
333 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
334 int offset = irq - ab8500_gpio->irq_base;
335
336 if (type == IRQ_TYPE_EDGE_BOTH) {
337 ab8500_gpio->rising = BIT(offset);
338 ab8500_gpio->falling = BIT(offset);
339 } else if (type == IRQ_TYPE_EDGE_RISING) {
340 ab8500_gpio->rising = BIT(offset);
341 } else {
342 ab8500_gpio->falling = BIT(offset);
343 }
344 return 0;
345}
346
347unsigned int ab8500_gpio_irq_startup(unsigned int irq)
348{
349 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
350 ab8500_gpio->irq_action = STARTUP;
351 return 0;
352}
353
354void ab8500_gpio_irq_shutdown(unsigned int irq)
355{
356 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
357 ab8500_gpio->irq_action = SHUTDOWN;
358}
359
360static struct irq_chip ab8500_gpio_irq_chip = {
361 .name = "ab8500-gpio",
362 .startup = ab8500_gpio_irq_startup,
363 .shutdown = ab8500_gpio_irq_shutdown,
364 .bus_lock = ab8500_gpio_irq_lock,
365 .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
366 .mask = ab8500_gpio_irq_mask,
367 .unmask = ab8500_gpio_irq_unmask,
368 .set_type = ab8500_gpio_irq_set_type,
369};
370
371static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
372{
373 u32 base = ab8500_gpio->irq_base;
374 int irq;
375
376 for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
377 set_irq_chip_data(irq, ab8500_gpio);
378 set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
379 handle_simple_irq);
380 set_irq_nested_thread(irq, 1);
381#ifdef CONFIG_ARM
382 set_irq_flags(irq, IRQF_VALID);
383#else
384 set_irq_noprobe(irq);
385#endif
386 }
387
388 return 0;
389}
390
391static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
392{
393 int base = ab8500_gpio->irq_base;
394 int irq;
395
396 for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
397#ifdef CONFIG_ARM
398 set_irq_flags(irq, 0);
399#endif
400 set_irq_chip_and_handler(irq, NULL, NULL);
401 set_irq_chip_data(irq, NULL);
402 }
403}
404
405static int ab8500_gpio_probe(struct platform_device *pdev)
406{
407 struct ab8500_platform_data *ab8500_pdata =
408 dev_get_platdata(pdev->dev.parent);
409 struct ab8500_gpio_platform_data *pdata;
410 struct ab8500_gpio *ab8500_gpio;
411 int ret;
412 int i;
413
414 pdata = ab8500_pdata->gpio;
415 if (!pdata) {
416 dev_err(&pdev->dev, "gpio platform data missing\n");
417 return -ENODEV;
418 }
419
420 ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
421 if (ab8500_gpio == NULL) {
422 dev_err(&pdev->dev, "failed to allocate memory\n");
423 return -ENOMEM;
424 }
425 ab8500_gpio->dev = &pdev->dev;
426 ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
427 ab8500_gpio->chip = ab8500gpio_chip;
428 ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
429 ab8500_gpio->chip.dev = &pdev->dev;
430 ab8500_gpio->chip.base = pdata->gpio_base;
431 ab8500_gpio->irq_base = pdata->irq_base;
432 /* initialize the lock */
433 mutex_init(&ab8500_gpio->lock);
434 /*
435 * AB8500 core will handle and clear the IRQ
436 * configre GPIO based on config-reg value.
437 * These values are for selecting the PINs as
438 * GPIO or alternate function
439 */
440 for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
441 ret = abx500_set_register_interruptible(ab8500_gpio->dev,
442 AB8500_MISC, i,
443 pdata->config_reg[i]);
444 if (ret < 0)
445 goto out_free;
446 }
447 ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
448 AB8500_GPIO_ALTFUN_REG,
449 pdata->config_reg[ALTFUN_REG_INDEX]);
450 if (ret < 0)
451 goto out_free;
452
453 ret = ab8500_gpio_irq_init(ab8500_gpio);
454 if (ret)
455 goto out_free;
456 ret = gpiochip_add(&ab8500_gpio->chip);
457 if (ret) {
458 dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
459 ret);
460 goto out_rem_irq;
461 }
462 platform_set_drvdata(pdev, ab8500_gpio);
463 return 0;
464
465out_rem_irq:
466 ab8500_gpio_irq_remove(ab8500_gpio);
467out_free:
468 mutex_destroy(&ab8500_gpio->lock);
469 kfree(ab8500_gpio);
470 return ret;
471}
472
473/*
474 * ab8500_gpio_remove() - remove Ab8500-gpio driver
475 * @pdev : Platform device registered
476 */
477static int ab8500_gpio_remove(struct platform_device *pdev)
478{
479 struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
480 int ret;
481
482 ret = gpiochip_remove(&ab8500_gpio->chip);
483 if (ret < 0) {
484 dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n",
485 ret);
486 return ret;
487 }
488
489 platform_set_drvdata(pdev, NULL);
490 mutex_destroy(&ab8500_gpio->lock);
491 kfree(ab8500_gpio);
492
493 return 0;
494}
495
496static struct platform_driver ab8500_gpio_driver = {
497 .driver = {
498 .name = "ab8500-gpio",
499 .owner = THIS_MODULE,
500 },
501 .probe = ab8500_gpio_probe,
502 .remove = ab8500_gpio_remove,
503};
504
505static int __init ab8500_gpio_init(void)
506{
507 return platform_driver_register(&ab8500_gpio_driver);
508}
509arch_initcall(ab8500_gpio_init);
510
511static void __exit ab8500_gpio_exit(void)
512{
513 platform_driver_unregister(&ab8500_gpio_driver);
514}
515module_exit(ab8500_gpio_exit);
516
517MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
518MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
519MODULE_ALIAS("platform:ab8500-gpio");
520MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 7d9bd94be8d2..6819d63cb167 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
547 mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); 547 mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
548 if (! mvchip->membase) { 548 if (! mvchip->membase) {
549 dev_err(&pdev->dev, "Cannot ioremap\n"); 549 dev_err(&pdev->dev, "Cannot ioremap\n");
550 kfree(mvchip->chip.label);
551 return -ENOMEM; 550 return -ENOMEM;
552 } 551 }
553 552
@@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
557 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 556 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
558 if (! res) { 557 if (! res) {
559 dev_err(&pdev->dev, "Cannot get memory resource\n"); 558 dev_err(&pdev->dev, "Cannot get memory resource\n");
560 kfree(mvchip->chip.label);
561 return -ENODEV; 559 return -ENODEV;
562 } 560 }
563 561
564 mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); 562 mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
565 if (! mvchip->percpu_membase) { 563 if (! mvchip->percpu_membase) {
566 dev_err(&pdev->dev, "Cannot ioremap\n"); 564 dev_err(&pdev->dev, "Cannot ioremap\n");
567 kfree(mvchip->chip.label);
568 return -ENOMEM; 565 return -ENOMEM;
569 } 566 }
570 } 567 }
@@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
625 mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); 622 mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
626 if (mvchip->irqbase < 0) { 623 if (mvchip->irqbase < 0) {
627 dev_err(&pdev->dev, "no irqs\n"); 624 dev_err(&pdev->dev, "no irqs\n");
628 kfree(mvchip->chip.label);
629 return -ENOMEM; 625 return -ENOMEM;
630 } 626 }
631 627
@@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
633 mvchip->membase, handle_level_irq); 629 mvchip->membase, handle_level_irq);
634 if (! gc) { 630 if (! gc) {
635 dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); 631 dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
636 kfree(mvchip->chip.label);
637 return -ENOMEM; 632 return -ENOMEM;
638 } 633 }
639 634
@@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
668 irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, 663 irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
669 IRQ_LEVEL | IRQ_NOPROBE); 664 IRQ_LEVEL | IRQ_NOPROBE);
670 kfree(gc); 665 kfree(gc);
671 kfree(mvchip->chip.label);
672 return -ENODEV; 666 return -ENODEV;
673 } 667 }
674 668
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 01f7fe955590..76be7eed79de 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -32,7 +32,6 @@
32 32
33#include <mach/hardware.h> 33#include <mach/hardware.h>
34#include <mach/map.h> 34#include <mach/map.h>
35#include <mach/regs-clock.h>
36#include <mach/regs-gpio.h> 35#include <mach/regs-gpio.h>
37 36
38#include <plat/cpu.h> 37#include <plat/cpu.h>
@@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
446}; 445};
447#endif 446#endif
448 447
449#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) 448#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
450static struct samsung_gpio_cfg exynos_gpio_cfg = { 449static struct samsung_gpio_cfg exynos_gpio_cfg = {
451 .set_pull = exynos_gpio_setpull, 450 .set_pull = exynos_gpio_setpull,
452 .get_pull = exynos_gpio_getpull, 451 .get_pull = exynos_gpio_getpull,
@@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
2446}; 2445};
2447#endif 2446#endif
2448 2447
2449#ifdef CONFIG_ARCH_EXYNOS5 2448#ifdef CONFIG_SOC_EXYNOS5250
2450static struct samsung_gpio_chip exynos5_gpios_1[] = { 2449static struct samsung_gpio_chip exynos5_gpios_1[] = {
2451 { 2450 {
2452 .chip = { 2451 .chip = {
@@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2614}; 2613};
2615#endif 2614#endif
2616 2615
2617#ifdef CONFIG_ARCH_EXYNOS5 2616#ifdef CONFIG_SOC_EXYNOS5250
2618static struct samsung_gpio_chip exynos5_gpios_2[] = { 2617static struct samsung_gpio_chip exynos5_gpios_2[] = {
2619 { 2618 {
2620 .chip = { 2619 .chip = {
@@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
2675}; 2674};
2676#endif 2675#endif
2677 2676
2678#ifdef CONFIG_ARCH_EXYNOS5 2677#ifdef CONFIG_SOC_EXYNOS5250
2679static struct samsung_gpio_chip exynos5_gpios_3[] = { 2678static struct samsung_gpio_chip exynos5_gpios_3[] = {
2680 { 2679 {
2681 .chip = { 2680 .chip = {
@@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
2711}; 2710};
2712#endif 2711#endif
2713 2712
2714#ifdef CONFIG_ARCH_EXYNOS5 2713#ifdef CONFIG_SOC_EXYNOS5250
2715static struct samsung_gpio_chip exynos5_gpios_4[] = { 2714static struct samsung_gpio_chip exynos5_gpios_4[] = {
2716 { 2715 {
2717 .chip = { 2716 .chip = {
@@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void)
3010 int i, nr_chips; 3009 int i, nr_chips;
3011 int group = 0; 3010 int group = 0;
3012 3011
3013#ifdef CONFIG_PINCTRL_SAMSUNG 3012#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
3014 /* 3013 /*
3015 * This gpio driver includes support for device tree support and there 3014 * This gpio driver includes support for device tree support and there
3016 * are platforms using it. In order to maintain compatibility with those 3015 * are platforms using it. In order to maintain compatibility with those
@@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void)
3026 static const struct of_device_id exynos_pinctrl_ids[] = { 3025 static const struct of_device_id exynos_pinctrl_ids[] = {
3027 { .compatible = "samsung,pinctrl-exynos4210", }, 3026 { .compatible = "samsung,pinctrl-exynos4210", },
3028 { .compatible = "samsung,pinctrl-exynos4x12", }, 3027 { .compatible = "samsung,pinctrl-exynos4x12", },
3028 { .compatible = "samsung,pinctrl-exynos5440", },
3029 }; 3029 };
3030 for_each_matching_node(pctrl_np, exynos_pinctrl_ids) 3030 for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
3031 if (pctrl_np && of_device_is_available(pctrl_np)) 3031 if (pctrl_np && of_device_is_available(pctrl_np))
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d542a141811a..a71a54a3e3f7 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -89,41 +89,6 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
89EXPORT_SYMBOL(of_get_named_gpio_flags); 89EXPORT_SYMBOL(of_get_named_gpio_flags);
90 90
91/** 91/**
92 * of_gpio_named_count - Count GPIOs for a device
93 * @np: device node to count GPIOs for
94 * @propname: property name containing gpio specifier(s)
95 *
96 * The function returns the count of GPIOs specified for a node.
97 *
98 * Note that the empty GPIO specifiers counts too. For example,
99 *
100 * gpios = <0
101 * &pio1 1 2
102 * 0
103 * &pio2 3 4>;
104 *
105 * defines four GPIOs (so this function will return 4), two of which
106 * are not specified.
107 */
108unsigned int of_gpio_named_count(struct device_node *np, const char* propname)
109{
110 unsigned int cnt = 0;
111
112 do {
113 int ret;
114
115 ret = of_parse_phandle_with_args(np, propname, "#gpio-cells",
116 cnt, NULL);
117 /* A hole in the gpios = <> counts anyway. */
118 if (ret < 0 && ret != -EEXIST)
119 break;
120 } while (++cnt);
121
122 return cnt;
123}
124EXPORT_SYMBOL(of_gpio_named_count);
125
126/**
127 * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags 92 * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
128 * @gc: pointer to the gpio_chip structure 93 * @gc: pointer to the gpio_chip structure
129 * @np: device node of the GPIO chip 94 * @np: device node of the GPIO chip
@@ -250,7 +215,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
250 * on the same GPIO chip. 215 * on the same GPIO chip.
251 */ 216 */
252 ret = gpiochip_add_pin_range(chip, 217 ret = gpiochip_add_pin_range(chip,
253 pinctrl_dev_get_name(pctldev), 218 pinctrl_dev_get_devname(pctldev),
254 0, /* offset in gpiochip */ 219 0, /* offset in gpiochip */
255 pinspec.args[0], 220 pinspec.args[0],
256 pinspec.args[1]); 221 pinspec.args[1]);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d1f1e5e33f0..046bcda36abe 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,7 +24,7 @@ config DRM_EXYNOS_DMABUF
24 24
25config DRM_EXYNOS_FIMD 25config DRM_EXYNOS_FIMD
26 bool "Exynos DRM FIMD" 26 bool "Exynos DRM FIMD"
27 depends on DRM_EXYNOS && !FB_S3C 27 depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
28 help 28 help
29 Choose this option if you want to use Exynos FIMD for DRM. 29 Choose this option if you want to use Exynos FIMD for DRM.
30 30
@@ -48,7 +48,7 @@ config DRM_EXYNOS_G2D
48 48
49config DRM_EXYNOS_IPP 49config DRM_EXYNOS_IPP
50 bool "Exynos DRM IPP" 50 bool "Exynos DRM IPP"
51 depends on DRM_EXYNOS 51 depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
52 help 52 help
53 Choose this option if you want to use IPP feature for DRM. 53 Choose this option if you want to use IPP feature for DRM.
54 54
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index ab37437bad8a..4c5b6859c9ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -18,7 +18,6 @@
18#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h" 19#include "exynos_drm_encoder.h"
20 20
21#define MAX_EDID 256
22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ 21#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
23 drm_connector) 22 drm_connector)
24 23
@@ -96,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
96 to_exynos_connector(connector); 95 to_exynos_connector(connector);
97 struct exynos_drm_manager *manager = exynos_connector->manager; 96 struct exynos_drm_manager *manager = exynos_connector->manager;
98 struct exynos_drm_display_ops *display_ops = manager->display_ops; 97 struct exynos_drm_display_ops *display_ops = manager->display_ops;
99 unsigned int count; 98 struct edid *edid = NULL;
99 unsigned int count = 0;
100 int ret;
100 101
101 DRM_DEBUG_KMS("%s\n", __FILE__); 102 DRM_DEBUG_KMS("%s\n", __FILE__);
102 103
@@ -114,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
114 * because lcd panel has only one mode. 115 * because lcd panel has only one mode.
115 */ 116 */
116 if (display_ops->get_edid) { 117 if (display_ops->get_edid) {
117 int ret; 118 edid = display_ops->get_edid(manager->dev, connector);
118 void *edid; 119 if (IS_ERR_OR_NULL(edid)) {
119 120 ret = PTR_ERR(edid);
120 edid = kzalloc(MAX_EDID, GFP_KERNEL); 121 edid = NULL;
121 if (!edid) { 122 DRM_ERROR("Panel operation get_edid failed %d\n", ret);
122 DRM_ERROR("failed to allocate edid\n"); 123 goto out;
123 return 0;
124 } 124 }
125 125
126 ret = display_ops->get_edid(manager->dev, connector, 126 count = drm_add_edid_modes(connector, edid);
127 edid, MAX_EDID); 127 if (count < 0) {
128 if (ret < 0) { 128 DRM_ERROR("Add edid modes failed %d\n", count);
129 DRM_ERROR("failed to get edid data.\n"); 129 goto out;
130 kfree(edid);
131 edid = NULL;
132 return 0;
133 } 130 }
134 131
135 drm_mode_connector_update_edid_property(connector, edid); 132 drm_mode_connector_update_edid_property(connector, edid);
136 count = drm_add_edid_modes(connector, edid);
137 kfree(edid);
138 } else { 133 } else {
139 struct exynos_drm_panel_info *panel; 134 struct exynos_drm_panel_info *panel;
140 struct drm_display_mode *mode = drm_mode_create(connector->dev); 135 struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -161,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
161 count = 1; 156 count = 1;
162 } 157 }
163 158
159out:
160 kfree(edid);
164 return count; 161 return count;
165} 162}
166 163
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 9df97714b6c0..ba0a3aa78547 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -19,6 +19,7 @@
19struct exynos_drm_dmabuf_attachment { 19struct exynos_drm_dmabuf_attachment {
20 struct sg_table sgt; 20 struct sg_table sgt;
21 enum dma_data_direction dir; 21 enum dma_data_direction dir;
22 bool is_mapped;
22}; 23};
23 24
24static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, 25static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
@@ -72,17 +73,10 @@ static struct sg_table *
72 73
73 DRM_DEBUG_PRIME("%s\n", __FILE__); 74 DRM_DEBUG_PRIME("%s\n", __FILE__);
74 75
75 if (WARN_ON(dir == DMA_NONE))
76 return ERR_PTR(-EINVAL);
77
78 /* just return current sgt if already requested. */ 76 /* just return current sgt if already requested. */
79 if (exynos_attach->dir == dir) 77 if (exynos_attach->dir == dir && exynos_attach->is_mapped)
80 return &exynos_attach->sgt; 78 return &exynos_attach->sgt;
81 79
82 /* reattaching is not allowed. */
83 if (WARN_ON(exynos_attach->dir != DMA_NONE))
84 return ERR_PTR(-EBUSY);
85
86 buf = gem_obj->buffer; 80 buf = gem_obj->buffer;
87 if (!buf) { 81 if (!buf) {
88 DRM_ERROR("buffer is null.\n"); 82 DRM_ERROR("buffer is null.\n");
@@ -107,13 +101,17 @@ static struct sg_table *
107 wr = sg_next(wr); 101 wr = sg_next(wr);
108 } 102 }
109 103
110 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); 104 if (dir != DMA_NONE) {
111 if (!nents) { 105 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
112 DRM_ERROR("failed to map sgl with iommu.\n"); 106 if (!nents) {
113 sgt = ERR_PTR(-EIO); 107 DRM_ERROR("failed to map sgl with iommu.\n");
114 goto err_unlock; 108 sg_free_table(sgt);
109 sgt = ERR_PTR(-EIO);
110 goto err_unlock;
111 }
115 } 112 }
116 113
114 exynos_attach->is_mapped = true;
117 exynos_attach->dir = dir; 115 exynos_attach->dir = dir;
118 attach->priv = exynos_attach; 116 attach->priv = exynos_attach;
119 117
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b9e51bc09e81..4606fac7241a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -148,8 +148,8 @@ struct exynos_drm_overlay {
148struct exynos_drm_display_ops { 148struct exynos_drm_display_ops {
149 enum exynos_drm_output_type type; 149 enum exynos_drm_output_type type;
150 bool (*is_connected)(struct device *dev); 150 bool (*is_connected)(struct device *dev);
151 int (*get_edid)(struct device *dev, struct drm_connector *connector, 151 struct edid *(*get_edid)(struct device *dev,
152 u8 *edid, int len); 152 struct drm_connector *connector);
153 void *(*get_panel)(struct device *dev); 153 void *(*get_panel)(struct device *dev);
154 int (*check_timing)(struct device *dev, void *timing); 154 int (*check_timing)(struct device *dev, void *timing);
155 int (*power_on)(struct device *dev, int mode); 155 int (*power_on)(struct device *dev, int mode);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 36c3905536a6..9a4c08e7453c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -324,7 +324,7 @@ out:
324 g2d_userptr = NULL; 324 g2d_userptr = NULL;
325} 325}
326 326
327dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, 327static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
328 unsigned long userptr, 328 unsigned long userptr,
329 unsigned long size, 329 unsigned long size,
330 struct drm_file *filp, 330 struct drm_file *filp,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 850e9950b7da..28644539b305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -108,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
108 return false; 108 return false;
109} 109}
110 110
111static int drm_hdmi_get_edid(struct device *dev, 111static struct edid *drm_hdmi_get_edid(struct device *dev,
112 struct drm_connector *connector, u8 *edid, int len) 112 struct drm_connector *connector)
113{ 113{
114 struct drm_hdmi_context *ctx = to_context(dev); 114 struct drm_hdmi_context *ctx = to_context(dev);
115 115
116 DRM_DEBUG_KMS("%s\n", __FILE__); 116 DRM_DEBUG_KMS("%s\n", __FILE__);
117 117
118 if (hdmi_ops && hdmi_ops->get_edid) 118 if (hdmi_ops && hdmi_ops->get_edid)
119 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid, 119 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
120 len);
121 120
122 return 0; 121 return NULL;
123} 122}
124 123
125static int drm_hdmi_check_timing(struct device *dev, void *timing) 124static int drm_hdmi_check_timing(struct device *dev, void *timing)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 784a7e9a766c..d80516fc9ed7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -30,8 +30,8 @@ struct exynos_drm_hdmi_context {
30struct exynos_hdmi_ops { 30struct exynos_hdmi_ops {
31 /* display */ 31 /* display */
32 bool (*is_connected)(void *ctx); 32 bool (*is_connected)(void *ctx);
33 int (*get_edid)(void *ctx, struct drm_connector *connector, 33 struct edid *(*get_edid)(void *ctx,
34 u8 *edid, int len); 34 struct drm_connector *connector);
35 int (*check_timing)(void *ctx, void *timing); 35 int (*check_timing)(void *ctx, void *timing);
36 int (*power_on)(void *ctx, int mode); 36 int (*power_on)(void *ctx, int mode);
37 37
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 0bda96454a02..1a556354e92f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -869,7 +869,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
869 } 869 }
870} 870}
871 871
872void ipp_handle_cmd_work(struct device *dev, 872static void ipp_handle_cmd_work(struct device *dev,
873 struct exynos_drm_ippdrv *ippdrv, 873 struct exynos_drm_ippdrv *ippdrv,
874 struct drm_exynos_ipp_cmd_work *cmd_work, 874 struct drm_exynos_ipp_cmd_work *cmd_work,
875 struct drm_exynos_ipp_cmd_node *c_node) 875 struct drm_exynos_ipp_cmd_node *c_node)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index e9e83ef688f0..f976e29def6e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -734,7 +734,7 @@ static int rotator_remove(struct platform_device *pdev)
734 return 0; 734 return 0;
735} 735}
736 736
737struct rot_limit_table rot_limit_tbl = { 737static struct rot_limit_table rot_limit_tbl = {
738 .ycbcr420_2p = { 738 .ycbcr420_2p = {
739 .min_w = 32, 739 .min_w = 32,
740 .min_h = 32, 740 .min_h = 32,
@@ -751,7 +751,7 @@ struct rot_limit_table rot_limit_tbl = {
751 }, 751 },
752}; 752};
753 753
754struct platform_device_id rotator_driver_ids[] = { 754static struct platform_device_id rotator_driver_ids[] = {
755 { 755 {
756 .name = "exynos-rot", 756 .name = "exynos-rot",
757 .driver_data = (unsigned long)&rot_limit_tbl, 757 .driver_data = (unsigned long)&rot_limit_tbl,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index d0ca3c4e06c6..13ccbd4bcfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -98,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
98 return ctx->connected ? true : false; 98 return ctx->connected ? true : false;
99} 99}
100 100
101static int vidi_get_edid(struct device *dev, struct drm_connector *connector, 101static struct edid *vidi_get_edid(struct device *dev,
102 u8 *edid, int len) 102 struct drm_connector *connector)
103{ 103{
104 struct vidi_context *ctx = get_vidi_context(dev); 104 struct vidi_context *ctx = get_vidi_context(dev);
105 struct edid *edid;
106 int edid_len;
105 107
106 DRM_DEBUG_KMS("%s\n", __FILE__); 108 DRM_DEBUG_KMS("%s\n", __FILE__);
107 109
@@ -111,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
111 */ 113 */
112 if (!ctx->raw_edid) { 114 if (!ctx->raw_edid) {
113 DRM_DEBUG_KMS("raw_edid is null.\n"); 115 DRM_DEBUG_KMS("raw_edid is null.\n");
114 return -EFAULT; 116 return ERR_PTR(-EFAULT);
115 } 117 }
116 118
117 memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) 119 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
118 * EDID_LENGTH, len)); 120 edid = kzalloc(edid_len, GFP_KERNEL);
121 if (!edid) {
122 DRM_DEBUG_KMS("failed to allocate edid\n");
123 return ERR_PTR(-ENOMEM);
124 }
119 125
120 return 0; 126 memcpy(edid, ctx->raw_edid, edid_len);
127 return edid;
121} 128}
122 129
123static void *vidi_get_panel(struct device *dev) 130static void *vidi_get_panel(struct device *dev)
@@ -514,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
514 struct exynos_drm_manager *manager; 521 struct exynos_drm_manager *manager;
515 struct exynos_drm_display_ops *display_ops; 522 struct exynos_drm_display_ops *display_ops;
516 struct drm_exynos_vidi_connection *vidi = data; 523 struct drm_exynos_vidi_connection *vidi = data;
517 struct edid *raw_edid;
518 int edid_len; 524 int edid_len;
519 525
520 DRM_DEBUG_KMS("%s\n", __FILE__); 526 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -551,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
551 } 557 }
552 558
553 if (vidi->connection) { 559 if (vidi->connection) {
554 if (!vidi->edid) { 560 struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid;
555 DRM_DEBUG_KMS("edid data is null.\n"); 561 if (!drm_edid_is_valid(raw_edid)) {
562 DRM_DEBUG_KMS("edid data is invalid.\n");
556 return -EINVAL; 563 return -EINVAL;
557 } 564 }
558 raw_edid = (struct edid *)(uint32_t)vidi->edid;
559 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 565 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
560 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); 566 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
561 if (!ctx->raw_edid) { 567 if (!ctx->raw_edid) {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 41ff79d8ac8e..fbab3c468603 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,7 +34,6 @@
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <linux/of_gpio.h> 36#include <linux/of_gpio.h>
37#include <plat/gpio-cfg.h>
38 37
39#include <drm/exynos_drm.h> 38#include <drm/exynos_drm.h>
40 39
@@ -98,8 +97,7 @@ struct hdmi_context {
98 97
99 void __iomem *regs; 98 void __iomem *regs;
100 void *parent_ctx; 99 void *parent_ctx;
101 int external_irq; 100 int irq;
102 int internal_irq;
103 101
104 struct i2c_client *ddc_port; 102 struct i2c_client *ddc_port;
105 struct i2c_client *hdmiphy_port; 103 struct i2c_client *hdmiphy_port;
@@ -1391,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
1391 return hdata->hpd; 1389 return hdata->hpd;
1392} 1390}
1393 1391
1394static int hdmi_get_edid(void *ctx, struct drm_connector *connector, 1392static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
1395 u8 *edid, int len)
1396{ 1393{
1397 struct edid *raw_edid; 1394 struct edid *raw_edid;
1398 struct hdmi_context *hdata = ctx; 1395 struct hdmi_context *hdata = ctx;
@@ -1400,22 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1400 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1397 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1401 1398
1402 if (!hdata->ddc_port) 1399 if (!hdata->ddc_port)
1403 return -ENODEV; 1400 return ERR_PTR(-ENODEV);
1404 1401
1405 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); 1402 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
1406 if (raw_edid) { 1403 if (!raw_edid)
1407 hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); 1404 return ERR_PTR(-ENODEV);
1408 memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
1409 * EDID_LENGTH, len));
1410 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1411 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1412 raw_edid->width_cm, raw_edid->height_cm);
1413 kfree(raw_edid);
1414 } else {
1415 return -ENODEV;
1416 }
1417 1405
1418 return 0; 1406 hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
1407 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1408 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1409 raw_edid->width_cm, raw_edid->height_cm);
1410
1411 return raw_edid;
1419} 1412}
1420 1413
1421static int hdmi_v13_check_timing(struct fb_videomode *check_timing) 1414static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1652,16 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1652 1645
1653 /* resetting HDMI core */ 1646 /* resetting HDMI core */
1654 hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT); 1647 hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
1655 mdelay(10); 1648 usleep_range(10000, 12000);
1656 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); 1649 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
1657 mdelay(10); 1650 usleep_range(10000, 12000);
1658} 1651}
1659 1652
1660static void hdmi_conf_init(struct hdmi_context *hdata) 1653static void hdmi_conf_init(struct hdmi_context *hdata)
1661{ 1654{
1662 struct hdmi_infoframe infoframe; 1655 struct hdmi_infoframe infoframe;
1663 1656
1664 /* disable HPD interrupts */ 1657 /* disable HPD interrupts from HDMI IP block, use GPIO instead */
1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1658 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1659 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
1667 1660
@@ -1779,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1779 u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); 1772 u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
1780 if (val & HDMI_PHY_STATUS_READY) 1773 if (val & HDMI_PHY_STATUS_READY)
1781 break; 1774 break;
1782 mdelay(1); 1775 usleep_range(1000, 2000);
1783 } 1776 }
1784 /* steady state not achieved */ 1777 /* steady state not achieved */
1785 if (tries == 0) { 1778 if (tries == 0) {
@@ -1946,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1946 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); 1939 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
1947 if (val & HDMI_PHY_STATUS_READY) 1940 if (val & HDMI_PHY_STATUS_READY)
1948 break; 1941 break;
1949 mdelay(1); 1942 usleep_range(1000, 2000);
1950 } 1943 }
1951 /* steady state not achieved */ 1944 /* steady state not achieved */
1952 if (tries == 0) { 1945 if (tries == 0) {
@@ -1998,9 +1991,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1998 1991
1999 /* reset hdmiphy */ 1992 /* reset hdmiphy */
2000 hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); 1993 hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
2001 mdelay(10); 1994 usleep_range(10000, 12000);
2002 hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); 1995 hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
2003 mdelay(10); 1996 usleep_range(10000, 12000);
2004} 1997}
2005 1998
2006static void hdmiphy_poweron(struct hdmi_context *hdata) 1999static void hdmiphy_poweron(struct hdmi_context *hdata)
@@ -2048,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
2048 return; 2041 return;
2049 } 2042 }
2050 2043
2051 mdelay(10); 2044 usleep_range(10000, 12000);
2052 2045
2053 /* operation mode */ 2046 /* operation mode */
2054 operation[0] = 0x1f; 2047 operation[0] = 0x1f;
@@ -2170,6 +2163,13 @@ static void hdmi_commit(void *ctx)
2170 2163
2171 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2164 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2172 2165
2166 mutex_lock(&hdata->hdmi_mutex);
2167 if (!hdata->powered) {
2168 mutex_unlock(&hdata->hdmi_mutex);
2169 return;
2170 }
2171 mutex_unlock(&hdata->hdmi_mutex);
2172
2173 hdmi_conf_apply(hdata); 2173 hdmi_conf_apply(hdata);
2174} 2174}
2175 2175
@@ -2265,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
2265 .dpms = hdmi_dpms, 2265 .dpms = hdmi_dpms,
2266}; 2266};
2267 2267
2268static irqreturn_t hdmi_external_irq_thread(int irq, void *arg) 2268static irqreturn_t hdmi_irq_thread(int irq, void *arg)
2269{ 2269{
2270 struct exynos_drm_hdmi_context *ctx = arg; 2270 struct exynos_drm_hdmi_context *ctx = arg;
2271 struct hdmi_context *hdata = ctx->ctx; 2271 struct hdmi_context *hdata = ctx->ctx;
@@ -2280,31 +2280,6 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
2280 return IRQ_HANDLED; 2280 return IRQ_HANDLED;
2281} 2281}
2282 2282
2283static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
2284{
2285 struct exynos_drm_hdmi_context *ctx = arg;
2286 struct hdmi_context *hdata = ctx->ctx;
2287 u32 intc_flag;
2288
2289 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
2290 /* clearing flags for HPD plug/unplug */
2291 if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
2292 DRM_DEBUG_KMS("unplugged\n");
2293 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
2294 HDMI_INTC_FLAG_HPD_UNPLUG);
2295 }
2296 if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
2297 DRM_DEBUG_KMS("plugged\n");
2298 hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
2299 HDMI_INTC_FLAG_HPD_PLUG);
2300 }
2301
2302 if (ctx->drm_dev)
2303 drm_helper_hpd_irq_event(ctx->drm_dev);
2304
2305 return IRQ_HANDLED;
2306}
2307
2308static int hdmi_resources_init(struct hdmi_context *hdata) 2283static int hdmi_resources_init(struct hdmi_context *hdata)
2309{ 2284{
2310 struct device *dev = hdata->dev; 2285 struct device *dev = hdata->dev;
@@ -2555,39 +2530,24 @@ static int hdmi_probe(struct platform_device *pdev)
2555 2530
2556 hdata->hdmiphy_port = hdmi_hdmiphy; 2531 hdata->hdmiphy_port = hdmi_hdmiphy;
2557 2532
2558 hdata->external_irq = gpio_to_irq(hdata->hpd_gpio); 2533 hdata->irq = gpio_to_irq(hdata->hpd_gpio);
2559 if (hdata->external_irq < 0) { 2534 if (hdata->irq < 0) {
2560 DRM_ERROR("failed to get GPIO external irq\n"); 2535 DRM_ERROR("failed to get GPIO irq\n");
2561 ret = hdata->external_irq; 2536 ret = hdata->irq;
2562 goto err_hdmiphy;
2563 }
2564
2565 hdata->internal_irq = platform_get_irq(pdev, 0);
2566 if (hdata->internal_irq < 0) {
2567 DRM_ERROR("failed to get platform internal irq\n");
2568 ret = hdata->internal_irq;
2569 goto err_hdmiphy; 2537 goto err_hdmiphy;
2570 } 2538 }
2571 2539
2572 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2540 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2573 2541
2574 ret = request_threaded_irq(hdata->external_irq, NULL, 2542 ret = request_threaded_irq(hdata->irq, NULL,
2575 hdmi_external_irq_thread, IRQF_TRIGGER_RISING | 2543 hdmi_irq_thread, IRQF_TRIGGER_RISING |
2576 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2544 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2577 "hdmi_external", drm_hdmi_ctx); 2545 "hdmi", drm_hdmi_ctx);
2578 if (ret) { 2546 if (ret) {
2579 DRM_ERROR("failed to register hdmi external interrupt\n"); 2547 DRM_ERROR("failed to register hdmi interrupt\n");
2580 goto err_hdmiphy; 2548 goto err_hdmiphy;
2581 } 2549 }
2582 2550
2583 ret = request_threaded_irq(hdata->internal_irq, NULL,
2584 hdmi_internal_irq_thread, IRQF_ONESHOT,
2585 "hdmi_internal", drm_hdmi_ctx);
2586 if (ret) {
2587 DRM_ERROR("failed to register hdmi internal interrupt\n");
2588 goto err_free_irq;
2589 }
2590
2591 /* Attach HDMI Driver to common hdmi. */ 2551 /* Attach HDMI Driver to common hdmi. */
2592 exynos_hdmi_drv_attach(drm_hdmi_ctx); 2552 exynos_hdmi_drv_attach(drm_hdmi_ctx);
2593 2553
@@ -2598,8 +2558,6 @@ static int hdmi_probe(struct platform_device *pdev)
2598 2558
2599 return 0; 2559 return 0;
2600 2560
2601err_free_irq:
2602 free_irq(hdata->external_irq, drm_hdmi_ctx);
2603err_hdmiphy: 2561err_hdmiphy:
2604 i2c_del_driver(&hdmiphy_driver); 2562 i2c_del_driver(&hdmiphy_driver);
2605err_ddc: 2563err_ddc:
@@ -2617,8 +2575,7 @@ static int hdmi_remove(struct platform_device *pdev)
2617 2575
2618 pm_runtime_disable(dev); 2576 pm_runtime_disable(dev);
2619 2577
2620 free_irq(hdata->internal_irq, hdata); 2578 free_irq(hdata->irq, hdata);
2621 free_irq(hdata->external_irq, hdata);
2622 2579
2623 2580
2624 /* hdmiphy i2c driver */ 2581 /* hdmiphy i2c driver */
@@ -2637,8 +2594,7 @@ static int hdmi_suspend(struct device *dev)
2637 2594
2638 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2595 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2639 2596
2640 disable_irq(hdata->internal_irq); 2597 disable_irq(hdata->irq);
2641 disable_irq(hdata->external_irq);
2642 2598
2643 hdata->hpd = false; 2599 hdata->hpd = false;
2644 if (ctx->drm_dev) 2600 if (ctx->drm_dev)
@@ -2663,8 +2619,7 @@ static int hdmi_resume(struct device *dev)
2663 2619
2664 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2620 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2665 2621
2666 enable_irq(hdata->external_irq); 2622 enable_irq(hdata->irq);
2667 enable_irq(hdata->internal_irq);
2668 2623
2669 if (!pm_runtime_suspended(dev)) { 2624 if (!pm_runtime_suspended(dev)) {
2670 DRM_DEBUG_KMS("%s : Already resumed\n", __func__); 2625 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c187ea33b748..c414584bfbae 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -600,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
600 /* waiting until VP_SRESET_PROCESSING is 0 */ 600 /* waiting until VP_SRESET_PROCESSING is 0 */
601 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) 601 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
602 break; 602 break;
603 mdelay(10); 603 usleep_range(10000, 12000);
604 } 604 }
605 WARN(tries == 0, "failed to reset Video Processor\n"); 605 WARN(tries == 0, "failed to reset Video Processor\n");
606} 606}
@@ -776,6 +776,13 @@ static void mixer_win_commit(void *ctx, int win)
776 776
777 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 777 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
778 778
779 mutex_lock(&mixer_ctx->mixer_mutex);
780 if (!mixer_ctx->powered) {
781 mutex_unlock(&mixer_ctx->mixer_mutex);
782 return;
783 }
784 mutex_unlock(&mixer_ctx->mixer_mutex);
785
779 if (win > 1 && mixer_ctx->vp_enabled) 786 if (win > 1 && mixer_ctx->vp_enabled)
780 vp_video_buffer(mixer_ctx, win); 787 vp_video_buffer(mixer_ctx, win);
781 else 788 else
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6a11ca85eaf..9d4a2c2adf0e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/export.h> 32#include <linux/export.h>
33#include <generated/utsrelease.h>
33#include <drm/drmP.h> 34#include <drm/drmP.h>
34#include "intel_drv.h" 35#include "intel_drv.h"
35#include "intel_ringbuffer.h" 36#include "intel_ringbuffer.h"
@@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,
641 seq_printf(m, "%s command stream:\n", ring_str(ring)); 642 seq_printf(m, "%s command stream:\n", ring_str(ring));
642 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 643 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
643 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 644 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
645 seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
644 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 646 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
645 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 647 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
646 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 648 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -689,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
689 691
690 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 692 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
691 error->time.tv_usec); 693 error->time.tv_usec);
694 seq_printf(m, "Kernel: " UTS_RELEASE);
692 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 695 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
693 seq_printf(m, "EIR: 0x%08x\n", error->eir); 696 seq_printf(m, "EIR: 0x%08x\n", error->eir);
694 seq_printf(m, "IER: 0x%08x\n", error->ier); 697 seq_printf(m, "IER: 0x%08x\n", error->ier);
695 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 698 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
699 seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
700 seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
696 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 701 seq_printf(m, "CCID: 0x%08x\n", error->ccid);
697 702
698 for (i = 0; i < dev_priv->num_fence_regs; i++) 703 for (i = 0; i < dev_priv->num_fence_regs; i++)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed3059575576..12ab3bdea54d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,10 +188,13 @@ struct drm_i915_error_state {
188 u32 pgtbl_er; 188 u32 pgtbl_er;
189 u32 ier; 189 u32 ier;
190 u32 ccid; 190 u32 ccid;
191 u32 derrmr;
192 u32 forcewake;
191 bool waiting[I915_NUM_RINGS]; 193 bool waiting[I915_NUM_RINGS];
192 u32 pipestat[I915_MAX_PIPES]; 194 u32 pipestat[I915_MAX_PIPES];
193 u32 tail[I915_NUM_RINGS]; 195 u32 tail[I915_NUM_RINGS];
194 u32 head[I915_NUM_RINGS]; 196 u32 head[I915_NUM_RINGS];
197 u32 ctl[I915_NUM_RINGS];
195 u32 ipeir[I915_NUM_RINGS]; 198 u32 ipeir[I915_NUM_RINGS];
196 u32 ipehr[I915_NUM_RINGS]; 199 u32 ipehr[I915_NUM_RINGS];
197 u32 instdone[I915_NUM_RINGS]; 200 u32 instdone[I915_NUM_RINGS];
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a07393..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
539 total = 0; 539 total = 0;
540 for (i = 0; i < count; i++) { 540 for (i = 0; i < count; i++) {
541 struct drm_i915_gem_relocation_entry __user *user_relocs; 541 struct drm_i915_gem_relocation_entry __user *user_relocs;
542 u64 invalid_offset = (u64)-1;
543 int j;
542 544
543 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; 545 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
544 546
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
549 goto err; 551 goto err;
550 } 552 }
551 553
554 /* As we do not update the known relocation offsets after
555 * relocating (due to the complexities in lock handling),
556 * we need to mark them as invalid now so that we force the
557 * relocation processing next time. Just in case the target
558 * object is evicted and then rebound into its old
559 * presumed_offset before the next execbuffer - if that
560 * happened we would make the mistake of assuming that the
561 * relocations were valid.
562 */
563 for (j = 0; j < exec[i].relocation_count; j++) {
564 if (copy_to_user(&user_relocs[j].presumed_offset,
565 &invalid_offset,
566 sizeof(invalid_offset))) {
567 ret = -EFAULT;
568 mutex_lock(&dev->struct_mutex);
569 goto err;
570 }
571 }
572
552 reloc_offset[i] = total; 573 reloc_offset[i] = total;
553 total += exec[i].relocation_count; 574 total += exec[i].relocation_count;
554 } 575 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2220dec3e5d9..fe843389c7b4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
1157 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1157 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1158 error->head[ring->id] = I915_READ_HEAD(ring); 1158 error->head[ring->id] = I915_READ_HEAD(ring);
1159 error->tail[ring->id] = I915_READ_TAIL(ring); 1159 error->tail[ring->id] = I915_READ_TAIL(ring);
1160 error->ctl[ring->id] = I915_READ_CTL(ring);
1160 1161
1161 error->cpu_ring_head[ring->id] = ring->head; 1162 error->cpu_ring_head[ring->id] = ring->head;
1162 error->cpu_ring_tail[ring->id] = ring->tail; 1163 error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
1251 else 1252 else
1252 error->ier = I915_READ(IER); 1253 error->ier = I915_READ(IER);
1253 1254
1255 if (INTEL_INFO(dev)->gen >= 6)
1256 error->derrmr = I915_READ(DERRMR);
1257
1258 if (IS_VALLEYVIEW(dev))
1259 error->forcewake = I915_READ(FORCEWAKE_VLV);
1260 else if (INTEL_INFO(dev)->gen >= 7)
1261 error->forcewake = I915_READ(FORCEWAKE_MT);
1262 else if (INTEL_INFO(dev)->gen == 6)
1263 error->forcewake = I915_READ(FORCEWAKE);
1264
1254 for_each_pipe(pipe) 1265 for_each_pipe(pipe)
1255 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1266 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1256 1267
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 186ee5c85b51..59afb7eb6db6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -512,6 +512,8 @@
512#define GEN7_ERR_INT 0x44040 512#define GEN7_ERR_INT 0x44040
513#define ERR_INT_MMIO_UNCLAIMED (1<<13) 513#define ERR_INT_MMIO_UNCLAIMED (1<<13)
514 514
515#define DERRMR 0x44050
516
515/* GM45+ chicken bits -- debug workaround bits that may be required 517/* GM45+ chicken bits -- debug workaround bits that may be required
516 * for various sorts of correct behavior. The top 16 bits of each are 518 * for various sorts of correct behavior. The top 16 bits of each are
517 * the enables for writing to the corresponding low bit. 519 * the enables for writing to the corresponding low bit.
@@ -531,6 +533,7 @@
531#define MI_MODE 0x0209c 533#define MI_MODE 0x0209c
532# define VS_TIMER_DISPATCH (1 << 6) 534# define VS_TIMER_DISPATCH (1 << 6)
533# define MI_FLUSH_ENABLE (1 << 12) 535# define MI_FLUSH_ENABLE (1 << 12)
536# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
534 537
535#define GEN6_GT_MODE 0x20d0 538#define GEN6_GT_MODE 0x20d0
536#define GEN6_GT_MODE_HI (1 << 9) 539#define GEN6_GT_MODE_HI (1 << 9)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0..fb3715b4b09d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
2579 2579
2580static void 2580static void
2581intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2581intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2582 struct intel_dp *intel_dp) 2582 struct intel_dp *intel_dp,
2583 struct edp_power_seq *out)
2583{ 2584{
2584 struct drm_i915_private *dev_priv = dev->dev_private; 2585 struct drm_i915_private *dev_priv = dev->dev_private;
2585 struct edp_power_seq cur, vbt, spec, final; 2586 struct edp_power_seq cur, vbt, spec, final;
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2650 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2651 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2651#undef get_delay 2652#undef get_delay
2652 2653
2654 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2655 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2656 intel_dp->panel_power_cycle_delay);
2657
2658 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2659 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2660
2661 if (out)
2662 *out = final;
2663}
2664
2665static void
2666intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2667 struct intel_dp *intel_dp,
2668 struct edp_power_seq *seq)
2669{
2670 struct drm_i915_private *dev_priv = dev->dev_private;
2671 u32 pp_on, pp_off, pp_div;
2672
2653 /* And finally store the new values in the power sequencer. */ 2673 /* And finally store the new values in the power sequencer. */
2654 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2674 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2655 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2675 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2656 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2676 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2657 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2677 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2658 /* Compute the divisor for the pp clock, simply match the Bspec 2678 /* Compute the divisor for the pp clock, simply match the Bspec
2659 * formula. */ 2679 * formula. */
2660 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2680 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
2661 << PP_REFERENCE_DIVIDER_SHIFT; 2681 << PP_REFERENCE_DIVIDER_SHIFT;
2662 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) 2682 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
2663 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2683 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2664 2684
2665 /* Haswell doesn't have any port selection bits for the panel 2685 /* Haswell doesn't have any port selection bits for the panel
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2675 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2695 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
2676 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2696 I915_WRITE(PCH_PP_DIVISOR, pp_div);
2677 2697
2678
2679 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2680 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2681 intel_dp->panel_power_cycle_delay);
2682
2683 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2684 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2685
2686 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2698 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2687 I915_READ(PCH_PP_ON_DELAYS), 2699 I915_READ(PCH_PP_ON_DELAYS),
2688 I915_READ(PCH_PP_OFF_DELAYS), 2700 I915_READ(PCH_PP_OFF_DELAYS),
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2699 struct drm_device *dev = intel_encoder->base.dev; 2711 struct drm_device *dev = intel_encoder->base.dev;
2700 struct drm_i915_private *dev_priv = dev->dev_private; 2712 struct drm_i915_private *dev_priv = dev->dev_private;
2701 struct drm_display_mode *fixed_mode = NULL; 2713 struct drm_display_mode *fixed_mode = NULL;
2714 struct edp_power_seq power_seq = { 0 };
2702 enum port port = intel_dig_port->port; 2715 enum port port = intel_dig_port->port;
2703 const char *name = NULL; 2716 const char *name = NULL;
2704 int type; 2717 int type;
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2771 } 2784 }
2772 2785
2773 if (is_edp(intel_dp)) 2786 if (is_edp(intel_dp))
2774 intel_dp_init_panel_power_sequencer(dev, intel_dp); 2787 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2775 2788
2776 intel_dp_i2c_init(intel_dp, intel_connector, name); 2789 intel_dp_i2c_init(intel_dp, intel_connector, name);
2777 2790
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2798 return; 2811 return;
2799 } 2812 }
2800 2813
2814 /* We now know it's not a ghost, init power sequence regs. */
2815 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2816 &power_seq);
2817
2801 ironlake_edp_panel_vdd_on(intel_dp); 2818 ironlake_edp_panel_vdd_on(intel_dp);
2802 edid = drm_get_edid(connector, &intel_dp->adapter); 2819 edid = drm_get_edid(connector, &intel_dp->adapter);
2803 if (edid) { 2820 if (edid) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e83a11794172..3280cffe50f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4250,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4250static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) 4250static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4251{ 4251{
4252 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); 4252 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
4253 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4253 /* something from same cacheline, but !FORCEWAKE_MT */
4254 POSTING_READ(ECOBUS);
4254} 4255}
4255 4256
4256static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) 4257static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -4267,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4267 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4268 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4268 4269
4269 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4270 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4270 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4271 /* something from same cacheline, but !FORCEWAKE_MT */
4272 POSTING_READ(ECOBUS);
4271 4273
4272 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4274 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4273 FORCEWAKE_ACK_TIMEOUT_MS)) 4275 FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4304,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4304static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4306static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4305{ 4307{
4306 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4308 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4307 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4309 /* something from same cacheline, but !FORCEWAKE */
4310 POSTING_READ(ECOBUS);
4308 gen6_gt_check_fifodbg(dev_priv); 4311 gen6_gt_check_fifodbg(dev_priv);
4309} 4312}
4310 4313
4311static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4314static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4312{ 4315{
4313 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4316 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4314 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4317 /* something from same cacheline, but !FORCEWAKE_MT */
4318 POSTING_READ(ECOBUS);
4315 gen6_gt_check_fifodbg(dev_priv); 4319 gen6_gt_check_fifodbg(dev_priv);
4316} 4320}
4317 4321
@@ -4351,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4351static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) 4355static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4352{ 4356{
4353 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); 4357 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
4358 /* something from same cacheline, but !FORCEWAKE_VLV */
4359 POSTING_READ(FORCEWAKE_ACK_VLV);
4354} 4360}
4355 4361
4356static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4362static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
@@ -4371,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4371static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4377static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4372{ 4378{
4373 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4379 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4374 /* The below doubles as a POSTING_READ */ 4380 /* something from same cacheline, but !FORCEWAKE_VLV */
4381 POSTING_READ(FORCEWAKE_ACK_VLV);
4375 gen6_gt_check_fifodbg(dev_priv); 4382 gen6_gt_check_fifodbg(dev_priv);
4376} 4383}
4377 4384
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae253e04c391..42ff97d667d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
505 struct drm_i915_private *dev_priv = dev->dev_private; 505 struct drm_i915_private *dev_priv = dev->dev_private;
506 int ret = init_ring_common(ring); 506 int ret = init_ring_common(ring);
507 507
508 if (INTEL_INFO(dev)->gen > 3) { 508 if (INTEL_INFO(dev)->gen > 3)
509 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 509 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
510 if (IS_GEN7(dev)) 510
511 I915_WRITE(GFX_MODE_GEN7, 511 /* We need to disable the AsyncFlip performance optimisations in order
512 _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 512 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
513 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 513 * programmed to '1' on all products.
514 } 514 */
515 if (INTEL_INFO(dev)->gen >= 6)
516 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
517
518 /* Required for the hardware to program scanline values for waiting */
519 if (INTEL_INFO(dev)->gen == 6)
520 I915_WRITE(GFX_MODE,
521 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
522
523 if (IS_GEN7(dev))
524 I915_WRITE(GFX_MODE_GEN7,
525 _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
526 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
515 527
516 if (INTEL_INFO(dev)->gen >= 5) { 528 if (INTEL_INFO(dev)->gen >= 5) {
517 ret = init_pipe_control(ring); 529 ret = init_pipe_control(ring);
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
index 6b0843c33877..e05c15777588 100644
--- a/drivers/gpu/drm/nouveau/core/core/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
73 nv_debug(falcon, "data limit: %d\n", falcon->data.limit); 73 nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
74 74
75 /* wait for 'uc halted' to be signalled before continuing */ 75 /* wait for 'uc halted' to be signalled before continuing */
76 if (falcon->secret) { 76 if (falcon->secret && falcon->version < 4) {
77 nv_wait(falcon, 0x008, 0x00000010, 0x00000010); 77 if (!falcon->version)
78 nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
79 else
80 nv_wait(falcon, 0x180, 0x80000000, 0);
78 nv_wo32(falcon, 0x004, 0x00000010); 81 nv_wo32(falcon, 0x004, 0x00000010);
79 } 82 }
80 83
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index f74c30aa33a0..48f06378d3f9 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
99 if (ret) 99 if (ret)
100 return ret; 100 return ret;
101 101
102 mutex_init(&subdev->mutex); 102 __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
103 subdev->name = subname; 103 subdev->name = subname;
104 104
105 if (parent) { 105 if (parent) {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 5982935ee23a..106bb19fdd9a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);
50 50
51extern struct nouveau_ofuncs nouveau_object_ofuncs; 51extern struct nouveau_ofuncs nouveau_object_ofuncs;
52 52
53/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
54 * ".data". */
53struct nouveau_oclass { 55struct nouveau_oclass {
54 u32 handle; 56 u32 handle;
55 struct nouveau_ofuncs *ofuncs; 57 struct nouveau_ofuncs * const ofuncs;
56 struct nouveau_omthds *omthds; 58 struct nouveau_omthds * const omthds;
59 struct lock_class_key lock_class_key;
57}; 60};
58 61
59#define nv_oclass(o) nv_object(o)->oclass 62#define nv_oclass(o) nv_object(o)->oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index d6d16007ec1a..d62045f454b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
86 return ret; 86 return ret;
87 } 87 }
88 88
89 if (!nouveau_mm_initialised(&pfb->tags) && tags) { 89 if (!nouveau_mm_initialised(&pfb->tags)) {
90 ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); 90 ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
91 if (ret) 91 if (ret)
92 return ret; 92 return ret;
93 } 93 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 487cb8c6c204..eac236ed19b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
99 struct nouveau_bios *bios = nouveau_bios(device); 99 struct nouveau_bios *bios = nouveau_bios(device);
100 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 100 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
101 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 101 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
102 u32 size; 102 u32 size, tags = 0;
103 int ret; 103 int ret;
104 104
105 pfb->ram.size = nv_rd32(pfb, 0x10020c); 105 pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
140 return ret; 140 return ret;
141 141
142 pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; 142 pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
143 tags = nv_rd32(pfb, 0x100320);
143 break; 144 break;
144 } 145 }
145 146
146 return nv_rd32(pfb, 0x100320); 147 return tags;
147} 148}
148 149
149static int 150static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 69d7b1d0b9d6..1699a9083a2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <core/engine.h> 30#include <core/engine.h>
31#include <linux/swiotlb.h>
31 32
32#include <subdev/fb.h> 33#include <subdev/fb.h>
33#include <subdev/vm.h> 34#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8b090f1eb51d..5e7aef23825a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
245 return 0; 245 return 0;
246} 246}
247 247
248static struct lock_class_key drm_client_lock_class_key;
249
248static int 250static int
249nouveau_drm_load(struct drm_device *dev, unsigned long flags) 251nouveau_drm_load(struct drm_device *dev, unsigned long flags)
250{ 252{
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
256 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); 258 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
257 if (ret) 259 if (ret)
258 return ret; 260 return ret;
261 lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
259 262
260 dev->dev_private = drm; 263 dev->dev_private = drm;
261 drm->dev = dev; 264 drm->dev = dev;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 061fa0a28900..a2d478e8692a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1313 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 1313 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1314 radeon_wait_for_vblank(rdev, i); 1314 radeon_wait_for_vblank(rdev, i);
1315 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1315 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1316 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1317 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1318 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1317 } 1319 }
1318 } else { 1320 } else {
1319 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1321 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1320 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { 1322 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1321 radeon_wait_for_vblank(rdev, i); 1323 radeon_wait_for_vblank(rdev, i);
1322 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1324 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1323 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1326 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1324 } 1328 }
1325 } 1329 }
1326 /* wait for the next frame */ 1330 /* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1345 blackout &= ~BLACKOUT_MODE_MASK; 1349 blackout &= ~BLACKOUT_MODE_MASK;
1346 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1350 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1347 } 1351 }
1352 /* wait for the MC to settle */
1353 udelay(100);
1348} 1354}
1349 1355
1350void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) 1356void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1378 if (ASIC_IS_DCE6(rdev)) { 1384 if (ASIC_IS_DCE6(rdev)) {
1379 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1385 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1380 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1386 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1387 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1381 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1388 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1389 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1382 } else { 1390 } else {
1383 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1391 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1384 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1392 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1393 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1385 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1394 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1395 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1386 } 1396 }
1387 /* wait for the next frame */ 1397 /* wait for the next frame */
1388 frame_count = radeon_get_vblank_counter(rdev, i); 1398 frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2036 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2046 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2037 WREG32(DMA_TILING_CONFIG, gb_addr_config); 2047 WREG32(DMA_TILING_CONFIG, gb_addr_config);
2038 2048
2039 tmp = gb_addr_config & NUM_PIPES_MASK; 2049 if ((rdev->config.evergreen.max_backends == 1) &&
2040 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2050 (rdev->flags & RADEON_IS_IGP)) {
2041 EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2051 if ((disabled_rb_mask & 3) == 1) {
2052 /* RB0 disabled, RB1 enabled */
2053 tmp = 0x11111111;
2054 } else {
2055 /* RB1 disabled, RB0 enabled */
2056 tmp = 0x00000000;
2057 }
2058 } else {
2059 tmp = gb_addr_config & NUM_PIPES_MASK;
2060 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2061 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
2062 }
2042 WREG32(GB_BACKEND_MAP, tmp); 2063 WREG32(GB_BACKEND_MAP, tmp);
2043 2064
2044 WREG32(CGTS_SYS_TCC_DISABLE, 0); 2065 WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -2401,6 +2422,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2401{ 2422{
2402 struct evergreen_mc_save save; 2423 struct evergreen_mc_save save;
2403 2424
2425 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2426 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2427
2428 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2429 reset_mask &= ~RADEON_RESET_DMA;
2430
2404 if (reset_mask == 0) 2431 if (reset_mask == 0)
2405 return 0; 2432 return 0;
2406 2433
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7a445666e71f..ee4cff534f10 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2909 return -EINVAL; 2909 return -EINVAL;
2910 } 2910 }
2911 if (tiled) { 2911 if (tiled) {
2912 dst_offset = ib[idx+1]; 2912 dst_offset = radeon_get_ib_value(p, idx+1);
2913 dst_offset <<= 8; 2913 dst_offset <<= 8;
2914 2914
2915 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2915 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2916 p->idx += count + 7; 2916 p->idx += count + 7;
2917 } else { 2917 } else {
2918 dst_offset = ib[idx+1]; 2918 dst_offset = radeon_get_ib_value(p, idx+1);
2919 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; 2919 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2920 2920
2921 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2921 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2922 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2922 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2954 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); 2954 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2955 return -EINVAL; 2955 return -EINVAL;
2956 } 2956 }
2957 dst_offset = ib[idx+1]; 2957 dst_offset = radeon_get_ib_value(p, idx+1);
2958 dst_offset <<= 8; 2958 dst_offset <<= 8;
2959 dst2_offset = ib[idx+2]; 2959 dst2_offset = radeon_get_ib_value(p, idx+2);
2960 dst2_offset <<= 8; 2960 dst2_offset <<= 8;
2961 src_offset = ib[idx+8]; 2961 src_offset = radeon_get_ib_value(p, idx+8);
2962 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; 2962 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
2963 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2963 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2964 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", 2964 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
2965 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2965 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3014 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3014 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3015 return -EINVAL; 3015 return -EINVAL;
3016 } 3016 }
3017 dst_offset = ib[idx+1]; 3017 dst_offset = radeon_get_ib_value(p, idx+1);
3018 dst_offset <<= 8; 3018 dst_offset <<= 8;
3019 dst2_offset = ib[idx+2]; 3019 dst2_offset = radeon_get_ib_value(p, idx+2);
3020 dst2_offset <<= 8; 3020 dst2_offset <<= 8;
3021 src_offset = ib[idx+8]; 3021 src_offset = radeon_get_ib_value(p, idx+8);
3022 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; 3022 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
3023 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3023 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3024 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3024 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3025 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3025 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3046 /* detile bit */ 3046 /* detile bit */
3047 if (idx_value & (1 << 31)) { 3047 if (idx_value & (1 << 31)) {
3048 /* tiled src, linear dst */ 3048 /* tiled src, linear dst */
3049 src_offset = ib[idx+1]; 3049 src_offset = radeon_get_ib_value(p, idx+1);
3050 src_offset <<= 8; 3050 src_offset <<= 8;
3051 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 3051 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3052 3052
3053 dst_offset = ib[idx+7]; 3053 dst_offset = radeon_get_ib_value(p, idx+7);
3054 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; 3054 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3055 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 3055 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3056 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 3056 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3057 } else { 3057 } else {
3058 /* linear src, tiled dst */ 3058 /* linear src, tiled dst */
3059 src_offset = ib[idx+7]; 3059 src_offset = radeon_get_ib_value(p, idx+7);
3060 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; 3060 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3061 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 3061 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3062 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 3062 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3063 3063
3064 dst_offset = ib[idx+1]; 3064 dst_offset = radeon_get_ib_value(p, idx+1);
3065 dst_offset <<= 8; 3065 dst_offset <<= 8;
3066 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 3066 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3067 } 3067 }
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3098 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3098 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3099 return -EINVAL; 3099 return -EINVAL;
3100 } 3100 }
3101 dst_offset = ib[idx+1]; 3101 dst_offset = radeon_get_ib_value(p, idx+1);
3102 dst_offset <<= 8; 3102 dst_offset <<= 8;
3103 dst2_offset = ib[idx+2]; 3103 dst2_offset = radeon_get_ib_value(p, idx+2);
3104 dst2_offset <<= 8; 3104 dst2_offset <<= 8;
3105 src_offset = ib[idx+8]; 3105 src_offset = radeon_get_ib_value(p, idx+8);
3106 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; 3106 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
3107 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3107 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3108 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3108 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3109 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3109 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3135 /* detile bit */ 3135 /* detile bit */
3136 if (idx_value & (1 << 31)) { 3136 if (idx_value & (1 << 31)) {
3137 /* tiled src, linear dst */ 3137 /* tiled src, linear dst */
3138 src_offset = ib[idx+1]; 3138 src_offset = radeon_get_ib_value(p, idx+1);
3139 src_offset <<= 8; 3139 src_offset <<= 8;
3140 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 3140 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3141 3141
3142 dst_offset = ib[idx+7]; 3142 dst_offset = radeon_get_ib_value(p, idx+7);
3143 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; 3143 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3144 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 3144 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3145 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 3145 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3146 } else { 3146 } else {
3147 /* linear src, tiled dst */ 3147 /* linear src, tiled dst */
3148 src_offset = ib[idx+7]; 3148 src_offset = radeon_get_ib_value(p, idx+7);
3149 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; 3149 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3150 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 3150 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3151 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 3151 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3152 3152
3153 dst_offset = ib[idx+1]; 3153 dst_offset = radeon_get_ib_value(p, idx+1);
3154 dst_offset <<= 8; 3154 dst_offset <<= 8;
3155 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 3155 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3156 } 3156 }
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3176 switch (misc) { 3176 switch (misc) {
3177 case 0: 3177 case 0:
3178 /* L2L, byte */ 3178 /* L2L, byte */
3179 src_offset = ib[idx+2]; 3179 src_offset = radeon_get_ib_value(p, idx+2);
3180 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; 3180 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
3181 dst_offset = ib[idx+1]; 3181 dst_offset = radeon_get_ib_value(p, idx+1);
3182 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; 3182 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
3183 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { 3183 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
3184 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", 3184 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
3185 src_offset + count, radeon_bo_size(src_reloc->robj)); 3185 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3216 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); 3216 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
3217 return -EINVAL; 3217 return -EINVAL;
3218 } 3218 }
3219 dst_offset = ib[idx+1]; 3219 dst_offset = radeon_get_ib_value(p, idx+1);
3220 dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; 3220 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
3221 dst2_offset = ib[idx+2]; 3221 dst2_offset = radeon_get_ib_value(p, idx+2);
3222 dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; 3222 dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
3223 src_offset = ib[idx+3]; 3223 src_offset = radeon_get_ib_value(p, idx+3);
3224 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; 3224 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
3225 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3225 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3226 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", 3226 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
3227 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3227 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3251 } 3251 }
3252 } else { 3252 } else {
3253 /* L2L, dw */ 3253 /* L2L, dw */
3254 src_offset = ib[idx+2]; 3254 src_offset = radeon_get_ib_value(p, idx+2);
3255 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; 3255 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
3256 dst_offset = ib[idx+1]; 3256 dst_offset = radeon_get_ib_value(p, idx+1);
3257 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; 3257 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
3258 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3258 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3259 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", 3259 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
3260 src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3260 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3279 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); 3279 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3280 return -EINVAL; 3280 return -EINVAL;
3281 } 3281 }
3282 dst_offset = ib[idx+1]; 3282 dst_offset = radeon_get_ib_value(p, idx+1);
3283 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; 3283 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
3284 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3284 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3285 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 3285 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
3286 dst_offset, radeon_bo_size(dst_reloc->robj)); 3286 dst_offset, radeon_bo_size(dst_reloc->robj));
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 896f1cbc58a5..835992d8d067 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
1216int cayman_dma_resume(struct radeon_device *rdev) 1216int cayman_dma_resume(struct radeon_device *rdev)
1217{ 1217{
1218 struct radeon_ring *ring; 1218 struct radeon_ring *ring;
1219 u32 rb_cntl, dma_cntl; 1219 u32 rb_cntl, dma_cntl, ib_cntl;
1220 u32 rb_bufsz; 1220 u32 rb_bufsz;
1221 u32 reg_offset, wb_offset; 1221 u32 reg_offset, wb_offset;
1222 int i, r; 1222 int i, r;
@@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
1265 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); 1265 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1266 1266
1267 /* enable DMA IBs */ 1267 /* enable DMA IBs */
1268 WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE); 1268 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
1269#ifdef __BIG_ENDIAN
1270 ib_cntl |= DMA_IB_SWAP_ENABLE;
1271#endif
1272 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
1269 1273
1270 dma_cntl = RREG32(DMA_CNTL + reg_offset); 1274 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1271 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 1275 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@@ -1409,6 +1413,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1409{ 1413{
1410 struct evergreen_mc_save save; 1414 struct evergreen_mc_save save;
1411 1415
1416 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1417 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1418
1419 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1420 reset_mask &= ~RADEON_RESET_DMA;
1421
1412 if (reset_mask == 0) 1422 if (reset_mask == 0)
1413 return 0; 1423 return 0;
1414 1424
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 537e259b3837..becb03e8b32f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1378{ 1378{
1379 struct rv515_mc_save save; 1379 struct rv515_mc_save save;
1380 1380
1381 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1382 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1383
1384 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1385 reset_mask &= ~RADEON_RESET_DMA;
1386
1381 if (reset_mask == 0) 1387 if (reset_mask == 0)
1382 return 0; 1388 return 0;
1383 1389
@@ -1456,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1456 u32 disabled_rb_mask) 1462 u32 disabled_rb_mask)
1457{ 1463{
1458 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1464 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1459 u32 pipe_rb_ratio, pipe_rb_remain; 1465 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1460 u32 data = 0, mask = 1 << (max_rb_num - 1); 1466 u32 data = 0, mask = 1 << (max_rb_num - 1);
1461 unsigned i, j; 1467 unsigned i, j;
1462 1468
1463 /* mask out the RBs that don't exist on that asic */ 1469 /* mask out the RBs that don't exist on that asic */
1464 disabled_rb_mask |= (0xff << max_rb_num) & 0xff; 1470 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1471 /* make sure at least one RB is available */
1472 if ((tmp & 0xff) != 0xff)
1473 disabled_rb_mask = tmp;
1465 1474
1466 rendering_pipe_num = 1 << tiling_pipe_num; 1475 rendering_pipe_num = 1 << tiling_pipe_num;
1467 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1476 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -2307,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev)
2307int r600_dma_resume(struct radeon_device *rdev) 2316int r600_dma_resume(struct radeon_device *rdev)
2308{ 2317{
2309 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2318 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2310 u32 rb_cntl, dma_cntl; 2319 u32 rb_cntl, dma_cntl, ib_cntl;
2311 u32 rb_bufsz; 2320 u32 rb_bufsz;
2312 int r; 2321 int r;
2313 2322
@@ -2347,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev)
2347 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); 2356 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2348 2357
2349 /* enable DMA IBs */ 2358 /* enable DMA IBs */
2350 WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); 2359 ib_cntl = DMA_IB_ENABLE;
2360#ifdef __BIG_ENDIAN
2361 ib_cntl |= DMA_IB_SWAP_ENABLE;
2362#endif
2363 WREG32(DMA_IB_CNTL, ib_cntl);
2351 2364
2352 dma_cntl = RREG32(DMA_CNTL); 2365 dma_cntl = RREG32(DMA_CNTL);
2353 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 2366 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 69ec24ab8d63..9b2512bf1a46 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2623 return -EINVAL; 2623 return -EINVAL;
2624 } 2624 }
2625 if (tiled) { 2625 if (tiled) {
2626 dst_offset = ib[idx+1]; 2626 dst_offset = radeon_get_ib_value(p, idx+1);
2627 dst_offset <<= 8; 2627 dst_offset <<= 8;
2628 2628
2629 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2629 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2630 p->idx += count + 5; 2630 p->idx += count + 5;
2631 } else { 2631 } else {
2632 dst_offset = ib[idx+1]; 2632 dst_offset = radeon_get_ib_value(p, idx+1);
2633 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; 2633 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2634 2634
2635 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2635 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2636 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2636 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2658 /* detile bit */ 2658 /* detile bit */
2659 if (idx_value & (1 << 31)) { 2659 if (idx_value & (1 << 31)) {
2660 /* tiled src, linear dst */ 2660 /* tiled src, linear dst */
2661 src_offset = ib[idx+1]; 2661 src_offset = radeon_get_ib_value(p, idx+1);
2662 src_offset <<= 8; 2662 src_offset <<= 8;
2663 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); 2663 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2664 2664
2665 dst_offset = ib[idx+5]; 2665 dst_offset = radeon_get_ib_value(p, idx+5);
2666 dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; 2666 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2667 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2667 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2668 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; 2668 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2669 } else { 2669 } else {
2670 /* linear src, tiled dst */ 2670 /* linear src, tiled dst */
2671 src_offset = ib[idx+5]; 2671 src_offset = radeon_get_ib_value(p, idx+5);
2672 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; 2672 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2673 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2673 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2674 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2674 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2675 2675
2676 dst_offset = ib[idx+1]; 2676 dst_offset = radeon_get_ib_value(p, idx+1);
2677 dst_offset <<= 8; 2677 dst_offset <<= 8;
2678 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); 2678 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2679 } 2679 }
2680 p->idx += 7; 2680 p->idx += 7;
2681 } else { 2681 } else {
2682 if (p->family >= CHIP_RV770) { 2682 if (p->family >= CHIP_RV770) {
2683 src_offset = ib[idx+2]; 2683 src_offset = radeon_get_ib_value(p, idx+2);
2684 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; 2684 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2685 dst_offset = ib[idx+1]; 2685 dst_offset = radeon_get_ib_value(p, idx+1);
2686 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; 2686 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2687 2687
2688 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2688 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2689 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2689 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2691 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; 2691 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2692 p->idx += 5; 2692 p->idx += 5;
2693 } else { 2693 } else {
2694 src_offset = ib[idx+2]; 2694 src_offset = radeon_get_ib_value(p, idx+2);
2695 src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; 2695 src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2696 dst_offset = ib[idx+1]; 2696 dst_offset = radeon_get_ib_value(p, idx+1);
2697 dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16; 2697 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2698 2698
2699 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 2699 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2700 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); 2700 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2724 DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2724 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2725 return -EINVAL; 2725 return -EINVAL;
2726 } 2726 }
2727 dst_offset = ib[idx+1]; 2727 dst_offset = radeon_get_ib_value(p, idx+1);
2728 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; 2728 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2729 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2729 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2730 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 2730 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2731 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2731 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 34e52304a525..a08f657329a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -324,7 +324,6 @@ struct radeon_bo {
324 struct list_head list; 324 struct list_head list;
325 /* Protected by tbo.reserved */ 325 /* Protected by tbo.reserved */
326 u32 placements[3]; 326 u32 placements[3];
327 u32 busy_placements[3];
328 struct ttm_placement placement; 327 struct ttm_placement placement;
329 struct ttm_buffer_object tbo; 328 struct ttm_buffer_object tbo;
330 struct ttm_bo_kmap_obj kmap; 329 struct ttm_bo_kmap_obj kmap;
@@ -654,6 +653,8 @@ struct radeon_ring {
654 u32 ptr_reg_mask; 653 u32 ptr_reg_mask;
655 u32 nop; 654 u32 nop;
656 u32 idx; 655 u32 idx;
656 u64 last_semaphore_signal_addr;
657 u64 last_semaphore_wait_addr;
657}; 658};
658 659
659/* 660/*
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 9056fafb00ea..0b202c07fe50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
1445 .vm = { 1445 .vm = {
1446 .init = &cayman_vm_init, 1446 .init = &cayman_vm_init,
1447 .fini = &cayman_vm_fini, 1447 .fini = &cayman_vm_fini,
1448 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1448 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1449 .set_page = &cayman_vm_set_page, 1449 .set_page = &cayman_vm_set_page,
1450 }, 1450 },
1451 .ring = { 1451 .ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
1572 .vm = { 1572 .vm = {
1573 .init = &cayman_vm_init, 1573 .init = &cayman_vm_init,
1574 .fini = &cayman_vm_fini, 1574 .fini = &cayman_vm_fini,
1575 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1575 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1576 .set_page = &cayman_vm_set_page, 1576 .set_page = &cayman_vm_set_page,
1577 }, 1577 },
1578 .ring = { 1578 .ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
1699 .vm = { 1699 .vm = {
1700 .init = &si_vm_init, 1700 .init = &si_vm_init,
1701 .fini = &si_vm_fini, 1701 .fini = &si_vm_fini,
1702 .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1702 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1703 .set_page = &si_vm_set_page, 1703 .set_page = &si_vm_set_page,
1704 }, 1704 },
1705 .ring = { 1705 .ring = {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 33a56a09ff10..3e403bdda58f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2470 1), 2470 1),
2471 ATOM_DEVICE_CRT1_SUPPORT); 2471 ATOM_DEVICE_CRT1_SUPPORT);
2472 } 2472 }
2473 /* RV100 board with external TDMS bit mis-set.
2474 * Actually uses internal TMDS, clear the bit.
2475 */
2476 if (dev->pdev->device == 0x5159 &&
2477 dev->pdev->subsystem_vendor == 0x1014 &&
2478 dev->pdev->subsystem_device == 0x029A) {
2479 tmp &= ~(1 << 4);
2480 }
2473 if ((tmp >> 4) & 0x1) { 2481 if ((tmp >> 4) & 0x1) {
2474 devices |= ATOM_DEVICE_DFP2_SUPPORT; 2482 devices |= ATOM_DEVICE_DFP2_SUPPORT;
2475 radeon_add_legacy_encoder(dev, 2483 radeon_add_legacy_encoder(dev,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 469661fd1903..5407459e56d2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -286,6 +286,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]); 287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]); 288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
289 p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
290 p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
289 return -ENOMEM; 291 return -ENOMEM;
290 } 292 }
291 } 293 }
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index ad6df625e8b8..0d67674b64b1 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
241 y = 0; 241 y = 0;
242 } 242 }
243 243
244 if (ASIC_IS_AVIVO(rdev)) { 244 /* fixed on DCE6 and newer */
245 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
245 int i = 0; 246 int i = 0;
246 struct drm_crtc *crtc_p; 247 struct drm_crtc *crtc_p;
247 248
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index edfc54e41842..0d6562bb0c93 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
429{ 429{
430 uint32_t reg; 430 uint32_t reg;
431 431
432 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) 432 if (efi_enabled(EFI_BOOT) &&
433 rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
433 return false; 434 return false;
434 435
435 /* first check CRTCs */ 436 /* first check CRTCs */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1da2386d7cf7..05c96fa0b051 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1115,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
1115 } 1115 }
1116 1116
1117 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1117 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
1118 if (radeon_fb == NULL) 1118 if (radeon_fb == NULL) {
1119 drm_gem_object_unreference_unlocked(obj);
1119 return ERR_PTR(-ENOMEM); 1120 return ERR_PTR(-ENOMEM);
1121 }
1120 1122
1121 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1123 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
1122 if (ret) { 1124 if (ret) {
1123 kfree(radeon_fb); 1125 kfree(radeon_fb);
1124 drm_gem_object_unreference_unlocked(obj); 1126 drm_gem_object_unreference_unlocked(obj);
1125 return NULL; 1127 return ERR_PTR(ret);
1126 } 1128 }
1127 1129
1128 return &radeon_fb->base; 1130 return &radeon_fb->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dff6cf77f953..d9bf96ee299a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -69,9 +69,10 @@
69 * 2.26.0 - r600-eg: fix htile size computation 69 * 2.26.0 - r600-eg: fix htile size computation
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA 70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers
72 */ 73 */
73#define KMS_DRIVER_MAJOR 2 74#define KMS_DRIVER_MAJOR 2
74#define KMS_DRIVER_MINOR 28 75#define KMS_DRIVER_MINOR 29
75#define KMS_DRIVER_PATCHLEVEL 0 76#define KMS_DRIVER_PATCHLEVEL 0
76int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 77int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
77int radeon_driver_unload_kms(struct drm_device *dev); 78int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 883c95d8d90f..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
84 rbo->placement.fpfn = 0; 84 rbo->placement.fpfn = 0;
85 rbo->placement.lpfn = 0; 85 rbo->placement.lpfn = 0;
86 rbo->placement.placement = rbo->placements; 86 rbo->placement.placement = rbo->placements;
87 rbo->placement.busy_placement = rbo->placements;
87 if (domain & RADEON_GEM_DOMAIN_VRAM) 88 if (domain & RADEON_GEM_DOMAIN_VRAM)
88 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 89 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
89 TTM_PL_FLAG_VRAM; 90 TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
104 if (!c) 105 if (!c)
105 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 106 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
106 rbo->placement.num_placement = c; 107 rbo->placement.num_placement = c;
107
108 c = 0;
109 rbo->placement.busy_placement = rbo->busy_placements;
110 if (rbo->rdev->flags & RADEON_IS_AGP) {
111 rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
112 } else {
113 rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
114 }
115 rbo->placement.num_busy_placement = c; 108 rbo->placement.num_busy_placement = c;
116} 109}
117 110
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
357{ 350{
358 struct radeon_bo_list *lobj; 351 struct radeon_bo_list *lobj;
359 struct radeon_bo *bo; 352 struct radeon_bo *bo;
353 u32 domain;
360 int r; 354 int r;
361 355
362 r = ttm_eu_reserve_buffers(head); 356 r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
366 list_for_each_entry(lobj, head, tv.head) { 360 list_for_each_entry(lobj, head, tv.head) {
367 bo = lobj->bo; 361 bo = lobj->bo;
368 if (!bo->pin_count) { 362 if (!bo->pin_count) {
363 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
364
365 retry:
366 radeon_ttm_placement_from_domain(bo, domain);
369 r = ttm_bo_validate(&bo->tbo, &bo->placement, 367 r = ttm_bo_validate(&bo->tbo, &bo->placement,
370 true, false); 368 true, false);
371 if (unlikely(r)) { 369 if (unlikely(r)) {
370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
371 domain |= RADEON_GEM_DOMAIN_GTT;
372 goto retry;
373 }
372 return r; 374 return r;
373 } 375 }
374 } 376 }
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 141f2b6a9cf2..cd72062d5a91 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
377{ 377{
378 int r; 378 int r;
379 379
380 /* make sure we aren't trying to allocate more space than there is on the ring */
381 if (ndw > (ring->ring_size / 4))
382 return -ENOMEM;
380 /* Align requested size with padding so unlock_commit can 383 /* Align requested size with padding so unlock_commit can
381 * pad safely */ 384 * pad safely */
382 ndw = (ndw + ring->align_mask) & ~ring->align_mask; 385 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -784,6 +787,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
784 } 787 }
785 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); 788 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
786 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); 789 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
790 seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
791 seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
787 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 792 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
788 seq_printf(m, "%u dwords in ring\n", count); 793 seq_printf(m, "%u dwords in ring\n", count);
789 /* print 8 dw before current rptr as often it's the last executed 794 /* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
95 /* we assume caller has already allocated space on waiters ring */ 95 /* we assume caller has already allocated space on waiters ring */
96 radeon_semaphore_emit_wait(rdev, waiter, semaphore); 96 radeon_semaphore_emit_wait(rdev, waiter, semaphore);
97 97
98 /* for debugging lockup only, used by sysfs debug files */
99 rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
100 rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
101
98 return 0; 102 return 0;
99} 103}
100 104
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1d8ff2f850ba..93f760e27a92 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
38#include <drm/radeon_drm.h> 38#include <drm/radeon_drm.h>
39#include <linux/seq_file.h> 39#include <linux/seq_file.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/swiotlb.h>
41#include "radeon_reg.h" 42#include "radeon_reg.h"
42#include "radeon.h" 43#include "radeon.h"
43 44
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c15..a072fa8c46b0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
1cayman 0x9400 1cayman 0x9400
20x0000802C GRBM_GFX_INDEX 20x0000802C GRBM_GFX_INDEX
30x00008040 WAIT_UNTIL
30x000084FC CP_STRMOUT_CNTL 40x000084FC CP_STRMOUT_CNTL
40x000085F0 CP_COHER_CNTL 50x000085F0 CP_COHER_CNTL
50x000085F4 CP_COHER_SIZE 60x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
3240x46AC US_OUT_FMT_2 3240x46AC US_OUT_FMT_2
3250x46B0 US_OUT_FMT_3 3250x46B0 US_OUT_FMT_3
3260x46B4 US_W_FMT 3260x46B4 US_W_FMT
3270x46C0 RB3D_COLOR_CLEAR_VALUE_AR
3280x46C4 RB3D_COLOR_CLEAR_VALUE_GB
3270x4BC0 FG_FOG_BLEND 3290x4BC0 FG_FOG_BLEND
3280x4BC4 FG_FOG_FACTOR 3300x4BC4 FG_FOG_FACTOR
3290x4BC8 FG_FOG_COLOR_R 3310x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 2bb6d0e84b3d..435ed3551364 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
336 WREG32(R600_CITF_CNTL, blackout); 336 WREG32(R600_CITF_CNTL, blackout);
337 } 337 }
338 } 338 }
339 /* wait for the MC to settle */
340 udelay(100);
339} 341}
340 342
341void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 343void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3240a3d64f30..ae8b48205a6c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2215{ 2215{
2216 struct evergreen_mc_save save; 2216 struct evergreen_mc_save save;
2217 2217
2218 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2219 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2220
2221 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2222 reset_mask &= ~RADEON_RESET_DMA;
2223
2218 if (reset_mask == 0) 2224 if (reset_mask == 0)
2219 return 0; 2225 return 0;
2220 2226
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5..52b20b12c83a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
434 bo->mem = tmp_mem; 434 bo->mem = tmp_mem;
435 bdev->driver->move_notify(bo, mem); 435 bdev->driver->move_notify(bo, mem);
436 bo->mem = *mem; 436 bo->mem = *mem;
437 *mem = tmp_mem;
437 } 438 }
438 439
439 goto out_err; 440 goto out_err;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d73d6e3e17b2..8be35c809c7b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
344 344
345 if (ttm->state == tt_unpopulated) { 345 if (ttm->state == tt_unpopulated) {
346 ret = ttm->bdev->driver->ttm_tt_populate(ttm); 346 ret = ttm->bdev->driver->ttm_tt_populate(ttm);
347 if (ret) 347 if (ret) {
348 /* if we fail here don't nuke the mm node
349 * as the bo still owns it */
350 old_copy.mm_node = NULL;
348 goto out1; 351 goto out1;
352 }
349 } 353 }
350 354
351 add = 0; 355 add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
371 prot); 375 prot);
372 } else 376 } else
373 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 377 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
374 if (ret) 378 if (ret) {
379 /* failing here, means keep old copy as-is */
380 old_copy.mm_node = NULL;
375 goto out1; 381 goto out1;
382 }
376 } 383 }
377 mb(); 384 mb();
378out2: 385out2:
@@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
422 struct ttm_bo_device *bdev = bo->bdev; 429 struct ttm_bo_device *bdev = bo->bdev;
423 struct ttm_bo_driver *driver = bdev->driver; 430 struct ttm_bo_driver *driver = bdev->driver;
424 431
425 fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); 432 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
426 if (!fbo) 433 if (!fbo)
427 return -ENOMEM; 434 return -ENOMEM;
428 435
@@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
441 fbo->vm_node = NULL; 448 fbo->vm_node = NULL;
442 atomic_set(&fbo->cpu_writers, 0); 449 atomic_set(&fbo->cpu_writers, 0);
443 450
444 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 451 spin_lock(&bdev->fence_lock);
452 if (bo->sync_obj)
453 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
454 else
455 fbo->sync_obj = NULL;
456 spin_unlock(&bdev->fence_lock);
445 kref_init(&fbo->list_kref); 457 kref_init(&fbo->list_kref);
446 kref_init(&fbo->kref); 458 kref_init(&fbo->kref);
447 fbo->destroy = &ttm_transfered_destroy; 459 fbo->destroy = &ttm_transfered_destroy;
@@ -654,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
654 */ 666 */
655 667
656 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 668 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
657
658 /* ttm_buffer_object_transfer accesses bo->sync_obj */
659 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
660 spin_unlock(&bdev->fence_lock); 669 spin_unlock(&bdev->fence_lock);
661 if (tmp_obj) 670 if (tmp_obj)
662 driver->sync_obj_unref(&tmp_obj); 671 driver->sync_obj_unref(&tmp_obj);
663 672
673 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
664 if (ret) 674 if (ret)
665 return ret; 675 return ret;
666 676
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4dfa605e2d14..34e25471aeaa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -306,6 +306,9 @@
306#define USB_VENDOR_ID_EZKEY 0x0518 306#define USB_VENDOR_ID_EZKEY 0x0518
307#define USB_DEVICE_ID_BTC_8193 0x0002 307#define USB_DEVICE_ID_BTC_8193 0x0002
308 308
309#define USB_VENDOR_ID_FORMOSA 0x147a
310#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
311
309#define USB_VENDOR_ID_FREESCALE 0x15A2 312#define USB_VENDOR_ID_FREESCALE 0x15A2
310#define USB_DEVICE_ID_FREESCALE_MX28 0x004F 313#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
311 314
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 12e4fdc810bf..e766b5614ef5 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
540{ 540{
541 struct i2c_client *client = hid->driver_data; 541 struct i2c_client *client = hid->driver_data;
542 int report_id = buf[0]; 542 int report_id = buf[0];
543 int ret;
543 544
544 if (report_type == HID_INPUT_REPORT) 545 if (report_type == HID_INPUT_REPORT)
545 return -EINVAL; 546 return -EINVAL;
546 547
547 return i2c_hid_set_report(client, 548 if (report_id) {
549 buf++;
550 count--;
551 }
552
553 ret = i2c_hid_set_report(client,
548 report_type == HID_FEATURE_REPORT ? 0x03 : 0x02, 554 report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
549 report_id, buf, count); 555 report_id, buf, count);
556
557 if (report_id && ret >= 0)
558 ret++; /* add report_id to the number of transfered bytes */
559
560 return ret;
550} 561}
551 562
552static int i2c_hid_parse(struct hid_device *hid) 563static int i2c_hid_parse(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ac9e35228254..e0e6abf1cd3b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
73 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 76 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index b38ef6d8d049..64630f15f181 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
2 2
3config HYPERV 3config HYPERV
4 tristate "Microsoft Hyper-V client drivers" 4 tristate "Microsoft Hyper-V client drivers"
5 depends on X86 && ACPI && PCI 5 depends on X86 && ACPI && PCI && X86_LOCAL_APIC
6 help 6 help
7 Select this option to run Linux as a Hyper-V client operating 7 Select this option to run Linux as a Hyper-V client operating
8 system. 8 system.
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f6c0011a0337..dd289fd179ca 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -403,7 +403,7 @@ struct dm_info_header {
403 */ 403 */
404 404
405struct dm_info_msg { 405struct dm_info_msg {
406 struct dm_info_header header; 406 struct dm_header hdr;
407 __u32 reserved; 407 __u32 reserved;
408 __u32 info_size; 408 __u32 info_size;
409 __u8 info[]; 409 __u8 info[];
@@ -503,13 +503,17 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
503 503
504static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) 504static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
505{ 505{
506 switch (msg->header.type) { 506 struct dm_info_header *info_hdr;
507
508 info_hdr = (struct dm_info_header *)msg->info;
509
510 switch (info_hdr->type) {
507 case INFO_TYPE_MAX_PAGE_CNT: 511 case INFO_TYPE_MAX_PAGE_CNT:
508 pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); 512 pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
509 pr_info("Data Size is %d\n", msg->header.data_size); 513 pr_info("Data Size is %d\n", info_hdr->data_size);
510 break; 514 break;
511 default: 515 default:
512 pr_info("Received Unknown type: %d\n", msg->header.type); 516 pr_info("Received Unknown type: %d\n", info_hdr->type);
513 } 517 }
514} 518}
515 519
@@ -879,7 +883,7 @@ static int balloon_probe(struct hv_device *dev,
879 balloon_onchannelcallback, dev); 883 balloon_onchannelcallback, dev);
880 884
881 if (ret) 885 if (ret)
882 return ret; 886 goto probe_error0;
883 887
884 dm_device.dev = dev; 888 dm_device.dev = dev;
885 dm_device.state = DM_INITIALIZING; 889 dm_device.state = DM_INITIALIZING;
@@ -891,7 +895,7 @@ static int balloon_probe(struct hv_device *dev,
891 kthread_run(dm_thread_func, &dm_device, "hv_balloon"); 895 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
892 if (IS_ERR(dm_device.thread)) { 896 if (IS_ERR(dm_device.thread)) {
893 ret = PTR_ERR(dm_device.thread); 897 ret = PTR_ERR(dm_device.thread);
894 goto probe_error0; 898 goto probe_error1;
895 } 899 }
896 900
897 hv_set_drvdata(dev, &dm_device); 901 hv_set_drvdata(dev, &dm_device);
@@ -914,12 +918,12 @@ static int balloon_probe(struct hv_device *dev,
914 VM_PKT_DATA_INBAND, 918 VM_PKT_DATA_INBAND,
915 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 919 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
916 if (ret) 920 if (ret)
917 goto probe_error1; 921 goto probe_error2;
918 922
919 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 923 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
920 if (t == 0) { 924 if (t == 0) {
921 ret = -ETIMEDOUT; 925 ret = -ETIMEDOUT;
922 goto probe_error1; 926 goto probe_error2;
923 } 927 }
924 928
925 /* 929 /*
@@ -928,7 +932,7 @@ static int balloon_probe(struct hv_device *dev,
928 */ 932 */
929 if (dm_device.state == DM_INIT_ERROR) { 933 if (dm_device.state == DM_INIT_ERROR) {
930 ret = -ETIMEDOUT; 934 ret = -ETIMEDOUT;
931 goto probe_error1; 935 goto probe_error2;
932 } 936 }
933 /* 937 /*
934 * Now submit our capabilities to the host. 938 * Now submit our capabilities to the host.
@@ -961,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,
961 VM_PKT_DATA_INBAND, 965 VM_PKT_DATA_INBAND,
962 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 966 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
963 if (ret) 967 if (ret)
964 goto probe_error1; 968 goto probe_error2;
965 969
966 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 970 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
967 if (t == 0) { 971 if (t == 0) {
968 ret = -ETIMEDOUT; 972 ret = -ETIMEDOUT;
969 goto probe_error1; 973 goto probe_error2;
970 } 974 }
971 975
972 /* 976 /*
@@ -975,18 +979,20 @@ static int balloon_probe(struct hv_device *dev,
975 */ 979 */
976 if (dm_device.state == DM_INIT_ERROR) { 980 if (dm_device.state == DM_INIT_ERROR) {
977 ret = -ETIMEDOUT; 981 ret = -ETIMEDOUT;
978 goto probe_error1; 982 goto probe_error2;
979 } 983 }
980 984
981 dm_device.state = DM_INITIALIZED; 985 dm_device.state = DM_INITIALIZED;
982 986
983 return 0; 987 return 0;
984 988
985probe_error1: 989probe_error2:
986 kthread_stop(dm_device.thread); 990 kthread_stop(dm_device.thread);
987 991
988probe_error0: 992probe_error1:
989 vmbus_close(dev->channel); 993 vmbus_close(dev->channel);
994probe_error0:
995 kfree(send_buffer);
990 return ret; 996 return ret;
991} 997}
992 998
@@ -999,6 +1005,7 @@ static int balloon_remove(struct hv_device *dev)
999 1005
1000 vmbus_close(dev->channel); 1006 vmbus_close(dev->channel);
1001 kthread_stop(dm->thread); 1007 kthread_stop(dm->thread);
1008 kfree(send_buffer);
1002 1009
1003 return 0; 1010 return 0;
1004} 1011}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 32f238f3caea..89ac1cb26f24 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -180,11 +180,11 @@ config SENSORS_ADM9240
180 will be called adm9240. 180 will be called adm9240.
181 181
182config SENSORS_ADT7410 182config SENSORS_ADT7410
183 tristate "Analog Devices ADT7410" 183 tristate "Analog Devices ADT7410/ADT7420"
184 depends on I2C 184 depends on I2C
185 help 185 help
186 If you say yes here you get support for the Analog Devices 186 If you say yes here you get support for the Analog Devices
187 ADT7410 temperature monitoring chip. 187 ADT7410 and ADT7420 temperature monitoring chips.
188 188
189 This driver can also be built as a module. If so, the module 189 This driver can also be built as a module. If so, the module
190 will be called adt7410. 190 will be called adt7410.
@@ -506,7 +506,8 @@ config SENSORS_IT87
506 help 506 help
507 If you say yes here you get support for ITE IT8705F, IT8712F, 507 If you say yes here you get support for ITE IT8705F, IT8712F,
508 IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, 508 IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
509 IT8782F, and IT8783E/F sensor chips, and the SiS950 clone. 509 IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
510 SiS950 clone.
510 511
511 This driver can also be built as a module. If so, the module 512 This driver can also be built as a module. If so, the module
512 will be called it87. 513 will be called it87.
@@ -529,8 +530,8 @@ config SENSORS_JC42
529 temperature sensors, which are used on many DDR3 memory modules for 530 temperature sensors, which are used on many DDR3 memory modules for
530 mobile devices and servers. Support will include, but not be limited 531 mobile devices and servers. Support will include, but not be limited
531 to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805, 532 to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
532 MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002, 533 MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
533 STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2. 534 STTS2002, STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
534 535
535 This driver can also be built as a module. If so, the module 536 This driver can also be built as a module. If so, the module
536 will be called jc42. 537 will be called jc42.
@@ -854,6 +855,17 @@ config SENSORS_MAX6650
854 This driver can also be built as a module. If so, the module 855 This driver can also be built as a module. If so, the module
855 will be called max6650. 856 will be called max6650.
856 857
858config SENSORS_MAX6697
859 tristate "Maxim MAX6697 and compatibles"
860 depends on I2C
861 help
862 If you say yes here you get support for MAX6581, MAX6602, MAX6622,
863 MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
864 temperature sensor chips.
865
866 This driver can also be built as a module. If so, the module
867 will be called max6697.
868
857config SENSORS_MCP3021 869config SENSORS_MCP3021
858 tristate "Microchip MCP3021 and compatibles" 870 tristate "Microchip MCP3021 and compatibles"
859 depends on I2C 871 depends on I2C
@@ -1145,6 +1157,16 @@ config SENSORS_AMC6821
1145 This driver can also be build as a module. If so, the module 1157 This driver can also be build as a module. If so, the module
1146 will be called amc6821. 1158 will be called amc6821.
1147 1159
1160config SENSORS_INA209
1161 tristate "TI / Burr Brown INA209"
1162 depends on I2C
1163 help
1164 If you say yes here you get support for the TI / Burr Brown INA209
1165 voltage / current / power monitor I2C interface.
1166
1167 This driver can also be built as a module. If so, the module will
1168 be called ina209.
1169
1148config SENSORS_INA2XX 1170config SENSORS_INA2XX
1149 tristate "Texas Instruments INA219 and compatibles" 1171 tristate "Texas Instruments INA219 and compatibles"
1150 depends on I2C 1172 depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 5da287443f6c..8d6d97ea7c1e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
65obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o 65obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
66obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o 66obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
67obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o 67obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
68obj-$(CONFIG_SENSORS_INA209) += ina209.o
68obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o 69obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
69obj-$(CONFIG_SENSORS_IT87) += it87.o 70obj-$(CONFIG_SENSORS_IT87) += it87.o
70obj-$(CONFIG_SENSORS_JC42) += jc42.o 71obj-$(CONFIG_SENSORS_JC42) += jc42.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o
99obj-$(CONFIG_SENSORS_MAX6639) += max6639.o 100obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
100obj-$(CONFIG_SENSORS_MAX6642) += max6642.o 101obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
101obj-$(CONFIG_SENSORS_MAX6650) += max6650.o 102obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
103obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
102obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o 104obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
103obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o 105obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
104obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o 106obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 1672e2a5db46..6351aba8819c 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -911,7 +911,7 @@ exit:
911 return res; 911 return res;
912} 912}
913 913
914static int acpi_power_meter_remove(struct acpi_device *device, int type) 914static int acpi_power_meter_remove(struct acpi_device *device)
915{ 915{
916 struct acpi_power_meter_resource *resource; 916 struct acpi_power_meter_resource *resource;
917 917
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f3a5d4764eb9..5d501adc3e54 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -137,7 +137,7 @@ static ssize_t set_max_min(struct device *dev,
137 if (ret < 0) 137 if (ret < 0)
138 return ret; 138 return ret;
139 139
140 temp = SENSORS_LIMIT(temp, -40000, 85000); 140 temp = clamp_val(temp, -40000, 85000);
141 temp = (temp + (temp < 0 ? -500 : 500)) / 1000; 141 temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
142 142
143 mutex_lock(&data->lock); 143 mutex_lock(&data->lock);
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index fd1d1b15854e..71bcba8abfc0 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -193,7 +193,7 @@ static ssize_t set_temp_max(struct device *dev,
193 temp /= 1000; 193 temp /= 1000;
194 194
195 mutex_lock(&data->update_lock); 195 mutex_lock(&data->update_lock);
196 data->temp_max[index] = SENSORS_LIMIT(temp, -128, 127); 196 data->temp_max[index] = clamp_val(temp, -128, 127);
197 if (!read_only) 197 if (!read_only)
198 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), 198 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
199 data->temp_max[index]); 199 data->temp_max[index]);
@@ -218,7 +218,7 @@ static ssize_t set_temp_min(struct device *dev,
218 temp /= 1000; 218 temp /= 1000;
219 219
220 mutex_lock(&data->update_lock); 220 mutex_lock(&data->update_lock);
221 data->temp_min[index] = SENSORS_LIMIT(temp, -128, 127); 221 data->temp_min[index] = clamp_val(temp, -128, 127);
222 if (!read_only) 222 if (!read_only)
223 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), 223 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
224 data->temp_min[index]); 224 data->temp_min[index]);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 0f068e7297ee..ea09046e651d 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,7 +197,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
197 }; 197 };
198#define NEG12_OFFSET 16000 198#define NEG12_OFFSET 16000
199#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from)) 199#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
200#define INS_TO_REG(n, val) (SENSORS_LIMIT(SCALE(val, adm1026_scaling[n], 192),\ 200#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
201 0, 255)) 201 0, 255))
202#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n])) 202#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
203 203
@@ -207,7 +207,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
207 * 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000 207 * 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000
208 */ 208 */
209#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \ 209#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \
210 SENSORS_LIMIT(1350000 / ((val) * (div)), \ 210 clamp_val(1350000 / ((val) * (div)), \
211 1, 254)) 211 1, 254))
212#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \ 212#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
213 1350000 / ((val) * (div))) 213 1350000 / ((val) * (div)))
@@ -215,14 +215,14 @@ static int adm1026_scaling[] = { /* .001 Volts */
215#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0) 215#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
216 216
217/* Temperature is reported in 1 degC increments */ 217/* Temperature is reported in 1 degC increments */
218#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \ 218#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
219 / 1000, -127, 127)) 219 / 1000, -127, 127))
220#define TEMP_FROM_REG(val) ((val) * 1000) 220#define TEMP_FROM_REG(val) ((val) * 1000)
221#define OFFSET_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \ 221#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
222 / 1000, -127, 127)) 222 / 1000, -127, 127))
223#define OFFSET_FROM_REG(val) ((val) * 1000) 223#define OFFSET_FROM_REG(val) ((val) * 1000)
224 224
225#define PWM_TO_REG(val) (SENSORS_LIMIT(val, 0, 255)) 225#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
226#define PWM_FROM_REG(val) (val) 226#define PWM_FROM_REG(val) (val)
227 227
228#define PWM_MIN_TO_REG(val) ((val) & 0xf0) 228#define PWM_MIN_TO_REG(val) ((val) & 0xf0)
@@ -233,7 +233,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
233 * indicates that the DAC could be used to drive the fans, but in our 233 * indicates that the DAC could be used to drive the fans, but in our
234 * example board (Arima HDAMA) it isn't connected to the fans at all. 234 * example board (Arima HDAMA) it isn't connected to the fans at all.
235 */ 235 */
236#define DAC_TO_REG(val) (SENSORS_LIMIT(((((val) * 255) + 500) / 2500), 0, 255)) 236#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
237#define DAC_FROM_REG(val) (((val) * 2500) / 255) 237#define DAC_FROM_REG(val) (((val) * 2500) / 255)
238 238
239/* 239/*
@@ -933,7 +933,7 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
933 return; 933 return;
934 934
935 new_min = data->fan_min[fan] * old_div / new_div; 935 new_min = data->fan_min[fan] * old_div / new_div;
936 new_min = SENSORS_LIMIT(new_min, 1, 254); 936 new_min = clamp_val(new_min, 1, 254);
937 data->fan_min[fan] = new_min; 937 data->fan_min[fan] = new_min;
938 adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min); 938 adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min);
939} 939}
@@ -1527,7 +1527,7 @@ static ssize_t set_auto_pwm_min(struct device *dev,
1527 return err; 1527 return err;
1528 1528
1529 mutex_lock(&data->update_lock); 1529 mutex_lock(&data->update_lock);
1530 data->pwm1.auto_pwm_min = SENSORS_LIMIT(val, 0, 255); 1530 data->pwm1.auto_pwm_min = clamp_val(val, 0, 255);
1531 if (data->pwm1.enable == 2) { /* apply immediately */ 1531 if (data->pwm1.enable == 2) { /* apply immediately */
1532 data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) | 1532 data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
1533 PWM_MIN_TO_REG(data->pwm1.auto_pwm_min)); 1533 PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index c6a4631e833f..253ea396106d 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -162,13 +162,13 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
162static int FAN_TO_REG(int reg, int div) 162static int FAN_TO_REG(int reg, int div)
163{ 163{
164 int tmp; 164 int tmp;
165 tmp = FAN_FROM_REG(SENSORS_LIMIT(reg, 0, 65535), div); 165 tmp = FAN_FROM_REG(clamp_val(reg, 0, 65535), div);
166 return tmp > 255 ? 255 : tmp; 166 return tmp > 255 ? 255 : tmp;
167} 167}
168 168
169#define FAN_DIV_FROM_REG(reg) (1<<(((reg)&0xc0)>>6)) 169#define FAN_DIV_FROM_REG(reg) (1<<(((reg)&0xc0)>>6))
170 170
171#define PWM_TO_REG(val) (SENSORS_LIMIT((val), 0, 255) >> 4) 171#define PWM_TO_REG(val) (clamp_val((val), 0, 255) >> 4)
172#define PWM_FROM_REG(val) ((val) << 4) 172#define PWM_FROM_REG(val) ((val) << 4)
173 173
174#define FAN_CHAN_FROM_REG(reg) (((reg) >> 5) & 7) 174#define FAN_CHAN_FROM_REG(reg) (((reg) >> 5) & 7)
@@ -675,7 +675,7 @@ static ssize_t set_temp_offset(struct device *dev,
675 if (ret) 675 if (ret)
676 return ret; 676 return ret;
677 677
678 val = SENSORS_LIMIT(val, -15000, 15000); 678 val = clamp_val(val, -15000, 15000);
679 mutex_lock(&data->update_lock); 679 mutex_lock(&data->update_lock);
680 data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val); 680 data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
681 adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr), 681 adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr),
@@ -696,7 +696,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
696 if (ret) 696 if (ret)
697 return ret; 697 return ret;
698 698
699 val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); 699 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
700 mutex_lock(&data->update_lock); 700 mutex_lock(&data->update_lock);
701 data->temp_min[nr] = TEMP_TO_REG(val); 701 data->temp_min[nr] = TEMP_TO_REG(val);
702 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), 702 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +717,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
717 if (ret) 717 if (ret)
718 return ret; 718 return ret;
719 719
720 val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); 720 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
721 mutex_lock(&data->update_lock); 721 mutex_lock(&data->update_lock);
722 data->temp_max[nr] = TEMP_TO_REG(val); 722 data->temp_max[nr] = TEMP_TO_REG(val);
723 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), 723 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +738,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
738 if (ret) 738 if (ret)
739 return ret; 739 return ret;
740 740
741 val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); 741 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
742 mutex_lock(&data->update_lock); 742 mutex_lock(&data->update_lock);
743 data->temp_crit[nr] = TEMP_TO_REG(val); 743 data->temp_crit[nr] = TEMP_TO_REG(val);
744 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), 744 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index dafa477715e3..2416628e0ab1 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,13 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
98 98
99static inline u8 IN_TO_REG(unsigned long val, int n) 99static inline u8 IN_TO_REG(unsigned long val, int n)
100{ 100{
101 return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255); 101 return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
102} 102}
103 103
104/* temperature range: -40..125, 127 disables temperature alarm */ 104/* temperature range: -40..125, 127 disables temperature alarm */
105static inline s8 TEMP_TO_REG(long val) 105static inline s8 TEMP_TO_REG(long val)
106{ 106{
107 return SENSORS_LIMIT(SCALE(val, 1, 1000), -40, 127); 107 return clamp_val(SCALE(val, 1, 1000), -40, 127);
108} 108}
109 109
110/* two fans, each with low fan speed limit */ 110/* two fans, each with low fan speed limit */
@@ -122,7 +122,7 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
122/* analog out 0..1250mV */ 122/* analog out 0..1250mV */
123static inline u8 AOUT_TO_REG(unsigned long val) 123static inline u8 AOUT_TO_REG(unsigned long val)
124{ 124{
125 return SENSORS_LIMIT(SCALE(val, 255, 1250), 0, 255); 125 return clamp_val(SCALE(val, 255, 1250), 0, 255);
126} 126}
127 127
128static inline unsigned int AOUT_FROM_REG(u8 reg) 128static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 409b5c16defb..ba962ac4b81f 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -163,9 +163,9 @@ static int ads7828_probe(struct i2c_client *client,
163 163
164 /* Bound Vref with min/max values if it was provided */ 164 /* Bound Vref with min/max values if it was provided */
165 if (data->vref_mv) 165 if (data->vref_mv)
166 data->vref_mv = SENSORS_LIMIT(data->vref_mv, 166 data->vref_mv = clamp_val(data->vref_mv,
167 ADS7828_EXT_VREF_MV_MIN, 167 ADS7828_EXT_VREF_MV_MIN,
168 ADS7828_EXT_VREF_MV_MAX); 168 ADS7828_EXT_VREF_MV_MAX);
169 else 169 else
170 data->vref_mv = ADS7828_INT_VREF_MV; 170 data->vref_mv = ADS7828_INT_VREF_MV;
171 171
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 030c8d7c33a5..99a7290da0a3 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -78,10 +78,6 @@ enum adt7410_type { /* keep sorted in alphabetical order */
78 adt7410, 78 adt7410,
79}; 79};
80 80
81/* Addresses scanned */
82static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
83 I2C_CLIENT_END };
84
85static const u8 ADT7410_REG_TEMP[4] = { 81static const u8 ADT7410_REG_TEMP[4] = {
86 ADT7410_TEMPERATURE, /* input */ 82 ADT7410_TEMPERATURE, /* input */
87 ADT7410_T_ALARM_HIGH, /* high */ 83 ADT7410_T_ALARM_HIGH, /* high */
@@ -173,8 +169,8 @@ abort:
173 169
174static s16 ADT7410_TEMP_TO_REG(long temp) 170static s16 ADT7410_TEMP_TO_REG(long temp)
175{ 171{
176 return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, ADT7410_TEMP_MIN, 172 return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7410_TEMP_MIN,
177 ADT7410_TEMP_MAX) * 128, 1000); 173 ADT7410_TEMP_MAX) * 128, 1000);
178} 174}
179 175
180static int ADT7410_REG_TO_TEMP(struct adt7410_data *data, s16 reg) 176static int ADT7410_REG_TO_TEMP(struct adt7410_data *data, s16 reg)
@@ -269,9 +265,9 @@ static ssize_t adt7410_set_t_hyst(struct device *dev,
269 return ret; 265 return ret;
270 /* convert absolute hysteresis value to a 4 bit delta value */ 266 /* convert absolute hysteresis value to a 4 bit delta value */
271 limit = ADT7410_REG_TO_TEMP(data, data->temp[1]); 267 limit = ADT7410_REG_TO_TEMP(data, data->temp[1]);
272 hyst = SENSORS_LIMIT(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX); 268 hyst = clamp_val(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
273 data->hyst = SENSORS_LIMIT(DIV_ROUND_CLOSEST(limit - hyst, 1000), 269 data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
274 0, ADT7410_T_HYST_MASK); 270 ADT7410_T_HYST_MASK);
275 ret = i2c_smbus_write_byte_data(client, ADT7410_T_HYST, data->hyst); 271 ret = i2c_smbus_write_byte_data(client, ADT7410_T_HYST, data->hyst);
276 if (ret) 272 if (ret)
277 return ret; 273 return ret;
@@ -364,6 +360,7 @@ static int adt7410_probe(struct i2c_client *client,
364 /* 360 /*
365 * Set to 16 bit resolution, continous conversion and comparator mode. 361 * Set to 16 bit resolution, continous conversion and comparator mode.
366 */ 362 */
363 ret &= ~ADT7410_MODE_MASK;
367 data->config = ret | ADT7410_FULL | ADT7410_RESOLUTION | 364 data->config = ret | ADT7410_FULL | ADT7410_RESOLUTION |
368 ADT7410_EVENT_MODE; 365 ADT7410_EVENT_MODE;
369 if (data->config != data->oldconfig) { 366 if (data->config != data->oldconfig) {
@@ -410,11 +407,12 @@ static int adt7410_remove(struct i2c_client *client)
410 407
411static const struct i2c_device_id adt7410_ids[] = { 408static const struct i2c_device_id adt7410_ids[] = {
412 { "adt7410", adt7410, }, 409 { "adt7410", adt7410, },
410 { "adt7420", adt7410, },
413 { /* LIST END */ } 411 { /* LIST END */ }
414}; 412};
415MODULE_DEVICE_TABLE(i2c, adt7410_ids); 413MODULE_DEVICE_TABLE(i2c, adt7410_ids);
416 414
417#ifdef CONFIG_PM 415#ifdef CONFIG_PM_SLEEP
418static int adt7410_suspend(struct device *dev) 416static int adt7410_suspend(struct device *dev)
419{ 417{
420 int ret; 418 int ret;
@@ -436,10 +434,8 @@ static int adt7410_resume(struct device *dev)
436 return ret; 434 return ret;
437} 435}
438 436
439static const struct dev_pm_ops adt7410_dev_pm_ops = { 437static SIMPLE_DEV_PM_OPS(adt7410_dev_pm_ops, adt7410_suspend, adt7410_resume);
440 .suspend = adt7410_suspend, 438
441 .resume = adt7410_resume,
442};
443#define ADT7410_DEV_PM_OPS (&adt7410_dev_pm_ops) 439#define ADT7410_DEV_PM_OPS (&adt7410_dev_pm_ops)
444#else 440#else
445#define ADT7410_DEV_PM_OPS NULL 441#define ADT7410_DEV_PM_OPS NULL
@@ -454,11 +450,11 @@ static struct i2c_driver adt7410_driver = {
454 .probe = adt7410_probe, 450 .probe = adt7410_probe,
455 .remove = adt7410_remove, 451 .remove = adt7410_remove,
456 .id_table = adt7410_ids, 452 .id_table = adt7410_ids,
457 .address_list = normal_i2c, 453 .address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
458}; 454};
459 455
460module_i2c_driver(adt7410_driver); 456module_i2c_driver(adt7410_driver);
461 457
462MODULE_AUTHOR("Hartmut Knaack"); 458MODULE_AUTHOR("Hartmut Knaack");
463MODULE_DESCRIPTION("ADT7410 driver"); 459MODULE_DESCRIPTION("ADT7410/ADT7420 driver");
464MODULE_LICENSE("GPL"); 460MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 98a7d81e25c5..69481d3a3d23 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -836,7 +836,7 @@ static ssize_t set_temp_min(struct device *dev,
836 return -EINVAL; 836 return -EINVAL;
837 837
838 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64; 838 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
839 temp = SENSORS_LIMIT(temp, 0, 255); 839 temp = clamp_val(temp, 0, 255);
840 840
841 mutex_lock(&data->lock); 841 mutex_lock(&data->lock);
842 data->temp_min[attr->index] = temp; 842 data->temp_min[attr->index] = temp;
@@ -874,7 +874,7 @@ static ssize_t set_temp_max(struct device *dev,
874 return -EINVAL; 874 return -EINVAL;
875 875
876 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64; 876 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
877 temp = SENSORS_LIMIT(temp, 0, 255); 877 temp = clamp_val(temp, 0, 255);
878 878
879 mutex_lock(&data->lock); 879 mutex_lock(&data->lock);
880 data->temp_max[attr->index] = temp; 880 data->temp_max[attr->index] = temp;
@@ -939,7 +939,7 @@ static ssize_t set_volt_max(struct device *dev,
939 939
940 temp *= 1000; /* convert mV to uV */ 940 temp *= 1000; /* convert mV to uV */
941 temp = DIV_ROUND_CLOSEST(temp, x); 941 temp = DIV_ROUND_CLOSEST(temp, x);
942 temp = SENSORS_LIMIT(temp, 0, 255); 942 temp = clamp_val(temp, 0, 255);
943 943
944 mutex_lock(&data->lock); 944 mutex_lock(&data->lock);
945 data->volt_max[attr->index] = temp; 945 data->volt_max[attr->index] = temp;
@@ -981,7 +981,7 @@ static ssize_t set_volt_min(struct device *dev,
981 981
982 temp *= 1000; /* convert mV to uV */ 982 temp *= 1000; /* convert mV to uV */
983 temp = DIV_ROUND_CLOSEST(temp, x); 983 temp = DIV_ROUND_CLOSEST(temp, x);
984 temp = SENSORS_LIMIT(temp, 0, 255); 984 temp = clamp_val(temp, 0, 255);
985 985
986 mutex_lock(&data->lock); 986 mutex_lock(&data->lock);
987 data->volt_min[attr->index] = temp; 987 data->volt_min[attr->index] = temp;
@@ -1071,7 +1071,7 @@ static ssize_t set_fan_min(struct device *dev,
1071 1071
1072 temp = FAN_RPM_TO_PERIOD(temp); 1072 temp = FAN_RPM_TO_PERIOD(temp);
1073 temp >>= 8; 1073 temp >>= 8;
1074 temp = SENSORS_LIMIT(temp, 1, 255); 1074 temp = clamp_val(temp, 1, 255);
1075 1075
1076 mutex_lock(&data->lock); 1076 mutex_lock(&data->lock);
1077 data->fan_min[attr->index] = temp; 1077 data->fan_min[attr->index] = temp;
@@ -1149,7 +1149,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
1149 if (kstrtol(buf, 10, &temp)) 1149 if (kstrtol(buf, 10, &temp))
1150 return -EINVAL; 1150 return -EINVAL;
1151 1151
1152 temp = SENSORS_LIMIT(temp, 0, 255); 1152 temp = clamp_val(temp, 0, 255);
1153 1153
1154 mutex_lock(&data->lock); 1154 mutex_lock(&data->lock);
1155 data->pwm[attr->index] = temp; 1155 data->pwm[attr->index] = temp;
@@ -1179,7 +1179,7 @@ static ssize_t set_pwm_max(struct device *dev,
1179 if (kstrtol(buf, 10, &temp)) 1179 if (kstrtol(buf, 10, &temp))
1180 return -EINVAL; 1180 return -EINVAL;
1181 1181
1182 temp = SENSORS_LIMIT(temp, 0, 255); 1182 temp = clamp_val(temp, 0, 255);
1183 1183
1184 mutex_lock(&data->lock); 1184 mutex_lock(&data->lock);
1185 data->pwm_max = temp; 1185 data->pwm_max = temp;
@@ -1211,7 +1211,7 @@ static ssize_t set_pwm_min(struct device *dev,
1211 if (kstrtol(buf, 10, &temp)) 1211 if (kstrtol(buf, 10, &temp))
1212 return -EINVAL; 1212 return -EINVAL;
1213 1213
1214 temp = SENSORS_LIMIT(temp, 0, 255); 1214 temp = clamp_val(temp, 0, 255);
1215 1215
1216 mutex_lock(&data->lock); 1216 mutex_lock(&data->lock);
1217 data->pwm_min[attr->index] = temp; 1217 data->pwm_min[attr->index] = temp;
@@ -1246,7 +1246,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
1246 return -EINVAL; 1246 return -EINVAL;
1247 1247
1248 temp = DIV_ROUND_CLOSEST(temp, 1000); 1248 temp = DIV_ROUND_CLOSEST(temp, 1000);
1249 temp = SENSORS_LIMIT(temp, 0, 15); 1249 temp = clamp_val(temp, 0, 15);
1250 1250
1251 /* package things up */ 1251 /* package things up */
1252 temp &= ADT7462_PWM_HYST_MASK; 1252 temp &= ADT7462_PWM_HYST_MASK;
@@ -1333,7 +1333,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
1333 return -EINVAL; 1333 return -EINVAL;
1334 1334
1335 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64; 1335 temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
1336 temp = SENSORS_LIMIT(temp, 0, 255); 1336 temp = clamp_val(temp, 0, 255);
1337 1337
1338 mutex_lock(&data->lock); 1338 mutex_lock(&data->lock);
1339 data->pwm_tmin[attr->index] = temp; 1339 data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 39ecb1a3b9ef..b83bf4bb95eb 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -452,7 +452,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
452 if (kstrtol(buf, 10, &temp)) 452 if (kstrtol(buf, 10, &temp))
453 return -EINVAL; 453 return -EINVAL;
454 454
455 temp = SENSORS_LIMIT(temp, 0, 60000); 455 temp = clamp_val(temp, 0, 60000);
456 456
457 mutex_lock(&data->lock); 457 mutex_lock(&data->lock);
458 data->auto_update_interval = temp; 458 data->auto_update_interval = temp;
@@ -481,7 +481,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
481 if (kstrtol(buf, 10, &temp)) 481 if (kstrtol(buf, 10, &temp))
482 return -EINVAL; 482 return -EINVAL;
483 483
484 temp = SENSORS_LIMIT(temp, -1, 10); 484 temp = clamp_val(temp, -1, 10);
485 485
486 mutex_lock(&data->lock); 486 mutex_lock(&data->lock);
487 data->num_temp_sensors = temp; 487 data->num_temp_sensors = temp;
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
515 return -EINVAL; 515 return -EINVAL;
516 516
517 temp = DIV_ROUND_CLOSEST(temp, 1000); 517 temp = DIV_ROUND_CLOSEST(temp, 1000);
518 temp = SENSORS_LIMIT(temp, 0, 255); 518 temp = clamp_val(temp, 0, 255);
519 519
520 mutex_lock(&data->lock); 520 mutex_lock(&data->lock);
521 data->temp_min[attr->index] = temp; 521 data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
549 return -EINVAL; 549 return -EINVAL;
550 550
551 temp = DIV_ROUND_CLOSEST(temp, 1000); 551 temp = DIV_ROUND_CLOSEST(temp, 1000);
552 temp = SENSORS_LIMIT(temp, 0, 255); 552 temp = clamp_val(temp, 0, 255);
553 553
554 mutex_lock(&data->lock); 554 mutex_lock(&data->lock);
555 data->temp_max[attr->index] = temp; 555 data->temp_max[attr->index] = temp;
@@ -604,7 +604,7 @@ static ssize_t set_fan_max(struct device *dev,
604 return -EINVAL; 604 return -EINVAL;
605 605
606 temp = FAN_RPM_TO_PERIOD(temp); 606 temp = FAN_RPM_TO_PERIOD(temp);
607 temp = SENSORS_LIMIT(temp, 1, 65534); 607 temp = clamp_val(temp, 1, 65534);
608 608
609 mutex_lock(&data->lock); 609 mutex_lock(&data->lock);
610 data->fan_max[attr->index] = temp; 610 data->fan_max[attr->index] = temp;
@@ -641,7 +641,7 @@ static ssize_t set_fan_min(struct device *dev,
641 return -EINVAL; 641 return -EINVAL;
642 642
643 temp = FAN_RPM_TO_PERIOD(temp); 643 temp = FAN_RPM_TO_PERIOD(temp);
644 temp = SENSORS_LIMIT(temp, 1, 65534); 644 temp = clamp_val(temp, 1, 65534);
645 645
646 mutex_lock(&data->lock); 646 mutex_lock(&data->lock);
647 data->fan_min[attr->index] = temp; 647 data->fan_min[attr->index] = temp;
@@ -717,7 +717,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
717 if (kstrtol(buf, 10, &temp)) 717 if (kstrtol(buf, 10, &temp))
718 return -EINVAL; 718 return -EINVAL;
719 719
720 temp = SENSORS_LIMIT(temp, 0, 255); 720 temp = clamp_val(temp, 0, 255);
721 721
722 mutex_lock(&data->lock); 722 mutex_lock(&data->lock);
723 data->pwm[attr->index] = temp; 723 data->pwm[attr->index] = temp;
@@ -749,7 +749,7 @@ static ssize_t set_pwm_max(struct device *dev,
749 if (kstrtol(buf, 10, &temp)) 749 if (kstrtol(buf, 10, &temp))
750 return -EINVAL; 750 return -EINVAL;
751 751
752 temp = SENSORS_LIMIT(temp, 0, 255); 752 temp = clamp_val(temp, 0, 255);
753 753
754 mutex_lock(&data->lock); 754 mutex_lock(&data->lock);
755 data->pwm_max[attr->index] = temp; 755 data->pwm_max[attr->index] = temp;
@@ -782,7 +782,7 @@ static ssize_t set_pwm_min(struct device *dev,
782 if (kstrtol(buf, 10, &temp)) 782 if (kstrtol(buf, 10, &temp))
783 return -EINVAL; 783 return -EINVAL;
784 784
785 temp = SENSORS_LIMIT(temp, 0, 255); 785 temp = clamp_val(temp, 0, 255);
786 786
787 mutex_lock(&data->lock); 787 mutex_lock(&data->lock);
788 data->pwm_min[attr->index] = temp; 788 data->pwm_min[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
826 return -EINVAL; 826 return -EINVAL;
827 827
828 temp = DIV_ROUND_CLOSEST(temp, 1000); 828 temp = DIV_ROUND_CLOSEST(temp, 1000);
829 temp = SENSORS_LIMIT(temp, 0, 255); 829 temp = clamp_val(temp, 0, 255);
830 830
831 mutex_lock(&data->lock); 831 mutex_lock(&data->lock);
832 data->pwm_tmin[attr->index] = temp; 832 data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 989e54c39252..22d008bbdc10 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -201,10 +201,10 @@ static inline u16 temp2reg(struct adt7475_data *data, long val)
201 u16 ret; 201 u16 ret;
202 202
203 if (!(data->config5 & CONFIG5_TWOSCOMP)) { 203 if (!(data->config5 & CONFIG5_TWOSCOMP)) {
204 val = SENSORS_LIMIT(val, -64000, 191000); 204 val = clamp_val(val, -64000, 191000);
205 ret = (val + 64500) / 1000; 205 ret = (val + 64500) / 1000;
206 } else { 206 } else {
207 val = SENSORS_LIMIT(val, -128000, 127000); 207 val = clamp_val(val, -128000, 127000);
208 if (val < -500) 208 if (val < -500)
209 ret = (256500 + val) / 1000; 209 ret = (256500 + val) / 1000;
210 else 210 else
@@ -240,7 +240,7 @@ static inline u16 rpm2tach(unsigned long rpm)
240 if (rpm == 0) 240 if (rpm == 0)
241 return 0; 241 return 0;
242 242
243 return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF); 243 return clamp_val((90000 * 60) / rpm, 1, 0xFFFF);
244} 244}
245 245
246/* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */ 246/* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */
@@ -271,7 +271,7 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
271 reg = (volt * 1024) / 2250; 271 reg = (volt * 1024) / 2250;
272 else 272 else
273 reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250); 273 reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
274 return SENSORS_LIMIT(reg, 0, 1023) & (0xff << 2); 274 return clamp_val(reg, 0, 1023) & (0xff << 2);
275} 275}
276 276
277static u16 adt7475_read_word(struct i2c_client *client, int reg) 277static u16 adt7475_read_word(struct i2c_client *client, int reg)
@@ -451,10 +451,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
451 switch (sattr->nr) { 451 switch (sattr->nr) {
452 case OFFSET: 452 case OFFSET:
453 if (data->config5 & CONFIG5_TEMPOFFSET) { 453 if (data->config5 & CONFIG5_TEMPOFFSET) {
454 val = SENSORS_LIMIT(val, -63000, 127000); 454 val = clamp_val(val, -63000, 127000);
455 out = data->temp[OFFSET][sattr->index] = val / 1000; 455 out = data->temp[OFFSET][sattr->index] = val / 1000;
456 } else { 456 } else {
457 val = SENSORS_LIMIT(val, -63000, 64000); 457 val = clamp_val(val, -63000, 64000);
458 out = data->temp[OFFSET][sattr->index] = val / 500; 458 out = data->temp[OFFSET][sattr->index] = val / 500;
459 } 459 }
460 break; 460 break;
@@ -471,7 +471,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
471 adt7475_read_hystersis(client); 471 adt7475_read_hystersis(client);
472 472
473 temp = reg2temp(data, data->temp[THERM][sattr->index]); 473 temp = reg2temp(data, data->temp[THERM][sattr->index]);
474 val = SENSORS_LIMIT(val, temp - 15000, temp); 474 val = clamp_val(val, temp - 15000, temp);
475 val = (temp - val) / 1000; 475 val = (temp - val) / 1000;
476 476
477 if (sattr->index != 1) { 477 if (sattr->index != 1) {
@@ -577,7 +577,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
577 * to figure the range 577 * to figure the range
578 */ 578 */
579 temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]); 579 temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
580 val = SENSORS_LIMIT(val, temp + autorange_table[0], 580 val = clamp_val(val, temp + autorange_table[0],
581 temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]); 581 temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
582 val -= temp; 582 val -= temp;
583 583
@@ -701,7 +701,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
701 break; 701 break;
702 } 702 }
703 703
704 data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF); 704 data->pwm[sattr->nr][sattr->index] = clamp_val(val, 0, 0xFF);
705 i2c_smbus_write_byte_data(client, reg, 705 i2c_smbus_write_byte_data(client, reg,
706 data->pwm[sattr->nr][sattr->index]); 706 data->pwm[sattr->nr][sattr->index]);
707 707
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index ae482e3afdac..4fe49d2bfe1d 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -241,7 +241,7 @@ static ssize_t set_temp(
241 int ret = kstrtol(buf, 10, &val); 241 int ret = kstrtol(buf, 10, &val);
242 if (ret) 242 if (ret)
243 return ret; 243 return ret;
244 val = SENSORS_LIMIT(val / 1000, -128, 127); 244 val = clamp_val(val / 1000, -128, 127);
245 245
246 mutex_lock(&data->update_lock); 246 mutex_lock(&data->update_lock);
247 data->temp[ix] = val; 247 data->temp[ix] = val;
@@ -332,7 +332,7 @@ static ssize_t set_pwm1(
332 return ret; 332 return ret;
333 333
334 mutex_lock(&data->update_lock); 334 mutex_lock(&data->update_lock);
335 data->pwm1 = SENSORS_LIMIT(val , 0, 255); 335 data->pwm1 = clamp_val(val , 0, 255);
336 i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1); 336 i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
337 mutex_unlock(&data->update_lock); 337 mutex_unlock(&data->update_lock);
338 return count; 338 return count;
@@ -499,11 +499,11 @@ static ssize_t set_temp_auto_point_temp(
499 mutex_lock(&data->update_lock); 499 mutex_lock(&data->update_lock);
500 switch (ix) { 500 switch (ix) {
501 case 0: 501 case 0:
502 ptemp[0] = SENSORS_LIMIT(val / 1000, 0, 502 ptemp[0] = clamp_val(val / 1000, 0,
503 data->temp1_auto_point_temp[1]); 503 data->temp1_auto_point_temp[1]);
504 ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 504 ptemp[0] = clamp_val(ptemp[0], 0,
505 data->temp2_auto_point_temp[1]); 505 data->temp2_auto_point_temp[1]);
506 ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63); 506 ptemp[0] = clamp_val(ptemp[0], 0, 63);
507 if (i2c_smbus_write_byte_data( 507 if (i2c_smbus_write_byte_data(
508 client, 508 client,
509 AMC6821_REG_PSV_TEMP, 509 AMC6821_REG_PSV_TEMP,
@@ -515,20 +515,12 @@ static ssize_t set_temp_auto_point_temp(
515 goto EXIT; 515 goto EXIT;
516 break; 516 break;
517 case 1: 517 case 1:
518 ptemp[1] = SENSORS_LIMIT( 518 ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
519 val / 1000,
520 (ptemp[0] & 0x7C) + 4,
521 124);
522 ptemp[1] &= 0x7C; 519 ptemp[1] &= 0x7C;
523 ptemp[2] = SENSORS_LIMIT( 520 ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255);
524 ptemp[2], ptemp[1] + 1,
525 255);
526 break; 521 break;
527 case 2: 522 case 2:
528 ptemp[2] = SENSORS_LIMIT( 523 ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255);
529 val / 1000,
530 ptemp[1]+1,
531 255);
532 break; 524 break;
533 default: 525 default:
534 dev_dbg(dev, "Unknown attr->index (%d).\n", ix); 526 dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
@@ -561,7 +553,7 @@ static ssize_t set_pwm1_auto_point_pwm(
561 return ret; 553 return ret;
562 554
563 mutex_lock(&data->update_lock); 555 mutex_lock(&data->update_lock);
564 data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254); 556 data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254);
565 if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP, 557 if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
566 data->pwm1_auto_point_pwm[1])) { 558 data->pwm1_auto_point_pwm[1])) {
567 dev_err(&client->dev, "Register write error, aborting.\n"); 559 dev_err(&client->dev, "Register write error, aborting.\n");
@@ -629,7 +621,7 @@ static ssize_t set_fan(
629 val = 1 > val ? 0xFFFF : 6000000/val; 621 val = 1 > val ? 0xFFFF : 6000000/val;
630 622
631 mutex_lock(&data->update_lock); 623 mutex_lock(&data->update_lock);
632 data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF); 624 data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF);
633 if (i2c_smbus_write_byte_data(client, fan_reg_low[ix], 625 if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
634 data->fan[ix] & 0xFF)) { 626 data->fan[ix] & 0xFF)) {
635 dev_err(&client->dev, "Register write error, aborting.\n"); 627 dev_err(&client->dev, "Register write error, aborting.\n");
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 520e5bf4f76d..6ac612cabda1 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -114,7 +114,7 @@ static const u16 asb100_reg_temp_hyst[] = {0, 0x3a, 0x153, 0x253, 0x19};
114 */ 114 */
115static u8 IN_TO_REG(unsigned val) 115static u8 IN_TO_REG(unsigned val)
116{ 116{
117 unsigned nval = SENSORS_LIMIT(val, ASB100_IN_MIN, ASB100_IN_MAX); 117 unsigned nval = clamp_val(val, ASB100_IN_MIN, ASB100_IN_MAX);
118 return (nval + 8) / 16; 118 return (nval + 8) / 16;
119} 119}
120 120
@@ -129,8 +129,8 @@ static u8 FAN_TO_REG(long rpm, int div)
129 return 0; 129 return 0;
130 if (rpm == 0) 130 if (rpm == 0)
131 return 255; 131 return 255;
132 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 132 rpm = clamp_val(rpm, 1, 1000000);
133 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 133 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
134} 134}
135 135
136static int FAN_FROM_REG(u8 val, int div) 136static int FAN_FROM_REG(u8 val, int div)
@@ -148,7 +148,7 @@ static int FAN_FROM_REG(u8 val, int div)
148 */ 148 */
149static u8 TEMP_TO_REG(long temp) 149static u8 TEMP_TO_REG(long temp)
150{ 150{
151 int ntemp = SENSORS_LIMIT(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX); 151 int ntemp = clamp_val(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
152 ntemp += (ntemp < 0 ? -500 : 500); 152 ntemp += (ntemp < 0 ? -500 : 500);
153 return (u8)(ntemp / 1000); 153 return (u8)(ntemp / 1000);
154} 154}
@@ -164,7 +164,7 @@ static int TEMP_FROM_REG(u8 reg)
164 */ 164 */
165static u8 ASB100_PWM_TO_REG(int pwm) 165static u8 ASB100_PWM_TO_REG(int pwm)
166{ 166{
167 pwm = SENSORS_LIMIT(pwm, 0, 255); 167 pwm = clamp_val(pwm, 0, 255);
168 return (u8)(pwm / 16); 168 return (u8)(pwm / 16);
169} 169}
170 170
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index b867aab78049..da7f5b5d5db5 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -191,7 +191,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
191 if (kstrtol(buf, 10, &reqval)) 191 if (kstrtol(buf, 10, &reqval))
192 return -EINVAL; 192 return -EINVAL;
193 193
194 reqval = SENSORS_LIMIT(reqval, 0, 255); 194 reqval = clamp_val(reqval, 0, 255);
195 195
196 mutex_lock(&data->update_lock); 196 mutex_lock(&data->update_lock);
197 data->reg[param->msb[0]] = reqval; 197 data->reg[param->msb[0]] = reqval;
@@ -224,7 +224,7 @@ static ssize_t store_bitmask(struct device *dev,
224 if (kstrtol(buf, 10, &reqval)) 224 if (kstrtol(buf, 10, &reqval))
225 return -EINVAL; 225 return -EINVAL;
226 226
227 reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]); 227 reqval = clamp_val(reqval, 0, param->mask[0]);
228 228
229 reqval = (reqval & param->mask[0]) << param->shift[0]; 229 reqval = (reqval & param->mask[0]) << param->shift[0];
230 230
@@ -274,7 +274,7 @@ static ssize_t store_fan16(struct device *dev,
274 * generating an alarm. 274 * generating an alarm.
275 */ 275 */
276 reqval = 276 reqval =
277 (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe)); 277 (reqval <= 0 ? 0xffff : clamp_val(5400000 / reqval, 0, 0xfffe));
278 278
279 mutex_lock(&data->update_lock); 279 mutex_lock(&data->update_lock);
280 data->reg[param->msb[0]] = (reqval >> 8) & 0xff; 280 data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -343,11 +343,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
343 if (kstrtol(buf, 10, &reqval)) 343 if (kstrtol(buf, 10, &reqval))
344 return -EINVAL; 344 return -EINVAL;
345 345
346 reqval = SENSORS_LIMIT(reqval, 0, 0xffff); 346 reqval = clamp_val(reqval, 0, 0xffff);
347 347
348 reqval = reqval * 0xc0 / asc7621_in_scaling[nr]; 348 reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
349 349
350 reqval = SENSORS_LIMIT(reqval, 0, 0xff); 350 reqval = clamp_val(reqval, 0, 0xff);
351 351
352 mutex_lock(&data->update_lock); 352 mutex_lock(&data->update_lock);
353 data->reg[param->msb[0]] = reqval; 353 data->reg[param->msb[0]] = reqval;
@@ -376,7 +376,7 @@ static ssize_t store_temp8(struct device *dev,
376 if (kstrtol(buf, 10, &reqval)) 376 if (kstrtol(buf, 10, &reqval))
377 return -EINVAL; 377 return -EINVAL;
378 378
379 reqval = SENSORS_LIMIT(reqval, -127000, 127000); 379 reqval = clamp_val(reqval, -127000, 127000);
380 380
381 temp = reqval / 1000; 381 temp = reqval / 1000;
382 382
@@ -432,7 +432,7 @@ static ssize_t store_temp62(struct device *dev,
432 if (kstrtol(buf, 10, &reqval)) 432 if (kstrtol(buf, 10, &reqval))
433 return -EINVAL; 433 return -EINVAL;
434 434
435 reqval = SENSORS_LIMIT(reqval, -32000, 31750); 435 reqval = clamp_val(reqval, -32000, 31750);
436 i = reqval / 1000; 436 i = reqval / 1000;
437 f = reqval - (i * 1000); 437 f = reqval - (i * 1000);
438 temp = i << 2; 438 temp = i << 2;
@@ -468,7 +468,7 @@ static ssize_t show_ap2_temp(struct device *dev,
468 auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000; 468 auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
469 regval = 469 regval =
470 ((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]); 470 ((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
471 temp = auto_point1 + asc7621_range_map[SENSORS_LIMIT(regval, 0, 15)]; 471 temp = auto_point1 + asc7621_range_map[clamp_val(regval, 0, 15)];
472 mutex_unlock(&data->update_lock); 472 mutex_unlock(&data->update_lock);
473 473
474 return sprintf(buf, "%d\n", temp); 474 return sprintf(buf, "%d\n", temp);
@@ -489,7 +489,7 @@ static ssize_t store_ap2_temp(struct device *dev,
489 489
490 mutex_lock(&data->update_lock); 490 mutex_lock(&data->update_lock);
491 auto_point1 = data->reg[param->msb[1]] * 1000; 491 auto_point1 = data->reg[param->msb[1]] * 1000;
492 reqval = SENSORS_LIMIT(reqval, auto_point1 + 2000, auto_point1 + 80000); 492 reqval = clamp_val(reqval, auto_point1 + 2000, auto_point1 + 80000);
493 493
494 for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) { 494 for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
495 if (reqval >= auto_point1 + asc7621_range_map[i]) { 495 if (reqval >= auto_point1 + asc7621_range_map[i]) {
@@ -523,7 +523,7 @@ static ssize_t show_pwm_ac(struct device *dev,
523 regval = config | (altbit << 3); 523 regval = config | (altbit << 3);
524 mutex_unlock(&data->update_lock); 524 mutex_unlock(&data->update_lock);
525 525
526 return sprintf(buf, "%u\n", map[SENSORS_LIMIT(regval, 0, 15)]); 526 return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
527} 527}
528 528
529static ssize_t store_pwm_ac(struct device *dev, 529static ssize_t store_pwm_ac(struct device *dev,
@@ -663,7 +663,7 @@ static ssize_t show_pwm_freq(struct device *dev,
663 u8 regval = 663 u8 regval =
664 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]; 664 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
665 665
666 regval = SENSORS_LIMIT(regval, 0, 15); 666 regval = clamp_val(regval, 0, 15);
667 667
668 return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]); 668 return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
669} 669}
@@ -711,7 +711,7 @@ static ssize_t show_pwm_ast(struct device *dev,
711 u8 regval = 711 u8 regval =
712 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]; 712 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
713 713
714 regval = SENSORS_LIMIT(regval, 0, 7); 714 regval = clamp_val(regval, 0, 7);
715 715
716 return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]); 716 return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
717 717
@@ -759,7 +759,7 @@ static ssize_t show_temp_st(struct device *dev,
759 SETUP_SHOW_data_param(dev, attr); 759 SETUP_SHOW_data_param(dev, attr);
760 u8 regval = 760 u8 regval =
761 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]; 761 (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
762 regval = SENSORS_LIMIT(regval, 0, 7); 762 regval = clamp_val(regval, 0, 7);
763 763
764 return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]); 764 return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
765} 765}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 56dbcfb3e301..b25c64302cbc 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -190,7 +190,7 @@ struct atk_acpi_input_buf {
190}; 190};
191 191
192static int atk_add(struct acpi_device *device); 192static int atk_add(struct acpi_device *device);
193static int atk_remove(struct acpi_device *device, int type); 193static int atk_remove(struct acpi_device *device);
194static void atk_print_sensor(struct atk_data *data, union acpi_object *obj); 194static void atk_print_sensor(struct atk_data *data, union acpi_object *obj);
195static int atk_read_value(struct atk_sensor_data *sensor, u64 *value); 195static int atk_read_value(struct atk_sensor_data *sensor, u64 *value);
196static void atk_free_sensors(struct atk_data *data); 196static void atk_free_sensors(struct atk_data *data);
@@ -1416,7 +1416,7 @@ out:
1416 return err; 1416 return err;
1417} 1417}
1418 1418
1419static int atk_remove(struct acpi_device *device, int type) 1419static int atk_remove(struct acpi_device *device)
1420{ 1420{
1421 struct atk_data *data = device->driver_data; 1421 struct atk_data *data = device->driver_data;
1422 dev_dbg(&device->dev, "removing...\n"); 1422 dev_dbg(&device->dev, "removing...\n");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d64923d63537..3f1e297663ad 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -198,7 +198,7 @@ struct tjmax {
198static const struct tjmax __cpuinitconst tjmax_table[] = { 198static const struct tjmax __cpuinitconst tjmax_table[] = {
199 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */ 199 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
200 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */ 200 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
201 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */ 201 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
202 { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */ 202 { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
203 { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */ 203 { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
204}; 204};
@@ -212,7 +212,7 @@ struct tjmax_model {
212#define ANY 0xff 212#define ANY 0xff
213 213
214static const struct tjmax_model __cpuinitconst tjmax_model_table[] = { 214static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
215 { 0x1c, 10, 100000 }, /* D4xx, N4xx, D5xx, N5xx */ 215 { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
216 { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others 216 { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
217 * Note: Also matches 230 and 330, 217 * Note: Also matches 230 and 330,
218 * which are covered by tjmax_table 218 * which are covered by tjmax_table
@@ -222,6 +222,7 @@ static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
222 * is undetectable by software 222 * is undetectable by software
223 */ 223 */
224 { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */ 224 { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
225 { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
225 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */ 226 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
226}; 227};
227 228
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 7430f70ae452..c347c94f2f73 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
277 277
278static inline int IN_TO_REG(int val, int nominal) 278static inline int IN_TO_REG(int val, int nominal)
279{ 279{
280 return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255); 280 return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
281} 281}
282 282
283/* 283/*
@@ -293,8 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
293 293
294static inline int TEMP_TO_REG(int val) 294static inline int TEMP_TO_REG(int val)
295{ 295{
296 return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000, 296 return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
297 -128, 127);
298} 297}
299 298
300/* Temperature range */ 299/* Temperature range */
@@ -332,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
332 331
333static inline int TEMP_HYST_TO_REG(int val, int ix, int reg) 332static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
334{ 333{
335 int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15); 334 int hyst = clamp_val((val + 500) / 1000, 0, 15);
336 335
337 return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4); 336 return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
338} 337}
@@ -349,10 +348,10 @@ static inline int FAN_FROM_REG(int reg, int tpc)
349static inline int FAN_TO_REG(int val, int tpc) 348static inline int FAN_TO_REG(int val, int tpc)
350{ 349{
351 if (tpc) { 350 if (tpc) {
352 return SENSORS_LIMIT(val / tpc, 0, 0xffff); 351 return clamp_val(val / tpc, 0, 0xffff);
353 } else { 352 } else {
354 return (val <= 0) ? 0xffff : 353 return (val <= 0) ? 0xffff :
355 SENSORS_LIMIT(90000 * 60 / val, 0, 0xfffe); 354 clamp_val(90000 * 60 / val, 0, 0xfffe);
356 } 355 }
357} 356}
358 357
@@ -1282,7 +1281,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
1282 mutex_lock(&data->update_lock); 1281 mutex_lock(&data->update_lock);
1283 switch (fn) { 1282 switch (fn) {
1284 case SYS_PWM: 1283 case SYS_PWM:
1285 data->pwm[ix] = SENSORS_LIMIT(val, 0, 255); 1284 data->pwm[ix] = clamp_val(val, 0, 255);
1286 dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]); 1285 dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]);
1287 break; 1286 break;
1288 case SYS_PWM_FREQ: 1287 case SYS_PWM_FREQ:
@@ -1450,7 +1449,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
1450 break; 1449 break;
1451 case SYS_PWM_AUTO_POINT1_PWM: 1450 case SYS_PWM_AUTO_POINT1_PWM:
1452 /* Only valid for pwm[1-3] */ 1451 /* Only valid for pwm[1-3] */
1453 data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255); 1452 data->pwm_min[ix] = clamp_val(val, 0, 255);
1454 dme1737_write(data, DME1737_REG_PWM_MIN(ix), 1453 dme1737_write(data, DME1737_REG_PWM_MIN(ix),
1455 data->pwm_min[ix]); 1454 data->pwm_min[ix]);
1456 break; 1455 break;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 77f434c58236..b07305622087 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -405,7 +405,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
405 if (rpm_target == 0) 405 if (rpm_target == 0)
406 data->fan_target = 0x1fff; 406 data->fan_target = 0x1fff;
407 else 407 else
408 data->fan_target = SENSORS_LIMIT( 408 data->fan_target = clamp_val(
409 (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target, 409 (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
410 0, 0x1fff); 410 0, 0x1fff);
411 411
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 789bd4fb329b..936898f82f94 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -220,7 +220,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
220 : EMC6W201_REG_IN_HIGH(nr); 220 : EMC6W201_REG_IN_HIGH(nr);
221 221
222 mutex_lock(&data->update_lock); 222 mutex_lock(&data->update_lock);
223 data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255); 223 data->in[sf][nr] = clamp_val(val, 0, 255);
224 err = emc6w201_write8(client, reg, data->in[sf][nr]); 224 err = emc6w201_write8(client, reg, data->in[sf][nr]);
225 mutex_unlock(&data->update_lock); 225 mutex_unlock(&data->update_lock);
226 226
@@ -257,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
257 : EMC6W201_REG_TEMP_HIGH(nr); 257 : EMC6W201_REG_TEMP_HIGH(nr);
258 258
259 mutex_lock(&data->update_lock); 259 mutex_lock(&data->update_lock);
260 data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128); 260 data->temp[sf][nr] = clamp_val(val, -127, 128);
261 err = emc6w201_write8(client, reg, data->temp[sf][nr]); 261 err = emc6w201_write8(client, reg, data->temp[sf][nr]);
262 mutex_unlock(&data->update_lock); 262 mutex_unlock(&data->update_lock);
263 263
@@ -298,7 +298,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
298 val = 0xFFFF; 298 val = 0xFFFF;
299 } else { 299 } else {
300 val = DIV_ROUND_CLOSEST(5400000U, val); 300 val = DIV_ROUND_CLOSEST(5400000U, val);
301 val = SENSORS_LIMIT(val, 0, 0xFFFE); 301 val = clamp_val(val, 0, 0xFFFE);
302 } 302 }
303 303
304 mutex_lock(&data->update_lock); 304 mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index bb7275cc47f3..cfb02dd91aad 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1350,7 +1350,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
1350 if (err) 1350 if (err)
1351 return err; 1351 return err;
1352 1352
1353 val = SENSORS_LIMIT(val, 23, 1500000); 1353 val = clamp_val(val, 23, 1500000);
1354 val = fan_to_reg(val); 1354 val = fan_to_reg(val);
1355 1355
1356 mutex_lock(&data->update_lock); 1356 mutex_lock(&data->update_lock);
@@ -1438,7 +1438,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
1438 return err; 1438 return err;
1439 1439
1440 val /= 8; 1440 val /= 8;
1441 val = SENSORS_LIMIT(val, 0, 255); 1441 val = clamp_val(val, 0, 255);
1442 1442
1443 mutex_lock(&data->update_lock); 1443 mutex_lock(&data->update_lock);
1444 f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val); 1444 f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
@@ -1542,7 +1542,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
1542 return err; 1542 return err;
1543 1543
1544 val /= 1000; 1544 val /= 1000;
1545 val = SENSORS_LIMIT(val, 0, 255); 1545 val = clamp_val(val, 0, 255);
1546 1546
1547 mutex_lock(&data->update_lock); 1547 mutex_lock(&data->update_lock);
1548 f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val); 1548 f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
@@ -1589,8 +1589,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
1589 1589
1590 /* convert abs to relative and check */ 1590 /* convert abs to relative and check */
1591 data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr)); 1591 data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
1592 val = SENSORS_LIMIT(val, data->temp_high[nr] - 15, 1592 val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
1593 data->temp_high[nr]);
1594 val = data->temp_high[nr] - val; 1593 val = data->temp_high[nr] - val;
1595 1594
1596 /* convert value to register contents */ 1595 /* convert value to register contents */
@@ -1627,7 +1626,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
1627 return err; 1626 return err;
1628 1627
1629 val /= 1000; 1628 val /= 1000;
1630 val = SENSORS_LIMIT(val, 0, 255); 1629 val = clamp_val(val, 0, 255);
1631 1630
1632 mutex_lock(&data->update_lock); 1631 mutex_lock(&data->update_lock);
1633 f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val); 1632 f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
@@ -1754,7 +1753,7 @@ static ssize_t store_pwm(struct device *dev,
1754 if (err) 1753 if (err)
1755 return err; 1754 return err;
1756 1755
1757 val = SENSORS_LIMIT(val, 0, 255); 1756 val = clamp_val(val, 0, 255);
1758 1757
1759 mutex_lock(&data->update_lock); 1758 mutex_lock(&data->update_lock);
1760 data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); 1759 data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1805,7 +1804,7 @@ static ssize_t store_simple_pwm(struct device *dev,
1805 if (err) 1804 if (err)
1806 return err; 1805 return err;
1807 1806
1808 val = SENSORS_LIMIT(val, 0, 255); 1807 val = clamp_val(val, 0, 255);
1809 1808
1810 mutex_lock(&data->update_lock); 1809 mutex_lock(&data->update_lock);
1811 f71882fg_write8(data, F71882FG_REG_PWM(nr), val); 1810 f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
@@ -1932,7 +1931,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
1932 if (err) 1931 if (err)
1933 return err; 1932 return err;
1934 1933
1935 val = SENSORS_LIMIT(val, 0, 255); 1934 val = clamp_val(val, 0, 255);
1936 1935
1937 mutex_lock(&data->update_lock); 1936 mutex_lock(&data->update_lock);
1938 data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); 1937 data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1991,8 +1990,8 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
1991 mutex_lock(&data->update_lock); 1990 mutex_lock(&data->update_lock);
1992 data->pwm_auto_point_temp[nr][point] = 1991 data->pwm_auto_point_temp[nr][point] =
1993 f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point)); 1992 f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
1994 val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15, 1993 val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
1995 data->pwm_auto_point_temp[nr][point]); 1994 data->pwm_auto_point_temp[nr][point]);
1996 val = data->pwm_auto_point_temp[nr][point] - val; 1995 val = data->pwm_auto_point_temp[nr][point] - val;
1997 1996
1998 reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2)); 1997 reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
@@ -2126,9 +2125,9 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
2126 val /= 1000; 2125 val /= 1000;
2127 2126
2128 if (data->auto_point_temp_signed) 2127 if (data->auto_point_temp_signed)
2129 val = SENSORS_LIMIT(val, -128, 127); 2128 val = clamp_val(val, -128, 127);
2130 else 2129 else
2131 val = SENSORS_LIMIT(val, 0, 127); 2130 val = clamp_val(val, 0, 127);
2132 2131
2133 mutex_lock(&data->update_lock); 2132 mutex_lock(&data->update_lock);
2134 f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val); 2133 f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index f7dba229395f..9e300e567f15 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -359,7 +359,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
359 return -EINVAL; 359 return -EINVAL;
360 360
361 mutex_lock(&data->update_lock); 361 mutex_lock(&data->update_lock);
362 data->pwm[nr] = SENSORS_LIMIT(val, 0, 255); 362 data->pwm[nr] = clamp_val(val, 0, 255);
363 f75375_write_pwm(client, nr); 363 f75375_write_pwm(client, nr);
364 mutex_unlock(&data->update_lock); 364 mutex_unlock(&data->update_lock);
365 return count; 365 return count;
@@ -556,7 +556,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
556 if (err < 0) 556 if (err < 0)
557 return err; 557 return err;
558 558
559 val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff); 559 val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
560 mutex_lock(&data->update_lock); 560 mutex_lock(&data->update_lock);
561 data->in_max[nr] = val; 561 data->in_max[nr] = val;
562 f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]); 562 f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]);
@@ -577,7 +577,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
577 if (err < 0) 577 if (err < 0)
578 return err; 578 return err;
579 579
580 val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff); 580 val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
581 mutex_lock(&data->update_lock); 581 mutex_lock(&data->update_lock);
582 data->in_min[nr] = val; 582 data->in_min[nr] = val;
583 f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]); 583 f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]);
@@ -625,7 +625,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
625 if (err < 0) 625 if (err < 0)
626 return err; 626 return err;
627 627
628 val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127); 628 val = clamp_val(TEMP_TO_REG(val), 0, 127);
629 mutex_lock(&data->update_lock); 629 mutex_lock(&data->update_lock);
630 data->temp_high[nr] = val; 630 data->temp_high[nr] = val;
631 f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]); 631 f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]);
@@ -646,7 +646,7 @@ static ssize_t set_temp_max_hyst(struct device *dev,
646 if (err < 0) 646 if (err < 0)
647 return err; 647 return err;
648 648
649 val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127); 649 val = clamp_val(TEMP_TO_REG(val), 0, 127);
650 mutex_lock(&data->update_lock); 650 mutex_lock(&data->update_lock);
651 data->temp_max_hyst[nr] = val; 651 data->temp_max_hyst[nr] = val;
652 f75375_write8(client, F75375_REG_TEMP_HYST(nr), 652 f75375_write8(client, F75375_REG_TEMP_HYST(nr),
@@ -822,7 +822,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
822 if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) || 822 if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
823 !duty_mode_enabled(f75375s_pdata->pwm_enable[nr])) 823 !duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
824 continue; 824 continue;
825 data->pwm[nr] = SENSORS_LIMIT(f75375s_pdata->pwm[nr], 0, 255); 825 data->pwm[nr] = clamp_val(f75375s_pdata->pwm[nr], 0, 255);
826 f75375_write_pwm(client, nr); 826 f75375_write_pwm(client, nr);
827 } 827 }
828 828
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 519ce8b9c142..8af2755cdb87 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -379,7 +379,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
379 if (err) 379 if (err)
380 return err; 380 return err;
381 381
382 v = SENSORS_LIMIT(v / 1000, -128, 127) + 128; 382 v = clamp_val(v / 1000, -128, 127) + 128;
383 383
384 mutex_lock(&data->update_lock); 384 mutex_lock(&data->update_lock);
385 i2c_smbus_write_byte_data(to_i2c_client(dev), 385 i2c_smbus_write_byte_data(to_i2c_client(dev),
@@ -540,7 +540,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
540 540
541 /* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */ 541 /* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */
542 if (v || data->kind == fscsyl) { 542 if (v || data->kind == fscsyl) {
543 v = SENSORS_LIMIT(v, 128, 255); 543 v = clamp_val(v, 128, 255);
544 v = (v - 128) * 2 + 1; 544 v = (v - 128) * 2 + 1;
545 } 545 }
546 546
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 8b2106f60eda..ea6480b80e7f 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -171,7 +171,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
171 return -EINVAL; 171 return -EINVAL;
172 172
173 mutex_lock(&data->update_lock); 173 mutex_lock(&data->update_lock);
174 data->set_cnt = PWM_TO_CNT(SENSORS_LIMIT(val, 0, 255)); 174 data->set_cnt = PWM_TO_CNT(clamp_val(val, 0, 255));
175 g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt); 175 g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt);
176 mutex_unlock(&data->update_lock); 176 mutex_unlock(&data->update_lock);
177 177
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 2c74673f48e5..e2e5909a34df 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -86,7 +86,7 @@ enum chips { gl518sm_r00, gl518sm_r80 };
86#define BOOL_FROM_REG(val) ((val) ? 0 : 1) 86#define BOOL_FROM_REG(val) ((val) ? 0 : 1)
87#define BOOL_TO_REG(val) ((val) ? 0 : 1) 87#define BOOL_TO_REG(val) ((val) ? 0 : 1)
88 88
89#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \ 89#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
90 (val) - 500 : \ 90 (val) - 500 : \
91 (val) + 500) / 1000) + 119), 0, 255) 91 (val) + 500) / 1000) + 119), 0, 255)
92#define TEMP_FROM_REG(val) (((val) - 119) * 1000) 92#define TEMP_FROM_REG(val) (((val) - 119) * 1000)
@@ -96,15 +96,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
96 long rpmdiv; 96 long rpmdiv;
97 if (rpm == 0) 97 if (rpm == 0)
98 return 0; 98 return 0;
99 rpmdiv = SENSORS_LIMIT(rpm, 1, 960000) * div; 99 rpmdiv = clamp_val(rpm, 1, 960000) * div;
100 return SENSORS_LIMIT((480000 + rpmdiv / 2) / rpmdiv, 1, 255); 100 return clamp_val((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
101} 101}
102#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div)))) 102#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
103 103
104#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255) 104#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
105#define IN_FROM_REG(val) ((val) * 19) 105#define IN_FROM_REG(val) ((val) * 19)
106 106
107#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255) 107#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
108#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4) 108#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
109 109
110#define DIV_FROM_REG(val) (1 << (val)) 110#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index a21ff252f2f1..ed56e09c3dd7 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -144,10 +144,10 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
144static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL); 144static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
145 145
146#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4) 146#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
147#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255) 147#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
148 148
149#define IN_FROM_REG(val) ((val) * 19) 149#define IN_FROM_REG(val) ((val) * 19)
150#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255) 150#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
151 151
152static ssize_t get_in_input(struct device *dev, struct device_attribute *attr, 152static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
153 char *buf) 153 char *buf)
@@ -285,8 +285,7 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
285#define DIV_FROM_REG(val) (1 << (val)) 285#define DIV_FROM_REG(val) (1 << (val))
286#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div)))) 286#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
287#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \ 287#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
288 SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, \ 288 clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
289 255))
290 289
291static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr, 290static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
292 char *buf) 291 char *buf)
@@ -450,7 +449,7 @@ static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
450 get_fan_off, set_fan_off); 449 get_fan_off, set_fan_off);
451 450
452#define TEMP_FROM_REG(val) (((val) - 130) * 1000) 451#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
453#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \ 452#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
454 (val) - 500 : (val) + 500) / 1000) + 130), 0, 255) 453 (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
455 454
456static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr, 455static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 4e04c1228e51..39781945a5d2 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -422,7 +422,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
422 422
423 /* Fill GPIO pin array */ 423 /* Fill GPIO pin array */
424 pdata->num_ctrl = of_gpio_count(node); 424 pdata->num_ctrl = of_gpio_count(node);
425 if (!pdata->num_ctrl) { 425 if (pdata->num_ctrl <= 0) {
426 dev_err(dev, "gpios DT property empty / missing"); 426 dev_err(dev, "gpios DT property empty / missing");
427 return -ENODEV; 427 return -ENODEV;
428 } 428 }
@@ -477,7 +477,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
477 pdata->speed = speed; 477 pdata->speed = speed;
478 478
479 /* Alarm GPIO if one exists */ 479 /* Alarm GPIO if one exists */
480 if (of_gpio_named_count(node, "alarm-gpios")) { 480 if (of_gpio_named_count(node, "alarm-gpios") > 0) {
481 struct gpio_fan_alarm *alarm; 481 struct gpio_fan_alarm *alarm;
482 int val; 482 int val;
483 enum of_gpio_flags flags; 483 enum of_gpio_flags flags;
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
new file mode 100644
index 000000000000..c6fdd5bd395e
--- /dev/null
+++ b/drivers/hwmon/ina209.c
@@ -0,0 +1,636 @@
1/*
2 * Driver for the Texas Instruments / Burr Brown INA209
3 * Bidirectional Current/Power Monitor
4 *
5 * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
6 *
7 * Derived from Ira W. Snyder's original driver submission
8 * Copyright (C) 2008 Paul Hays <Paul.Hays@cattail.ca>
9 * Copyright (C) 2008-2009 Ira W. Snyder <iws@ovro.caltech.edu>
10 *
11 * Aligned with ina2xx driver
12 * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
13 * Thanks to Jan Volkering
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; version 2 of the License.
18 *
19 * Datasheet:
20 * http://www.ti.com/lit/gpn/ina209
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/err.h>
27#include <linux/slab.h>
28#include <linux/bug.h>
29#include <linux/i2c.h>
30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h>
32
33#include <linux/platform_data/ina2xx.h>
34
35/* register definitions */
36#define INA209_CONFIGURATION 0x00
37#define INA209_STATUS 0x01
38#define INA209_STATUS_MASK 0x02
39#define INA209_SHUNT_VOLTAGE 0x03
40#define INA209_BUS_VOLTAGE 0x04
41#define INA209_POWER 0x05
42#define INA209_CURRENT 0x06
43#define INA209_SHUNT_VOLTAGE_POS_PEAK 0x07
44#define INA209_SHUNT_VOLTAGE_NEG_PEAK 0x08
45#define INA209_BUS_VOLTAGE_MAX_PEAK 0x09
46#define INA209_BUS_VOLTAGE_MIN_PEAK 0x0a
47#define INA209_POWER_PEAK 0x0b
48#define INA209_SHUNT_VOLTAGE_POS_WARN 0x0c
49#define INA209_SHUNT_VOLTAGE_NEG_WARN 0x0d
50#define INA209_POWER_WARN 0x0e
51#define INA209_BUS_VOLTAGE_OVER_WARN 0x0f
52#define INA209_BUS_VOLTAGE_UNDER_WARN 0x10
53#define INA209_POWER_OVER_LIMIT 0x11
54#define INA209_BUS_VOLTAGE_OVER_LIMIT 0x12
55#define INA209_BUS_VOLTAGE_UNDER_LIMIT 0x13
56#define INA209_CRITICAL_DAC_POS 0x14
57#define INA209_CRITICAL_DAC_NEG 0x15
58#define INA209_CALIBRATION 0x16
59
60#define INA209_REGISTERS 0x17
61
62#define INA209_CONFIG_DEFAULT 0x3c47 /* PGA=8, full range */
63#define INA209_SHUNT_DEFAULT 10000 /* uOhm */
64
65struct ina209_data {
66 struct device *hwmon_dev;
67
68 struct mutex update_lock;
69 bool valid;
70 unsigned long last_updated; /* in jiffies */
71
72 u16 regs[INA209_REGISTERS]; /* All chip registers */
73
74 u16 config_orig; /* Original configuration */
75 u16 calibration_orig; /* Original calibration */
76 u16 update_interval;
77};
78
79static struct ina209_data *ina209_update_device(struct device *dev)
80{
81 struct i2c_client *client = to_i2c_client(dev);
82 struct ina209_data *data = i2c_get_clientdata(client);
83 struct ina209_data *ret = data;
84 s32 val;
85 int i;
86
87 mutex_lock(&data->update_lock);
88
89 if (!data->valid ||
90 time_after(jiffies, data->last_updated + data->update_interval)) {
91 for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
92 val = i2c_smbus_read_word_swapped(client, i);
93 if (val < 0) {
94 ret = ERR_PTR(val);
95 goto abort;
96 }
97 data->regs[i] = val;
98 }
99 data->last_updated = jiffies;
100 data->valid = true;
101 }
102abort:
103 mutex_unlock(&data->update_lock);
104 return ret;
105}
106
107/*
108 * Read a value from a device register and convert it to the
109 * appropriate sysfs units
110 */
111static long ina209_from_reg(const u8 reg, const u16 val)
112{
113 switch (reg) {
114 case INA209_SHUNT_VOLTAGE:
115 case INA209_SHUNT_VOLTAGE_POS_PEAK:
116 case INA209_SHUNT_VOLTAGE_NEG_PEAK:
117 case INA209_SHUNT_VOLTAGE_POS_WARN:
118 case INA209_SHUNT_VOLTAGE_NEG_WARN:
119 /* LSB=10 uV. Convert to mV. */
120 return DIV_ROUND_CLOSEST(val, 100);
121
122 case INA209_BUS_VOLTAGE:
123 case INA209_BUS_VOLTAGE_MAX_PEAK:
124 case INA209_BUS_VOLTAGE_MIN_PEAK:
125 case INA209_BUS_VOLTAGE_OVER_WARN:
126 case INA209_BUS_VOLTAGE_UNDER_WARN:
127 case INA209_BUS_VOLTAGE_OVER_LIMIT:
128 case INA209_BUS_VOLTAGE_UNDER_LIMIT:
129 /* LSB=4 mV, last 3 bits unused */
130 return (val >> 3) * 4;
131
132 case INA209_CRITICAL_DAC_POS:
133 /* LSB=1 mV, in the upper 8 bits */
134 return val >> 8;
135
136 case INA209_CRITICAL_DAC_NEG:
137 /* LSB=1 mV, in the upper 8 bits */
138 return -1 * (val >> 8);
139
140 case INA209_POWER:
141 case INA209_POWER_PEAK:
142 case INA209_POWER_WARN:
143 case INA209_POWER_OVER_LIMIT:
144 /* LSB=20 mW. Convert to uW */
145 return val * 20 * 1000L;
146
147 case INA209_CURRENT:
148 /* LSB=1 mA (selected). Is in mA */
149 return val;
150 }
151
152 /* programmer goofed */
153 WARN_ON_ONCE(1);
154 return 0;
155}
156
157/*
158 * Take a value and convert it to register format, clamping the value
159 * to the appropriate range.
160 */
161static int ina209_to_reg(u8 reg, u16 old, long val)
162{
163 switch (reg) {
164 case INA209_SHUNT_VOLTAGE_POS_WARN:
165 case INA209_SHUNT_VOLTAGE_NEG_WARN:
166 /* Limit to +- 320 mV, 10 uV LSB */
167 return clamp_val(val, -320, 320) * 100;
168
169 case INA209_BUS_VOLTAGE_OVER_WARN:
170 case INA209_BUS_VOLTAGE_UNDER_WARN:
171 case INA209_BUS_VOLTAGE_OVER_LIMIT:
172 case INA209_BUS_VOLTAGE_UNDER_LIMIT:
173 /*
174 * Limit to 0-32000 mV, 4 mV LSB
175 *
176 * The last three bits aren't part of the value, but we'll
177 * preserve them in their original state.
178 */
179 return (DIV_ROUND_CLOSEST(clamp_val(val, 0, 32000), 4) << 3)
180 | (old & 0x7);
181
182 case INA209_CRITICAL_DAC_NEG:
183 /*
184 * Limit to -255-0 mV, 1 mV LSB
185 * Convert the value to a positive value for the register
186 *
187 * The value lives in the top 8 bits only, be careful
188 * and keep original value of other bits.
189 */
190 return (clamp_val(-val, 0, 255) << 8) | (old & 0xff);
191
192 case INA209_CRITICAL_DAC_POS:
193 /*
194 * Limit to 0-255 mV, 1 mV LSB
195 *
196 * The value lives in the top 8 bits only, be careful
197 * and keep original value of other bits.
198 */
199 return (clamp_val(val, 0, 255) << 8) | (old & 0xff);
200
201 case INA209_POWER_WARN:
202 case INA209_POWER_OVER_LIMIT:
203 /* 20 mW LSB */
204 return DIV_ROUND_CLOSEST(val, 20 * 1000);
205 }
206
207 /* Other registers are read-only, return access error */
208 return -EACCES;
209}
210
211static int ina209_interval_from_reg(u16 reg)
212{
213 return 68 >> (15 - ((reg >> 3) & 0x0f));
214}
215
216static u16 ina209_reg_from_interval(u16 config, long interval)
217{
218 int i, adc;
219
220 if (interval <= 0) {
221 adc = 8;
222 } else {
223 adc = 15;
224 for (i = 34 + 34 / 2; i; i >>= 1) {
225 if (i < interval)
226 break;
227 adc--;
228 }
229 }
230 return (config & 0xf807) | (adc << 3) | (adc << 7);
231}
232
233static ssize_t ina209_set_interval(struct device *dev,
234 struct device_attribute *da,
235 const char *buf, size_t count)
236{
237 struct i2c_client *client = to_i2c_client(dev);
238 struct ina209_data *data = ina209_update_device(dev);
239 long val;
240 u16 regval;
241 int ret;
242
243 if (IS_ERR(data))
244 return PTR_ERR(data);
245
246 ret = kstrtol(buf, 10, &val);
247 if (ret < 0)
248 return ret;
249
250 mutex_lock(&data->update_lock);
251 regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
252 val);
253 i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
254 data->regs[INA209_CONFIGURATION] = regval;
255 data->update_interval = ina209_interval_from_reg(regval);
256 mutex_unlock(&data->update_lock);
257 return count;
258}
259
260static ssize_t ina209_show_interval(struct device *dev,
261 struct device_attribute *da, char *buf)
262{
263 struct i2c_client *client = to_i2c_client(dev);
264 struct ina209_data *data = i2c_get_clientdata(client);
265
266 return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
267}
268
269/*
270 * History is reset by writing 1 into bit 0 of the respective peak register.
271 * Since more than one peak register may be affected by the scope of a
272 * reset_history attribute write, use a bit mask in attr->index to identify
273 * which registers are affected.
274 */
275static u16 ina209_reset_history_regs[] = {
276 INA209_SHUNT_VOLTAGE_POS_PEAK,
277 INA209_SHUNT_VOLTAGE_NEG_PEAK,
278 INA209_BUS_VOLTAGE_MAX_PEAK,
279 INA209_BUS_VOLTAGE_MIN_PEAK,
280 INA209_POWER_PEAK
281};
282
283static ssize_t ina209_reset_history(struct device *dev,
284 struct device_attribute *da,
285 const char *buf,
286 size_t count)
287{
288 struct i2c_client *client = to_i2c_client(dev);
289 struct ina209_data *data = i2c_get_clientdata(client);
290 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
291 u32 mask = attr->index;
292 long val;
293 int i, ret;
294
295 ret = kstrtol(buf, 10, &val);
296 if (ret < 0)
297 return ret;
298
299 mutex_lock(&data->update_lock);
300 for (i = 0; i < ARRAY_SIZE(ina209_reset_history_regs); i++) {
301 if (mask & (1 << i))
302 i2c_smbus_write_word_swapped(client,
303 ina209_reset_history_regs[i], 1);
304 }
305 data->valid = false;
306 mutex_unlock(&data->update_lock);
307 return count;
308}
309
310static ssize_t ina209_set_value(struct device *dev,
311 struct device_attribute *da,
312 const char *buf,
313 size_t count)
314{
315 struct i2c_client *client = to_i2c_client(dev);
316 struct ina209_data *data = ina209_update_device(dev);
317 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
318 int reg = attr->index;
319 long val;
320 int ret;
321
322 if (IS_ERR(data))
323 return PTR_ERR(data);
324
325 ret = kstrtol(buf, 10, &val);
326 if (ret < 0)
327 return ret;
328
329 mutex_lock(&data->update_lock);
330 ret = ina209_to_reg(reg, data->regs[reg], val);
331 if (ret < 0) {
332 count = ret;
333 goto abort;
334 }
335 i2c_smbus_write_word_swapped(client, reg, ret);
336 data->regs[reg] = ret;
337abort:
338 mutex_unlock(&data->update_lock);
339 return count;
340}
341
342static ssize_t ina209_show_value(struct device *dev,
343 struct device_attribute *da,
344 char *buf)
345{
346 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
347 struct ina209_data *data = ina209_update_device(dev);
348 long val;
349
350 if (IS_ERR(data))
351 return PTR_ERR(data);
352
353 val = ina209_from_reg(attr->index, data->regs[attr->index]);
354 return snprintf(buf, PAGE_SIZE, "%ld\n", val);
355}
356
357static ssize_t ina209_show_alarm(struct device *dev,
358 struct device_attribute *da,
359 char *buf)
360{
361 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
362 struct ina209_data *data = ina209_update_device(dev);
363 const unsigned int mask = attr->index;
364 u16 status;
365
366 if (IS_ERR(data))
367 return PTR_ERR(data);
368
369 status = data->regs[INA209_STATUS];
370
371 /*
372 * All alarms are in the INA209_STATUS register. To avoid a long
373 * switch statement, the mask is passed in attr->index
374 */
375 return snprintf(buf, PAGE_SIZE, "%u\n", !!(status & mask));
376}
377
378/* Shunt voltage, history, limits, alarms */
379static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL,
380 INA209_SHUNT_VOLTAGE);
381static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL,
382 INA209_SHUNT_VOLTAGE_POS_PEAK);
383static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL,
384 INA209_SHUNT_VOLTAGE_NEG_PEAK);
385static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL,
386 ina209_reset_history, (1 << 0) | (1 << 1));
387static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value,
388 ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN);
389static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value,
390 ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN);
391static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
392 ina209_set_value, INA209_CRITICAL_DAC_POS);
393static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
394 ina209_set_value, INA209_CRITICAL_DAC_NEG);
395
396static SENSOR_DEVICE_ATTR(in0_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
397 1 << 11);
398static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
399 1 << 12);
400static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
401 1 << 6);
402static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
403 1 << 7);
404
405/* Bus voltage, history, limits, alarms */
406static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL,
407 INA209_BUS_VOLTAGE);
408static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL,
409 INA209_BUS_VOLTAGE_MAX_PEAK);
410static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, ina209_show_value, NULL,
411 INA209_BUS_VOLTAGE_MIN_PEAK);
412static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
413 ina209_reset_history, (1 << 2) | (1 << 3));
414static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value,
415 ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN);
416static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value,
417 ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN);
418static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
419 ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT);
420static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
421 ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT);
422
423static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
424 1 << 14);
425static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
426 1 << 15);
427static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
428 1 << 9);
429static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
430 1 << 10);
431
432/* Power */
433static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL,
434 INA209_POWER);
435static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value,
436 NULL, INA209_POWER_PEAK);
437static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
438 ina209_reset_history, 1 << 4);
439static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value,
440 ina209_set_value, INA209_POWER_WARN);
441static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value,
442 ina209_set_value, INA209_POWER_OVER_LIMIT);
443
444static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
445 1 << 13);
446static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL,
447 1 << 8);
448
449/* Current */
450static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL,
451 INA209_CURRENT);
452
453static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
454 ina209_show_interval, ina209_set_interval, 0);
455
456/*
457 * Finally, construct an array of pointers to members of the above objects,
458 * as required for sysfs_create_group()
459 */
460static struct attribute *ina209_attributes[] = {
461 &sensor_dev_attr_in0_input.dev_attr.attr,
462 &sensor_dev_attr_in0_input_highest.dev_attr.attr,
463 &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
464 &sensor_dev_attr_in0_reset_history.dev_attr.attr,
465 &sensor_dev_attr_in0_max.dev_attr.attr,
466 &sensor_dev_attr_in0_min.dev_attr.attr,
467 &sensor_dev_attr_in0_crit_max.dev_attr.attr,
468 &sensor_dev_attr_in0_crit_min.dev_attr.attr,
469 &sensor_dev_attr_in0_max_alarm.dev_attr.attr,
470 &sensor_dev_attr_in0_min_alarm.dev_attr.attr,
471 &sensor_dev_attr_in0_crit_max_alarm.dev_attr.attr,
472 &sensor_dev_attr_in0_crit_min_alarm.dev_attr.attr,
473
474 &sensor_dev_attr_in1_input.dev_attr.attr,
475 &sensor_dev_attr_in1_input_highest.dev_attr.attr,
476 &sensor_dev_attr_in1_input_lowest.dev_attr.attr,
477 &sensor_dev_attr_in1_reset_history.dev_attr.attr,
478 &sensor_dev_attr_in1_max.dev_attr.attr,
479 &sensor_dev_attr_in1_min.dev_attr.attr,
480 &sensor_dev_attr_in1_crit_max.dev_attr.attr,
481 &sensor_dev_attr_in1_crit_min.dev_attr.attr,
482 &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
483 &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
484 &sensor_dev_attr_in1_crit_max_alarm.dev_attr.attr,
485 &sensor_dev_attr_in1_crit_min_alarm.dev_attr.attr,
486
487 &sensor_dev_attr_power1_input.dev_attr.attr,
488 &sensor_dev_attr_power1_input_highest.dev_attr.attr,
489 &sensor_dev_attr_power1_reset_history.dev_attr.attr,
490 &sensor_dev_attr_power1_max.dev_attr.attr,
491 &sensor_dev_attr_power1_crit.dev_attr.attr,
492 &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
493 &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
494
495 &sensor_dev_attr_curr1_input.dev_attr.attr,
496
497 &sensor_dev_attr_update_interval.dev_attr.attr,
498
499 NULL,
500};
501
502static const struct attribute_group ina209_group = {
503 .attrs = ina209_attributes,
504};
505
506static void ina209_restore_conf(struct i2c_client *client,
507 struct ina209_data *data)
508{
509 /* Restore initial configuration */
510 i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
511 data->config_orig);
512 i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
513 data->calibration_orig);
514}
515
516static int ina209_init_client(struct i2c_client *client,
517 struct ina209_data *data)
518{
519 struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
520 u32 shunt;
521 int reg;
522
523 reg = i2c_smbus_read_word_swapped(client, INA209_CALIBRATION);
524 if (reg < 0)
525 return reg;
526 data->calibration_orig = reg;
527
528 reg = i2c_smbus_read_word_swapped(client, INA209_CONFIGURATION);
529 if (reg < 0)
530 return reg;
531 data->config_orig = reg;
532
533 if (pdata) {
534 if (pdata->shunt_uohms <= 0)
535 return -EINVAL;
536 shunt = pdata->shunt_uohms;
537 } else if (!of_property_read_u32(client->dev.of_node, "shunt-resistor",
538 &shunt)) {
539 if (shunt == 0)
540 return -EINVAL;
541 } else {
542 shunt = data->calibration_orig ?
543 40960000 / data->calibration_orig : INA209_SHUNT_DEFAULT;
544 }
545
546 i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
547 INA209_CONFIG_DEFAULT);
548 data->update_interval = ina209_interval_from_reg(INA209_CONFIG_DEFAULT);
549
550 /*
551 * Calibrate current LSB to 1mA. Shunt is in uOhms.
552 * See equation 13 in datasheet.
553 */
554 i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
555 clamp_val(40960000 / shunt, 1, 65535));
556
557 /* Clear status register */
558 i2c_smbus_read_word_swapped(client, INA209_STATUS);
559
560 return 0;
561}
562
563static int ina209_probe(struct i2c_client *client,
564 const struct i2c_device_id *id)
565{
566 struct i2c_adapter *adapter = client->adapter;
567 struct ina209_data *data;
568 int ret;
569
570 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
571 return -ENODEV;
572
573 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
574 if (!data)
575 return -ENOMEM;
576
577 i2c_set_clientdata(client, data);
578 mutex_init(&data->update_lock);
579
580 ret = ina209_init_client(client, data);
581 if (ret)
582 return ret;
583
584 /* Register sysfs hooks */
585 ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
586 if (ret)
587 goto out_restore_conf;
588
589 data->hwmon_dev = hwmon_device_register(&client->dev);
590 if (IS_ERR(data->hwmon_dev)) {
591 ret = PTR_ERR(data->hwmon_dev);
592 goto out_hwmon_device_register;
593 }
594
595 return 0;
596
597out_hwmon_device_register:
598 sysfs_remove_group(&client->dev.kobj, &ina209_group);
599out_restore_conf:
600 ina209_restore_conf(client, data);
601 return ret;
602}
603
604static int ina209_remove(struct i2c_client *client)
605{
606 struct ina209_data *data = i2c_get_clientdata(client);
607
608 hwmon_device_unregister(data->hwmon_dev);
609 sysfs_remove_group(&client->dev.kobj, &ina209_group);
610 ina209_restore_conf(client, data);
611
612 return 0;
613}
614
615static const struct i2c_device_id ina209_id[] = {
616 { "ina209", 0 },
617 { }
618};
619MODULE_DEVICE_TABLE(i2c, ina209_id);
620
621/* This is the driver that will be inserted */
622static struct i2c_driver ina209_driver = {
623 .class = I2C_CLASS_HWMON,
624 .driver = {
625 .name = "ina209",
626 },
627 .probe = ina209_probe,
628 .remove = ina209_remove,
629 .id_table = ina209_id,
630};
631
632module_i2c_driver(ina209_driver);
633
634MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>, Paul Hays <Paul.Hays@cattail.ca>, Guenter Roeck <linux@roeck-us.net>");
635MODULE_DESCRIPTION("INA209 driver");
636MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 117d66fcded6..37fc980fde24 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -19,6 +19,8 @@
19 * IT8726F Super I/O chip w/LPC interface 19 * IT8726F Super I/O chip w/LPC interface
20 * IT8728F Super I/O chip w/LPC interface 20 * IT8728F Super I/O chip w/LPC interface
21 * IT8758E Super I/O chip w/LPC interface 21 * IT8758E Super I/O chip w/LPC interface
22 * IT8771E Super I/O chip w/LPC interface
23 * IT8772E Super I/O chip w/LPC interface
22 * IT8782F Super I/O chip w/LPC interface 24 * IT8782F Super I/O chip w/LPC interface
23 * IT8783E/F Super I/O chip w/LPC interface 25 * IT8783E/F Super I/O chip w/LPC interface
24 * Sis950 A clone of the IT8705F 26 * Sis950 A clone of the IT8705F
@@ -61,8 +63,8 @@
61 63
62#define DRVNAME "it87" 64#define DRVNAME "it87"
63 65
64enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8782, 66enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
65 it8783 }; 67 it8772, it8782, it8783 };
66 68
67static unsigned short force_id; 69static unsigned short force_id;
68module_param(force_id, ushort, 0); 70module_param(force_id, ushort, 0);
@@ -140,6 +142,8 @@ static inline void superio_exit(void)
140#define IT8721F_DEVID 0x8721 142#define IT8721F_DEVID 0x8721
141#define IT8726F_DEVID 0x8726 143#define IT8726F_DEVID 0x8726
142#define IT8728F_DEVID 0x8728 144#define IT8728F_DEVID 0x8728
145#define IT8771E_DEVID 0x8771
146#define IT8772E_DEVID 0x8772
143#define IT8782F_DEVID 0x8782 147#define IT8782F_DEVID 0x8782
144#define IT8783E_DEVID 0x8783 148#define IT8783E_DEVID 0x8783
145#define IT87_ACT_REG 0x30 149#define IT87_ACT_REG 0x30
@@ -281,6 +285,24 @@ static const struct it87_devices it87_devices[] = {
281 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI, 285 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
282 .peci_mask = 0x07, 286 .peci_mask = 0x07,
283 }, 287 },
288 [it8771] = {
289 .name = "it8771",
290 .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
291 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
292 /* PECI: guesswork */
293 /* 12mV ADC (OHM) */
294 /* 16 bit fans (OHM) */
295 .peci_mask = 0x07,
296 },
297 [it8772] = {
298 .name = "it8772",
299 .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
300 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
301 /* PECI (coreboot) */
302 /* 12mV ADC (HWSensors4, OHM) */
303 /* 16 bit fans (HWSensors4, OHM) */
304 .peci_mask = 0x07,
305 },
284 [it8782] = { 306 [it8782] = {
285 .name = "it8782", 307 .name = "it8782",
286 .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET 308 .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
@@ -384,7 +406,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
384static u8 in_to_reg(const struct it87_data *data, int nr, long val) 406static u8 in_to_reg(const struct it87_data *data, int nr, long val)
385{ 407{
386 val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr)); 408 val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
387 return SENSORS_LIMIT(val, 0, 255); 409 return clamp_val(val, 0, 255);
388} 410}
389 411
390static int in_from_reg(const struct it87_data *data, int nr, int val) 412static int in_from_reg(const struct it87_data *data, int nr, int val)
@@ -396,16 +418,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
396{ 418{
397 if (rpm == 0) 419 if (rpm == 0)
398 return 255; 420 return 255;
399 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 421 rpm = clamp_val(rpm, 1, 1000000);
400 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 422 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
401 254);
402} 423}
403 424
404static inline u16 FAN16_TO_REG(long rpm) 425static inline u16 FAN16_TO_REG(long rpm)
405{ 426{
406 if (rpm == 0) 427 if (rpm == 0)
407 return 0xffff; 428 return 0xffff;
408 return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe); 429 return clamp_val((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
409} 430}
410 431
411#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \ 432#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \
@@ -414,8 +435,8 @@ static inline u16 FAN16_TO_REG(long rpm)
414#define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \ 435#define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
415 1350000 / ((val) * 2)) 436 1350000 / ((val) * 2))
416 437
417#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (((val) - 500) / 1000) : \ 438#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (((val) - 500) / 1000) : \
418 ((val) + 500) / 1000), -128, 127)) 439 ((val) + 500) / 1000), -128, 127))
419#define TEMP_FROM_REG(val) ((val) * 1000) 440#define TEMP_FROM_REG(val) ((val) * 1000)
420 441
421static u8 pwm_to_reg(const struct it87_data *data, long val) 442static u8 pwm_to_reg(const struct it87_data *data, long val)
@@ -1709,6 +1730,12 @@ static int __init it87_find(unsigned short *address,
1709 case IT8728F_DEVID: 1730 case IT8728F_DEVID:
1710 sio_data->type = it8728; 1731 sio_data->type = it8728;
1711 break; 1732 break;
1733 case IT8771E_DEVID:
1734 sio_data->type = it8771;
1735 break;
1736 case IT8772E_DEVID:
1737 sio_data->type = it8772;
1738 break;
1712 case IT8782F_DEVID: 1739 case IT8782F_DEVID:
1713 sio_data->type = it8782; 1740 sio_data->type = it8782;
1714 break; 1741 break;
@@ -1826,10 +1853,11 @@ static int __init it87_find(unsigned short *address,
1826 1853
1827 reg = superio_inb(IT87_SIO_GPIO3_REG); 1854 reg = superio_inb(IT87_SIO_GPIO3_REG);
1828 if (sio_data->type == it8721 || sio_data->type == it8728 || 1855 if (sio_data->type == it8721 || sio_data->type == it8728 ||
1856 sio_data->type == it8771 || sio_data->type == it8772 ||
1829 sio_data->type == it8782) { 1857 sio_data->type == it8782) {
1830 /* 1858 /*
1831 * IT8721F/IT8758E, and IT8782F don't have VID pins 1859 * IT8721F/IT8758E, and IT8782F don't have VID pins
1832 * at all, not sure about the IT8728F. 1860 * at all, not sure about the IT8728F and compatibles.
1833 */ 1861 */
1834 sio_data->skip_vid = 1; 1862 sio_data->skip_vid = 1;
1835 } else { 1863 } else {
@@ -1883,7 +1911,9 @@ static int __init it87_find(unsigned short *address,
1883 if (reg & (1 << 0)) 1911 if (reg & (1 << 0))
1884 sio_data->internal |= (1 << 0); 1912 sio_data->internal |= (1 << 0);
1885 if ((reg & (1 << 1)) || sio_data->type == it8721 || 1913 if ((reg & (1 << 1)) || sio_data->type == it8721 ||
1886 sio_data->type == it8728) 1914 sio_data->type == it8728 ||
1915 sio_data->type == it8771 ||
1916 sio_data->type == it8772)
1887 sio_data->internal |= (1 << 1); 1917 sio_data->internal |= (1 << 1);
1888 1918
1889 /* 1919 /*
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e21e43c13156..4a58f130fd4e 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -103,6 +103,9 @@ static const unsigned short normal_i2c[] = {
103#define MCP98243_DEVID 0x2100 103#define MCP98243_DEVID 0x2100
104#define MCP98243_DEVID_MASK 0xfffc 104#define MCP98243_DEVID_MASK 0xfffc
105 105
106#define MCP98244_DEVID 0x2200
107#define MCP98244_DEVID_MASK 0xfffc
108
106#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */ 109#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */
107#define MCP9843_DEVID_MASK 0xfffe 110#define MCP9843_DEVID_MASK 0xfffe
108 111
@@ -147,6 +150,7 @@ static struct jc42_chips jc42_chips[] = {
147 { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK }, 150 { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
148 { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK }, 151 { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
149 { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK }, 152 { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
153 { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
150 { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK }, 154 { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
151 { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK }, 155 { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
152 { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK }, 156 { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
@@ -237,9 +241,9 @@ static struct i2c_driver jc42_driver = {
237 241
238static u16 jc42_temp_to_reg(int temp, bool extended) 242static u16 jc42_temp_to_reg(int temp, bool extended)
239{ 243{
240 int ntemp = SENSORS_LIMIT(temp, 244 int ntemp = clamp_val(temp,
241 extended ? JC42_TEMP_MIN_EXTENDED : 245 extended ? JC42_TEMP_MIN_EXTENDED :
242 JC42_TEMP_MIN, JC42_TEMP_MAX); 246 JC42_TEMP_MIN, JC42_TEMP_MAX);
243 247
244 /* convert from 0.001 to 0.0625 resolution */ 248 /* convert from 0.001 to 0.0625 resolution */
245 return (ntemp * 2 / 125) & 0x1fff; 249 return (ntemp * 2 / 125) & 0x1fff;
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eed4d9401788..f644a2e57599 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -209,9 +209,9 @@ static inline int lut_temp_to_reg(struct lm63_data *data, long val)
209{ 209{
210 val -= data->temp2_offset; 210 val -= data->temp2_offset;
211 if (data->lut_temp_highres) 211 if (data->lut_temp_highres)
212 return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127500), 500); 212 return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127500), 500);
213 else 213 else
214 return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127000), 1000); 214 return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
215} 215}
216 216
217/* 217/*
@@ -415,7 +415,7 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
415 return err; 415 return err;
416 416
417 reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE; 417 reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE;
418 val = SENSORS_LIMIT(val, 0, 255); 418 val = clamp_val(val, 0, 255);
419 419
420 mutex_lock(&data->update_lock); 420 mutex_lock(&data->update_lock);
421 data->pwm1[nr] = data->pwm_highres ? val : 421 data->pwm1[nr] = data->pwm_highres ? val :
@@ -700,7 +700,7 @@ static ssize_t set_update_interval(struct device *dev,
700 return err; 700 return err;
701 701
702 mutex_lock(&data->update_lock); 702 mutex_lock(&data->update_lock);
703 lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000)); 703 lm63_set_convrate(client, data, clamp_val(val, 0, 100000));
704 mutex_unlock(&data->update_lock); 704 mutex_unlock(&data->update_lock);
705 705
706 return count; 706 return count;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 7272176a9ec7..9bde9644b102 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -8,6 +8,7 @@
8 * Guillaume Ligneul <guillaume.ligneul@gmail.com> 8 * Guillaume Ligneul <guillaume.ligneul@gmail.com>
9 * Adrien Demarez <adrien.demarez@bolloretelecom.eu> 9 * Adrien Demarez <adrien.demarez@bolloretelecom.eu>
10 * Jeremy Laine <jeremy.laine@bolloretelecom.eu> 10 * Jeremy Laine <jeremy.laine@bolloretelecom.eu>
11 * Chris Verges <kg4ysn@gmail.com>
11 * 12 *
12 * This software program is licensed subject to the GNU General Public License 13 * This software program is licensed subject to the GNU General Public License
13 * (GPL).Version 2,June 1991, available at 14 * (GPL).Version 2,June 1991, available at
@@ -36,11 +37,30 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
36 37
37#define LM73_ID 0x9001 /* 0x0190, byte-swapped */ 38#define LM73_ID 0x9001 /* 0x0190, byte-swapped */
38#define DRVNAME "lm73" 39#define DRVNAME "lm73"
39#define LM73_TEMP_MIN (-40) 40#define LM73_TEMP_MIN (-256000 / 250)
40#define LM73_TEMP_MAX 150 41#define LM73_TEMP_MAX (255750 / 250)
41 42
42/*-----------------------------------------------------------------------*/ 43#define LM73_CTRL_RES_SHIFT 5
44#define LM73_CTRL_RES_MASK (BIT(5) | BIT(6))
45#define LM73_CTRL_TO_MASK BIT(7)
46
47#define LM73_CTRL_HI_SHIFT 2
48#define LM73_CTRL_LO_SHIFT 1
49
50static const unsigned short lm73_convrates[] = {
51 14, /* 11-bits (0.25000 C/LSB): RES1 Bit = 0, RES0 Bit = 0 */
52 28, /* 12-bits (0.12500 C/LSB): RES1 Bit = 0, RES0 Bit = 1 */
53 56, /* 13-bits (0.06250 C/LSB): RES1 Bit = 1, RES0 Bit = 0 */
54 112, /* 14-bits (0.03125 C/LSB): RES1 Bit = 1, RES0 Bit = 1 */
55};
43 56
57struct lm73_data {
58 struct device *hwmon_dev;
59 struct mutex lock;
60 u8 ctrl; /* control register value */
61};
62
63/*-----------------------------------------------------------------------*/
44 64
45static ssize_t set_temp(struct device *dev, struct device_attribute *da, 65static ssize_t set_temp(struct device *dev, struct device_attribute *da,
46 const char *buf, size_t count) 66 const char *buf, size_t count)
@@ -56,8 +76,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
56 return status; 76 return status;
57 77
58 /* Write value */ 78 /* Write value */
59 value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4), 79 value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
60 (LM73_TEMP_MAX*4)) << 5;
61 err = i2c_smbus_write_word_swapped(client, attr->index, value); 80 err = i2c_smbus_write_word_swapped(client, attr->index, value);
62 return (err < 0) ? err : count; 81 return (err < 0) ? err : count;
63} 82}
@@ -79,6 +98,73 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
79 return scnprintf(buf, PAGE_SIZE, "%d\n", temp); 98 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
80} 99}
81 100
101static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
102 const char *buf, size_t count)
103{
104 struct i2c_client *client = to_i2c_client(dev);
105 struct lm73_data *data = i2c_get_clientdata(client);
106 unsigned long convrate;
107 s32 err;
108 int res = 0;
109
110 err = kstrtoul(buf, 10, &convrate);
111 if (err < 0)
112 return err;
113
114 /*
115 * Convert the desired conversion rate into register bits.
116 * res is already initialized, and everything past the second-to-last
117 * value in the array is treated as belonging to the last value
118 * in the array.
119 */
120 while (res < (ARRAY_SIZE(lm73_convrates) - 1) &&
121 convrate > lm73_convrates[res])
122 res++;
123
124 mutex_lock(&data->lock);
125 data->ctrl &= LM73_CTRL_TO_MASK;
126 data->ctrl |= res << LM73_CTRL_RES_SHIFT;
127 err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
128 mutex_unlock(&data->lock);
129
130 if (err < 0)
131 return err;
132
133 return count;
134}
135
136static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
137 char *buf)
138{
139 struct i2c_client *client = to_i2c_client(dev);
140 struct lm73_data *data = i2c_get_clientdata(client);
141 int res;
142
143 res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
144 return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
145}
146
147static ssize_t show_maxmin_alarm(struct device *dev,
148 struct device_attribute *da, char *buf)
149{
150 struct i2c_client *client = to_i2c_client(dev);
151 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
152 struct lm73_data *data = i2c_get_clientdata(client);
153 s32 ctrl;
154
155 mutex_lock(&data->lock);
156 ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
157 if (ctrl < 0)
158 goto abort;
159 data->ctrl = ctrl;
160 mutex_unlock(&data->lock);
161
162 return scnprintf(buf, PAGE_SIZE, "%d\n", (ctrl >> attr->index) & 1);
163
164abort:
165 mutex_unlock(&data->lock);
166 return ctrl;
167}
82 168
83/*-----------------------------------------------------------------------*/ 169/*-----------------------------------------------------------------------*/
84 170
@@ -90,13 +176,20 @@ static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
90 show_temp, set_temp, LM73_REG_MIN); 176 show_temp, set_temp, LM73_REG_MIN);
91static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, 177static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
92 show_temp, NULL, LM73_REG_INPUT); 178 show_temp, NULL, LM73_REG_INPUT);
93 179static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
180 show_convrate, set_convrate, 0);
181static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
182 show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
183static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
184 show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
94 185
95static struct attribute *lm73_attributes[] = { 186static struct attribute *lm73_attributes[] = {
96 &sensor_dev_attr_temp1_input.dev_attr.attr, 187 &sensor_dev_attr_temp1_input.dev_attr.attr,
97 &sensor_dev_attr_temp1_max.dev_attr.attr, 188 &sensor_dev_attr_temp1_max.dev_attr.attr,
98 &sensor_dev_attr_temp1_min.dev_attr.attr, 189 &sensor_dev_attr_temp1_min.dev_attr.attr,
99 190 &sensor_dev_attr_update_interval.dev_attr.attr,
191 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
192 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
100 NULL 193 NULL
101}; 194};
102 195
@@ -111,23 +204,36 @@ static const struct attribute_group lm73_group = {
111static int 204static int
112lm73_probe(struct i2c_client *client, const struct i2c_device_id *id) 205lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
113{ 206{
114 struct device *hwmon_dev;
115 int status; 207 int status;
208 struct lm73_data *data;
209 int ctrl;
210
211 data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
212 GFP_KERNEL);
213 if (!data)
214 return -ENOMEM;
215
216 i2c_set_clientdata(client, data);
217 mutex_init(&data->lock);
218
219 ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
220 if (ctrl < 0)
221 return ctrl;
222 data->ctrl = ctrl;
116 223
117 /* Register sysfs hooks */ 224 /* Register sysfs hooks */
118 status = sysfs_create_group(&client->dev.kobj, &lm73_group); 225 status = sysfs_create_group(&client->dev.kobj, &lm73_group);
119 if (status) 226 if (status)
120 return status; 227 return status;
121 228
122 hwmon_dev = hwmon_device_register(&client->dev); 229 data->hwmon_dev = hwmon_device_register(&client->dev);
123 if (IS_ERR(hwmon_dev)) { 230 if (IS_ERR(data->hwmon_dev)) {
124 status = PTR_ERR(hwmon_dev); 231 status = PTR_ERR(data->hwmon_dev);
125 goto exit_remove; 232 goto exit_remove;
126 } 233 }
127 i2c_set_clientdata(client, hwmon_dev);
128 234
129 dev_info(&client->dev, "%s: sensor '%s'\n", 235 dev_info(&client->dev, "%s: sensor '%s'\n",
130 dev_name(hwmon_dev), client->name); 236 dev_name(data->hwmon_dev), client->name);
131 237
132 return 0; 238 return 0;
133 239
@@ -138,9 +244,9 @@ exit_remove:
138 244
139static int lm73_remove(struct i2c_client *client) 245static int lm73_remove(struct i2c_client *client)
140{ 246{
141 struct device *hwmon_dev = i2c_get_clientdata(client); 247 struct lm73_data *data = i2c_get_clientdata(client);
142 248
143 hwmon_device_unregister(hwmon_dev); 249 hwmon_device_unregister(data->hwmon_dev);
144 sysfs_remove_group(&client->dev.kobj, &lm73_group); 250 sysfs_remove_group(&client->dev.kobj, &lm73_group);
145 return 0; 251 return 0;
146} 252}
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 89aa9098ba5b..668ff4721323 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -36,7 +36,7 @@
36 REG: (0.5C/bit, two's complement) << 7 */ 36 REG: (0.5C/bit, two's complement) << 7 */
37static inline u16 LM75_TEMP_TO_REG(long temp) 37static inline u16 LM75_TEMP_TO_REG(long temp)
38{ 38{
39 int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX); 39 int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
40 ntemp += (ntemp < 0 ? -250 : 250); 40 ntemp += (ntemp < 0 ? -250 : 250);
41 return (u16)((ntemp / 500) << 7); 41 return (u16)((ntemp / 500) << 7);
42} 42}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index f82acf67acf5..f17beb5e6dd6 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -101,7 +101,7 @@ static struct i2c_driver lm77_driver = {
101 */ 101 */
102static inline s16 LM77_TEMP_TO_REG(int temp) 102static inline s16 LM77_TEMP_TO_REG(int temp)
103{ 103{
104 int ntemp = SENSORS_LIMIT(temp, LM77_TEMP_MIN, LM77_TEMP_MAX); 104 int ntemp = clamp_val(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
105 return (ntemp / 500) * 8; 105 return (ntemp / 500) * 8;
106} 106}
107 107
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 53d6ee8ffa33..483538fa1bd5 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -85,7 +85,7 @@ enum chips { lm78, lm79 };
85 */ 85 */
86static inline u8 IN_TO_REG(unsigned long val) 86static inline u8 IN_TO_REG(unsigned long val)
87{ 87{
88 unsigned long nval = SENSORS_LIMIT(val, 0, 4080); 88 unsigned long nval = clamp_val(val, 0, 4080);
89 return (nval + 8) / 16; 89 return (nval + 8) / 16;
90} 90}
91#define IN_FROM_REG(val) ((val) * 16) 91#define IN_FROM_REG(val) ((val) * 16)
@@ -94,7 +94,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
94{ 94{
95 if (rpm <= 0) 95 if (rpm <= 0)
96 return 255; 96 return 255;
97 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 97 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
98} 98}
99 99
100static inline int FAN_FROM_REG(u8 val, int div) 100static inline int FAN_FROM_REG(u8 val, int div)
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
108 */ 108 */
109static inline s8 TEMP_TO_REG(int val) 109static inline s8 TEMP_TO_REG(int val)
110{ 110{
111 int nval = SENSORS_LIMIT(val, -128000, 127000) ; 111 int nval = clamp_val(val, -128000, 127000) ;
112 return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000; 112 return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
113} 113}
114 114
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 28a8b71f4571..357fbb998728 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -72,15 +72,15 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
72 * Fixing this is just not worth it. 72 * Fixing this is just not worth it.
73 */ 73 */
74 74
75#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255)) 75#define IN_TO_REG(val) (clamp_val(((val) + 5) / 10, 0, 255))
76#define IN_FROM_REG(val) ((val) * 10) 76#define IN_FROM_REG(val) ((val) * 10)
77 77
78static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div) 78static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
79{ 79{
80 if (rpm == 0) 80 if (rpm == 0)
81 return 255; 81 return 255;
82 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 82 rpm = clamp_val(rpm, 1, 1000000);
83 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 83 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
84} 84}
85 85
86#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ 86#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -102,7 +102,7 @@ static inline long TEMP_FROM_REG(u16 temp)
102#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \ 102#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
103 (val) - 0x100 : (val)) * 1000) 103 (val) - 0x100 : (val)) * 1000)
104 104
105#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \ 105#define TEMP_LIMIT_TO_REG(val) clamp_val((val) < 0 ? \
106 ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255) 106 ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
107 107
108#define DIV_FROM_REG(val) (1 << (val)) 108#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 9f2dd77e1e0e..47ade8ba152d 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -139,7 +139,7 @@ static const int lm85_scaling[] = { /* .001 Volts */
139#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from)) 139#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
140 140
141#define INS_TO_REG(n, val) \ 141#define INS_TO_REG(n, val) \
142 SENSORS_LIMIT(SCALE(val, lm85_scaling[n], 192), 0, 255) 142 clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
143 143
144#define INSEXT_FROM_REG(n, val, ext) \ 144#define INSEXT_FROM_REG(n, val, ext) \
145 SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n]) 145 SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
@@ -151,19 +151,19 @@ static inline u16 FAN_TO_REG(unsigned long val)
151{ 151{
152 if (!val) 152 if (!val)
153 return 0xffff; 153 return 0xffff;
154 return SENSORS_LIMIT(5400000 / val, 1, 0xfffe); 154 return clamp_val(5400000 / val, 1, 0xfffe);
155} 155}
156#define FAN_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \ 156#define FAN_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
157 5400000 / (val)) 157 5400000 / (val))
158 158
159/* Temperature is reported in .001 degC increments */ 159/* Temperature is reported in .001 degC increments */
160#define TEMP_TO_REG(val) \ 160#define TEMP_TO_REG(val) \
161 SENSORS_LIMIT(SCALE(val, 1000, 1), -127, 127) 161 clamp_val(SCALE(val, 1000, 1), -127, 127)
162#define TEMPEXT_FROM_REG(val, ext) \ 162#define TEMPEXT_FROM_REG(val, ext) \
163 SCALE(((val) << 4) + (ext), 16, 1000) 163 SCALE(((val) << 4) + (ext), 16, 1000)
164#define TEMP_FROM_REG(val) ((val) * 1000) 164#define TEMP_FROM_REG(val) ((val) * 1000)
165 165
166#define PWM_TO_REG(val) SENSORS_LIMIT(val, 0, 255) 166#define PWM_TO_REG(val) clamp_val(val, 0, 255)
167#define PWM_FROM_REG(val) (val) 167#define PWM_FROM_REG(val) (val)
168 168
169 169
@@ -258,7 +258,7 @@ static int ZONE_TO_REG(int zone)
258 return i << 5; 258 return i << 5;
259} 259}
260 260
261#define HYST_TO_REG(val) SENSORS_LIMIT(((val) + 500) / 1000, 0, 15) 261#define HYST_TO_REG(val) clamp_val(((val) + 500) / 1000, 0, 15)
262#define HYST_FROM_REG(val) ((val) * 1000) 262#define HYST_FROM_REG(val) ((val) * 1000)
263 263
264/* 264/*
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 863412a02bdd..8eeb141c85ac 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -931,7 +931,7 @@ static ssize_t set_update_interval(struct device *dev,
931 return err; 931 return err;
932 932
933 mutex_lock(&data->update_lock); 933 mutex_lock(&data->update_lock);
934 lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000)); 934 lm90_set_convrate(client, data, clamp_val(val, 0, 100000));
935 mutex_unlock(&data->update_lock); 935 mutex_unlock(&data->update_lock);
936 936
937 return count; 937 return count;
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 1a003f73e4e4..b40f34cdb3ca 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -371,8 +371,8 @@ static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
371static u8 LM93_IN_TO_REG(int nr, unsigned val) 371static u8 LM93_IN_TO_REG(int nr, unsigned val)
372{ 372{
373 /* range limit */ 373 /* range limit */
374 const long mV = SENSORS_LIMIT(val, 374 const long mV = clamp_val(val,
375 lm93_vin_val_min[nr], lm93_vin_val_max[nr]); 375 lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
376 376
377 /* try not to lose too much precision here */ 377 /* try not to lose too much precision here */
378 const long uV = mV * 1000; 378 const long uV = mV * 1000;
@@ -385,8 +385,8 @@ static u8 LM93_IN_TO_REG(int nr, unsigned val)
385 const long intercept = uV_min - slope * lm93_vin_reg_min[nr]; 385 const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
386 386
387 u8 result = ((uV - intercept + (slope/2)) / slope); 387 u8 result = ((uV - intercept + (slope/2)) / slope);
388 result = SENSORS_LIMIT(result, 388 result = clamp_val(result,
389 lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]); 389 lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
390 return result; 390 return result;
391} 391}
392 392
@@ -411,10 +411,10 @@ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
411{ 411{
412 long uV_offset = vid * 1000 - val * 10000; 412 long uV_offset = vid * 1000 - val * 10000;
413 if (upper) { 413 if (upper) {
414 uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000); 414 uV_offset = clamp_val(uV_offset, 12500, 200000);
415 return (u8)((uV_offset / 12500 - 1) << 4); 415 return (u8)((uV_offset / 12500 - 1) << 4);
416 } else { 416 } else {
417 uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000); 417 uV_offset = clamp_val(uV_offset, -400000, -25000);
418 return (u8)((uV_offset / -25000 - 1) << 0); 418 return (u8)((uV_offset / -25000 - 1) << 0);
419 } 419 }
420} 420}
@@ -437,7 +437,7 @@ static int LM93_TEMP_FROM_REG(u8 reg)
437 */ 437 */
438static u8 LM93_TEMP_TO_REG(long temp) 438static u8 LM93_TEMP_TO_REG(long temp)
439{ 439{
440 int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX); 440 int ntemp = clamp_val(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
441 ntemp += (ntemp < 0 ? -500 : 500); 441 ntemp += (ntemp < 0 ? -500 : 500);
442 return (u8)(ntemp / 1000); 442 return (u8)(ntemp / 1000);
443} 443}
@@ -472,7 +472,7 @@ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
472{ 472{
473 int factor = mode ? 5 : 10; 473 int factor = mode ? 5 : 10;
474 474
475 off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN, 475 off = clamp_val(off, LM93_TEMP_OFFSET_MIN,
476 mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0); 476 mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
477 return (u8)((off + factor/2) / factor); 477 return (u8)((off + factor/2) / factor);
478} 478}
@@ -620,8 +620,8 @@ static u16 LM93_FAN_TO_REG(long rpm)
620 if (rpm == 0) { 620 if (rpm == 0) {
621 count = 0x3fff; 621 count = 0x3fff;
622 } else { 622 } else {
623 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 623 rpm = clamp_val(rpm, 1, 1000000);
624 count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe); 624 count = clamp_val((1350000 + rpm) / rpm, 1, 0x3ffe);
625 } 625 }
626 626
627 regs = count << 2; 627 regs = count << 2;
@@ -692,7 +692,7 @@ static int LM93_RAMP_FROM_REG(u8 reg)
692 */ 692 */
693static u8 LM93_RAMP_TO_REG(int ramp) 693static u8 LM93_RAMP_TO_REG(int ramp)
694{ 694{
695 ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX); 695 ramp = clamp_val(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
696 return (u8)((ramp + 2) / 5); 696 return (u8)((ramp + 2) / 5);
697} 697}
698 698
@@ -702,7 +702,7 @@ static u8 LM93_RAMP_TO_REG(int ramp)
702 */ 702 */
703static u8 LM93_PROCHOT_TO_REG(long prochot) 703static u8 LM93_PROCHOT_TO_REG(long prochot)
704{ 704{
705 prochot = SENSORS_LIMIT(prochot, 0, 255); 705 prochot = clamp_val(prochot, 0, 255);
706 return (u8)prochot; 706 return (u8)prochot;
707} 707}
708 708
@@ -2052,7 +2052,7 @@ static ssize_t store_pwm_auto_channels(struct device *dev,
2052 return err; 2052 return err;
2053 2053
2054 mutex_lock(&data->update_lock); 2054 mutex_lock(&data->update_lock);
2055 data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255); 2055 data->block9[nr][LM93_PWM_CTL1] = clamp_val(val, 0, 255);
2056 lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1), 2056 lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1),
2057 data->block9[nr][LM93_PWM_CTL1]); 2057 data->block9[nr][LM93_PWM_CTL1]);
2058 mutex_unlock(&data->update_lock); 2058 mutex_unlock(&data->update_lock);
@@ -2397,7 +2397,7 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
2397 2397
2398 mutex_lock(&data->update_lock); 2398 mutex_lock(&data->update_lock);
2399 data->prochot_override = (data->prochot_override & 0xf0) | 2399 data->prochot_override = (data->prochot_override & 0xf0) |
2400 SENSORS_LIMIT(val, 0, 15); 2400 clamp_val(val, 0, 15);
2401 lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE, 2401 lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
2402 data->prochot_override); 2402 data->prochot_override);
2403 mutex_unlock(&data->update_lock); 2403 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 2915fd908364..a6c85f0ff8f3 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -259,7 +259,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
259 259
260 val /= 1000; 260 val /= 1000;
261 261
262 val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255)); 262 val = clamp_val(val, 0, (index == 6 ? 127 : 255));
263 263
264 mutex_lock(&data->update_lock); 264 mutex_lock(&data->update_lock);
265 265
@@ -284,7 +284,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
284 284
285 val /= 1000; 285 val /= 1000;
286 286
287 val = SENSORS_LIMIT(val, 0, 31); 287 val = clamp_val(val, 0, 31);
288 288
289 mutex_lock(&data->update_lock); 289 mutex_lock(&data->update_lock);
290 290
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index e0019c69d1bb..2fa2c02f5569 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -118,7 +118,7 @@ static inline int LIMIT_TO_MV(int limit, int range)
118 118
119static inline int MV_TO_LIMIT(int mv, int range) 119static inline int MV_TO_LIMIT(int mv, int range)
120{ 120{
121 return SENSORS_LIMIT(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255); 121 return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
122} 122}
123 123
124static inline int ADC_TO_CURR(int adc, int gain) 124static inline int ADC_TO_CURR(int adc, int gain)
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 666d9f6263eb..a7626358c95d 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -215,7 +215,7 @@ static ssize_t set_temp_max(struct device *dev,
215 return ret; 215 return ret;
216 216
217 mutex_lock(&data->update_lock); 217 mutex_lock(&data->update_lock);
218 data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127); 218 data->temp_max[index] = clamp_val(temp/1000, -128, 127);
219 if (i2c_smbus_write_byte_data(client, 219 if (i2c_smbus_write_byte_data(client,
220 MAX1668_REG_LIMH_WR(index), 220 MAX1668_REG_LIMH_WR(index),
221 data->temp_max[index])) 221 data->temp_max[index]))
@@ -240,7 +240,7 @@ static ssize_t set_temp_min(struct device *dev,
240 return ret; 240 return ret;
241 241
242 mutex_lock(&data->update_lock); 242 mutex_lock(&data->update_lock);
243 data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127); 243 data->temp_min[index] = clamp_val(temp/1000, -128, 127);
244 if (i2c_smbus_write_byte_data(client, 244 if (i2c_smbus_write_byte_data(client,
245 MAX1668_REG_LIML_WR(index), 245 MAX1668_REG_LIML_WR(index),
246 data->temp_max[index])) 246 data->temp_max[index]))
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 6e60036abfa7..3e7b4269f5b9 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -74,7 +74,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
74 74
75#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \ 75#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
76 0 : (rpm_ranges[rpm_range] * 30) / (val)) 76 0 : (rpm_ranges[rpm_range] * 30) / (val))
77#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255) 77#define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255)
78 78
79/* 79/*
80 * Client data (each client gets its own) 80 * Client data (each client gets its own)
@@ -312,7 +312,7 @@ static ssize_t set_pwm(struct device *dev,
312 if (res) 312 if (res)
313 return res; 313 return res;
314 314
315 val = SENSORS_LIMIT(val, 0, 255); 315 val = clamp_val(val, 0, 255);
316 316
317 mutex_lock(&data->update_lock); 317 mutex_lock(&data->update_lock);
318 data->pwm[attr->index] = (u8)(val * 120 / 255); 318 data->pwm[attr->index] = (u8)(val * 120 / 255);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 223461a6d70f..57d58cd32206 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -239,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
239 return err; 239 return err;
240 240
241 mutex_lock(&data->update_lock); 241 mutex_lock(&data->update_lock);
242 data->temp_high[attr2->nr] = SENSORS_LIMIT(temp_to_reg(val), 0, 255); 242 data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
243 i2c_smbus_write_byte_data(client, attr2->index, 243 i2c_smbus_write_byte_data(client, attr2->index,
244 data->temp_high[attr2->nr]); 244 data->temp_high[attr2->nr]);
245 mutex_unlock(&data->update_lock); 245 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index f739f83bafb9..3c16cbd4c002 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -245,7 +245,7 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
245 if (err) 245 if (err)
246 return err; 246 return err;
247 247
248 rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); 248 rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
249 249
250 /* 250 /*
251 * Divide the required speed by 60 to get from rpm to rps, then 251 * Divide the required speed by 60 to get from rpm to rps, then
@@ -313,7 +313,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
313 if (err) 313 if (err)
314 return err; 314 return err;
315 315
316 pwm = SENSORS_LIMIT(pwm, 0, 255); 316 pwm = clamp_val(pwm, 0, 255);
317 317
318 mutex_lock(&data->update_lock); 318 mutex_lock(&data->update_lock);
319 319
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
new file mode 100644
index 000000000000..bf4aa3777fc1
--- /dev/null
+++ b/drivers/hwmon/max6697.c
@@ -0,0 +1,726 @@
1/*
2 * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
3 *
4 * based on max1668.c
5 * Copyright (c) 2011 David George <david.george@ska.ac.za>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/jiffies.h>
22#include <linux/i2c.h>
23#include <linux/hwmon.h>
24#include <linux/hwmon-sysfs.h>
25#include <linux/err.h>
26#include <linux/mutex.h>
27#include <linux/of.h>
28
29#include <linux/platform_data/max6697.h>
30
/* Supported chip types; values index the max6697_chip_data[] table below. */
enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694,
	max6697, max6698, max6699 };

/* Report local sensor as temp1 */

/* Per-channel temperature registers: index 0 = local sensor, 1..7 = remote */
static const u8 MAX6697_REG_TEMP[] = {
			0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08 };
/* Extended-resolution (fractional) registers; 0 means not available */
static const u8 MAX6697_REG_TEMP_EXT[] = {
			0x57, 0x09, 0x52, 0x53, 0x54, 0x55, 0x56, 0 };
/* Maximum-temperature limit registers */
static const u8 MAX6697_REG_MAX[] = {
			0x17, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18 };
/* Critical-temperature limit registers */
static const u8 MAX6697_REG_CRIT[] = {
			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 };

/*
 * Map device tree / platform data register bit map to chip bit map.
 * Applies to alert register and over-temperature register.
 *
 * Platform data uses bit 0 for the local channel; on the chip the local
 * channel is bit 6 and the remote channels are shifted down by one.
 * Bit 7 is passed through unchanged.
 */
#define MAX6697_MAP_BITS(reg)	((((reg) & 0x7e) >> 1) | \
				 (((reg) & 0x01) << 6) | ((reg) & 0x80))

/* Three consecutive status registers starting at 0x44 (see update loop) */
#define MAX6697_REG_STAT(n)		(0x44 + (n))

#define MAX6697_REG_CONFIG		0x41
#define MAX6581_CONF_EXTENDED		(1 << 1)	/* extended temp range */
#define MAX6693_CONF_BETA		(1 << 2)	/* beta compensation */
#define MAX6697_CONF_RESISTANCE		(1 << 3)	/* resistance cancellation */
#define MAX6697_CONF_TIMEOUT		(1 << 5)	/* SMBus timeout control */
#define MAX6697_REG_ALERT_MASK		0x42
#define MAX6697_REG_OVERT_MASK		0x43

/* MAX6581-only registers */
#define MAX6581_REG_RESISTANCE		0x4a
#define MAX6581_REG_IDEALITY		0x4b
#define MAX6581_REG_IDEALITY_SELECT	0x4c
#define MAX6581_REG_OFFSET		0x4d
#define MAX6581_REG_OFFSET_SELECT	0x4e

#define MAX6697_CONV_TIME		156	/* ms per channel, worst case */

/*
 * Static per-chip-type capabilities.
 * have_ext, have_crit and have_fault are per-channel bit masks with
 * bit 0 = local channel (temp1).
 */
struct max6697_chip_data {
	int channels;		/* number of temperature channels */
	u32 have_ext;		/* channels with an extended-resolution reg */
	u32 have_crit;		/* channels with a critical limit reg */
	u32 have_fault;		/* channels reporting diode faults */
	u8 valid_conf;		/* configuration bits supported by the chip */
	const u8 *alarm_map;	/* optional status-bit remap table */
};

/* Per-device driver state, attached via i2c_set_clientdata() */
struct max6697_data {
	struct device *hwmon_dev;

	enum chips type;
	const struct max6697_chip_data *chip;

	int update_interval;	/* in milli-seconds */
	int temp_offset;	/* in degrees C; 64 in extended-range mode */

	struct mutex update_lock;
	unsigned long last_updated;	/* In jiffies */
	bool valid;		/* true if following fields are valid */

	/* 1x local and up to 7x remote */
	u8 temp[8][4];	/* [nr][0]=temp [1]=ext [2]=max [3]=crit */
#define MAX6697_TEMP_INPUT	0
#define MAX6697_TEMP_EXT	1
#define MAX6697_TEMP_MAX	2
#define MAX6697_TEMP_CRIT	3
	u32 alarms;		/* packed copy of the 3 status registers */
};
100
/* Diode fault status bits on MAX6581 are right shifted by one bit */
static const u8 max6581_alarm_map[] = {
	0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23 };

/* Capability table, indexed by enum chips */
static const struct max6697_chip_data max6697_chip_data[] = {
	[max6581] = {
		.channels = 8,
		.have_crit = 0xff,
		.have_ext = 0x7f,
		.have_fault = 0xfe,
		.valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT,
		.alarm_map = max6581_alarm_map,
	},
	[max6602] = {
		.channels = 5,
		.have_crit = 0x12,
		.have_ext = 0x02,
		.have_fault = 0x1e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6622] = {
		.channels = 5,
		.have_crit = 0x12,
		.have_ext = 0x02,
		.have_fault = 0x1e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6636] = {
		.channels = 7,
		.have_crit = 0x72,
		.have_ext = 0x02,
		.have_fault = 0x7e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6689] = {
		.channels = 7,
		.have_crit = 0x72,
		.have_ext = 0x02,
		.have_fault = 0x7e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6693] = {
		.channels = 7,
		.have_crit = 0x72,
		.have_ext = 0x02,
		.have_fault = 0x7e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
		  MAX6697_CONF_TIMEOUT,
	},
	[max6694] = {
		.channels = 5,
		.have_crit = 0x12,
		.have_ext = 0x02,
		.have_fault = 0x1e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
		  MAX6697_CONF_TIMEOUT,
	},
	[max6697] = {
		.channels = 7,
		.have_crit = 0x72,
		.have_ext = 0x02,
		.have_fault = 0x7e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6698] = {
		.channels = 7,
		.have_crit = 0x72,
		.have_ext = 0x02,
		.have_fault = 0x0e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
	[max6699] = {
		.channels = 5,
		.have_crit = 0x12,
		.have_ext = 0x02,
		.have_fault = 0x1e,
		.valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
	},
};
181
/*
 * Refresh the register cache if it is stale (older than one full
 * conversion cycle, data->update_interval ms) or was never filled.
 * Returns the device data on success, or an ERR_PTR-encoded negative
 * errno if any i2c read fails.
 */
static struct max6697_data *max6697_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6697_data *data = i2c_get_clientdata(client);
	struct max6697_data *ret = data;
	int val;
	int i;
	u32 alarms;

	mutex_lock(&data->update_lock);

	/* Serve cached values while they are still fresh */
	if (data->valid &&
	    !time_after(jiffies, data->last_updated
			+ msecs_to_jiffies(data->update_interval)))
		goto abort;

	for (i = 0; i < data->chip->channels; i++) {
		/* Extended-resolution byte, only where the chip has one */
		if (data->chip->have_ext & (1 << i)) {
			val = i2c_smbus_read_byte_data(client,
						       MAX6697_REG_TEMP_EXT[i]);
			if (unlikely(val < 0)) {
				ret = ERR_PTR(val);
				goto abort;
			}
			data->temp[i][MAX6697_TEMP_EXT] = val;
		}

		val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]);
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->temp[i][MAX6697_TEMP_INPUT] = val;

		val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]);
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->temp[i][MAX6697_TEMP_MAX] = val;

		/* Critical limit, only where the chip has one */
		if (data->chip->have_crit & (1 << i)) {
			val = i2c_smbus_read_byte_data(client,
						       MAX6697_REG_CRIT[i]);
			if (unlikely(val < 0)) {
				ret = ERR_PTR(val);
				goto abort;
			}
			data->temp[i][MAX6697_TEMP_CRIT] = val;
		}
	}

	/*
	 * Pack the three status registers into one 24-bit word; the first
	 * register ends up in the most significant byte. The alarm
	 * attribute indices below refer to bit positions in this word.
	 */
	alarms = 0;
	for (i = 0; i < 3; i++) {
		val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i));
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		alarms = (alarms << 8) | val;
	}
	data->alarms = alarms;
	data->last_updated = jiffies;
	data->valid = true;
abort:
	mutex_unlock(&data->update_lock);

	return ret;
}
251
252static ssize_t show_temp_input(struct device *dev,
253 struct device_attribute *devattr, char *buf)
254{
255 int index = to_sensor_dev_attr(devattr)->index;
256 struct max6697_data *data = max6697_update_device(dev);
257 int temp;
258
259 if (IS_ERR(data))
260 return PTR_ERR(data);
261
262 temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3;
263 temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5;
264
265 return sprintf(buf, "%d\n", temp * 125);
266}
267
268static ssize_t show_temp(struct device *dev,
269 struct device_attribute *devattr, char *buf)
270{
271 int nr = to_sensor_dev_attr_2(devattr)->nr;
272 int index = to_sensor_dev_attr_2(devattr)->index;
273 struct max6697_data *data = max6697_update_device(dev);
274 int temp;
275
276 if (IS_ERR(data))
277 return PTR_ERR(data);
278
279 temp = data->temp[nr][index];
280 temp -= data->temp_offset;
281
282 return sprintf(buf, "%d\n", temp * 1000);
283}
284
285static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
286 char *buf)
287{
288 int index = to_sensor_dev_attr(attr)->index;
289 struct max6697_data *data = max6697_update_device(dev);
290
291 if (IS_ERR(data))
292 return PTR_ERR(data);
293
294 if (data->chip->alarm_map)
295 index = data->chip->alarm_map[index];
296
297 return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
298}
299
300static ssize_t set_temp(struct device *dev,
301 struct device_attribute *devattr,
302 const char *buf, size_t count)
303{
304 int nr = to_sensor_dev_attr_2(devattr)->nr;
305 int index = to_sensor_dev_attr_2(devattr)->index;
306 struct i2c_client *client = to_i2c_client(dev);
307 struct max6697_data *data = i2c_get_clientdata(client);
308 long temp;
309 int ret;
310
311 ret = kstrtol(buf, 10, &temp);
312 if (ret < 0)
313 return ret;
314
315 mutex_lock(&data->update_lock);
316 temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
317 temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
318 data->temp[nr][index] = temp;
319 ret = i2c_smbus_write_byte_data(client,
320 index == 2 ? MAX6697_REG_MAX[nr]
321 : MAX6697_REG_CRIT[nr],
322 temp);
323 mutex_unlock(&data->update_lock);
324
325 return ret < 0 ? ret : count;
326}
327
/*
 * Per-channel attributes. For _max/_crit the 'nr' argument is the
 * channel and 'index' selects the cache slot / target register in
 * show_temp()/set_temp().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    0, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    0, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    1, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    1, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    2, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    2, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    3, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    3, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    4, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    4, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    5, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    5, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_input, NULL, 6);
static SENSOR_DEVICE_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    6, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    6, MAX6697_TEMP_CRIT);

static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_input, NULL, 7);
static SENSOR_DEVICE_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    7, MAX6697_TEMP_MAX);
static SENSOR_DEVICE_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
			    7, MAX6697_TEMP_CRIT);

/*
 * For the alarm/fault attributes the index is the bit position within
 * data->alarms (remapped via alarm_map on MAX6581 — see show_alarm()).
 */
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 22);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 18);
static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 19);
static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_alarm, NULL, 20);
static SENSOR_DEVICE_ATTR(temp7_max_alarm, S_IRUGO, show_alarm, NULL, 21);
static SENSOR_DEVICE_ATTR(temp8_max_alarm, S_IRUGO, show_alarm, NULL, 23);

static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp7_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp8_crit_alarm, S_IRUGO, show_alarm, NULL, 15);

static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_alarm, NULL, 7);
401
/*
 * Per-channel attribute lists, indexed by channel. Fixed slot layout:
 * [0] input, [1] max, [2] max_alarm, [3] crit, [4] crit_alarm,
 * [5] fault (none for the local channel 0) — probe() creates slots
 * individually based on the chip's capability masks.
 */
static struct attribute *max6697_attributes[8][7] = {
	{
		&sensor_dev_attr_temp1_input.dev_attr.attr,
		&sensor_dev_attr_temp1_max.dev_attr.attr,
		&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp1_crit.dev_attr.attr,
		&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp2_input.dev_attr.attr,
		&sensor_dev_attr_temp2_max.dev_attr.attr,
		&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp2_crit.dev_attr.attr,
		&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp2_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp3_input.dev_attr.attr,
		&sensor_dev_attr_temp3_max.dev_attr.attr,
		&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp3_crit.dev_attr.attr,
		&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp3_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp4_input.dev_attr.attr,
		&sensor_dev_attr_temp4_max.dev_attr.attr,
		&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp4_crit.dev_attr.attr,
		&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp4_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp5_input.dev_attr.attr,
		&sensor_dev_attr_temp5_max.dev_attr.attr,
		&sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp5_crit.dev_attr.attr,
		&sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp5_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp6_input.dev_attr.attr,
		&sensor_dev_attr_temp6_max.dev_attr.attr,
		&sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp6_crit.dev_attr.attr,
		&sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp6_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp7_input.dev_attr.attr,
		&sensor_dev_attr_temp7_max.dev_attr.attr,
		&sensor_dev_attr_temp7_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp7_crit.dev_attr.attr,
		&sensor_dev_attr_temp7_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp7_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_temp8_input.dev_attr.attr,
		&sensor_dev_attr_temp8_max.dev_attr.attr,
		&sensor_dev_attr_temp8_max_alarm.dev_attr.attr,
		&sensor_dev_attr_temp8_crit.dev_attr.attr,
		&sensor_dev_attr_temp8_crit_alarm.dev_attr.attr,
		&sensor_dev_attr_temp8_fault.dev_attr.attr,
		NULL
	}
};

/* Group wrappers, used for bulk removal in max6697_remove_files() */
static const struct attribute_group max6697_group[8] = {
	{ .attrs = max6697_attributes[0] },
	{ .attrs = max6697_attributes[1] },
	{ .attrs = max6697_attributes[2] },
	{ .attrs = max6697_attributes[3] },
	{ .attrs = max6697_attributes[4] },
	{ .attrs = max6697_attributes[5] },
	{ .attrs = max6697_attributes[6] },
	{ .attrs = max6697_attributes[7] },
};
479
480static void max6697_get_config_of(struct device_node *node,
481 struct max6697_platform_data *pdata)
482{
483 int len;
484 const __be32 *prop;
485
486 prop = of_get_property(node, "smbus-timeout-disable", &len);
487 if (prop)
488 pdata->smbus_timeout_disable = true;
489 prop = of_get_property(node, "extended-range-enable", &len);
490 if (prop)
491 pdata->extended_range_enable = true;
492 prop = of_get_property(node, "beta-compensation-enable", &len);
493 if (prop)
494 pdata->beta_compensation = true;
495 prop = of_get_property(node, "alert-mask", &len);
496 if (prop && len == sizeof(u32))
497 pdata->alert_mask = be32_to_cpu(prop[0]);
498 prop = of_get_property(node, "over-temperature-mask", &len);
499 if (prop && len == sizeof(u32))
500 pdata->over_temperature_mask = be32_to_cpu(prop[0]);
501 prop = of_get_property(node, "resistance-cancellation", &len);
502 if (prop) {
503 if (len == sizeof(u32))
504 pdata->resistance_cancellation = be32_to_cpu(prop[0]);
505 else
506 pdata->resistance_cancellation = 0xfe;
507 }
508 prop = of_get_property(node, "transistor-ideality", &len);
509 if (prop && len == 2 * sizeof(u32)) {
510 pdata->ideality_mask = be32_to_cpu(prop[0]);
511 pdata->ideality_value = be32_to_cpu(prop[1]);
512 }
513}
514
515static int max6697_init_chip(struct i2c_client *client)
516{
517 struct max6697_data *data = i2c_get_clientdata(client);
518 struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
519 struct max6697_platform_data p;
520 const struct max6697_chip_data *chip = data->chip;
521 int factor = chip->channels;
522 int ret, reg;
523
524 /*
525 * Don't touch configuration if neither platform data nor OF
526 * configuration was specified. If that is the case, use the
527 * current chip configuration.
528 */
529 if (!pdata && !client->dev.of_node) {
530 reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG);
531 if (reg < 0)
532 return reg;
533 if (data->type == max6581) {
534 if (reg & MAX6581_CONF_EXTENDED)
535 data->temp_offset = 64;
536 reg = i2c_smbus_read_byte_data(client,
537 MAX6581_REG_RESISTANCE);
538 if (reg < 0)
539 return reg;
540 factor += hweight8(reg);
541 } else {
542 if (reg & MAX6697_CONF_RESISTANCE)
543 factor++;
544 }
545 goto done;
546 }
547
548 if (client->dev.of_node) {
549 memset(&p, 0, sizeof(p));
550 max6697_get_config_of(client->dev.of_node, &p);
551 pdata = &p;
552 }
553
554 reg = 0;
555 if (pdata->smbus_timeout_disable &&
556 (chip->valid_conf & MAX6697_CONF_TIMEOUT)) {
557 reg |= MAX6697_CONF_TIMEOUT;
558 }
559 if (pdata->extended_range_enable &&
560 (chip->valid_conf & MAX6581_CONF_EXTENDED)) {
561 reg |= MAX6581_CONF_EXTENDED;
562 data->temp_offset = 64;
563 }
564 if (pdata->resistance_cancellation &&
565 (chip->valid_conf & MAX6697_CONF_RESISTANCE)) {
566 reg |= MAX6697_CONF_RESISTANCE;
567 factor++;
568 }
569 if (pdata->beta_compensation &&
570 (chip->valid_conf & MAX6693_CONF_BETA)) {
571 reg |= MAX6693_CONF_BETA;
572 }
573
574 ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg);
575 if (ret < 0)
576 return ret;
577
578 ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
579 MAX6697_MAP_BITS(pdata->alert_mask));
580 if (ret < 0)
581 return ret;
582
583 ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
584 MAX6697_MAP_BITS(pdata->over_temperature_mask));
585 if (ret < 0)
586 return ret;
587
588 if (data->type == max6581) {
589 factor += hweight8(pdata->resistance_cancellation >> 1);
590 ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE,
591 pdata->resistance_cancellation >> 1);
592 if (ret < 0)
593 return ret;
594 ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
595 pdata->ideality_mask >> 1);
596 if (ret < 0)
597 return ret;
598 ret = i2c_smbus_write_byte_data(client,
599 MAX6581_REG_IDEALITY_SELECT,
600 pdata->ideality_value);
601 if (ret < 0)
602 return ret;
603 }
604done:
605 data->update_interval = factor * MAX6697_CONV_TIME;
606 return 0;
607}
608
609static void max6697_remove_files(struct i2c_client *client)
610{
611 int i;
612
613 for (i = 0; i < ARRAY_SIZE(max6697_group); i++)
614 sysfs_remove_group(&client->dev.kobj, &max6697_group[i]);
615}
616
/*
 * Bind the driver to a device: allocate state, apply the initial chip
 * configuration, create the sysfs attributes the chip actually supports
 * (per the capability masks), and register the hwmon device.
 * Returns 0 on success or a negative errno; on any failure after the
 * first attribute was created, all groups are removed again.
 */
static int max6697_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = client->adapter;
	struct device *dev = &client->dev;
	struct max6697_data *data;
	int i, err;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* devm allocation: freed automatically on probe failure/removal */
	data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->type = id->driver_data;
	data->chip = &max6697_chip_data[data->type];

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	err = max6697_init_chip(client);
	if (err)
		return err;

	for (i = 0; i < data->chip->channels; i++) {
		/* input, max and max_alarm exist on every channel */
		err = sysfs_create_file(&dev->kobj,
					max6697_attributes[i][0]);
		if (err)
			goto error;
		err = sysfs_create_file(&dev->kobj,
					max6697_attributes[i][1]);
		if (err)
			goto error;
		err = sysfs_create_file(&dev->kobj,
					max6697_attributes[i][2]);
		if (err)
			goto error;

		/* crit and crit_alarm only where the chip has the register */
		if (data->chip->have_crit & (1 << i)) {
			err = sysfs_create_file(&dev->kobj,
						max6697_attributes[i][3]);
			if (err)
				goto error;
			err = sysfs_create_file(&dev->kobj,
						max6697_attributes[i][4]);
			if (err)
				goto error;
		}
		/* fault attribute only on channels that report diode faults */
		if (data->chip->have_fault & (1 << i)) {
			err = sysfs_create_file(&dev->kobj,
						max6697_attributes[i][5]);
			if (err)
				goto error;
		}
	}

	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto error;
	}

	return 0;

error:
	/* group removal cleans up however many files were created */
	max6697_remove_files(client);
	return err;
}
686
687static int max6697_remove(struct i2c_client *client)
688{
689 struct max6697_data *data = i2c_get_clientdata(client);
690
691 hwmon_device_unregister(data->hwmon_dev);
692 max6697_remove_files(client);
693
694 return 0;
695}
696
/* Device names handled by this driver; driver_data selects enum chips */
static const struct i2c_device_id max6697_id[] = {
	{ "max6581", max6581 },
	{ "max6602", max6602 },
	{ "max6622", max6622 },
	{ "max6636", max6636 },
	{ "max6689", max6689 },
	{ "max6693", max6693 },
	{ "max6694", max6694 },
	{ "max6697", max6697 },
	{ "max6698", max6698 },
	{ "max6699", max6699 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max6697_id);

static struct i2c_driver max6697_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name	= "max6697",
	},
	.probe = max6697_probe,
	.remove	= max6697_remove,
	.id_table = max6697_id,
};

module_i2c_driver(max6697_driver);

MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("MAX6697 temperature sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index a87eb8986e36..b5f63f9c0ce1 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -43,7 +43,7 @@ struct ntc_compensation {
43 * The following compensation tables are from the specification of Murata NTC 43 * The following compensation tables are from the specification of Murata NTC
44 * Thermistors Datasheet 44 * Thermistors Datasheet
45 */ 45 */
46const struct ntc_compensation ncpXXwb473[] = { 46static const struct ntc_compensation ncpXXwb473[] = {
47 { .temp_C = -40, .ohm = 1747920 }, 47 { .temp_C = -40, .ohm = 1747920 },
48 { .temp_C = -35, .ohm = 1245428 }, 48 { .temp_C = -35, .ohm = 1245428 },
49 { .temp_C = -30, .ohm = 898485 }, 49 { .temp_C = -30, .ohm = 898485 },
@@ -79,7 +79,7 @@ const struct ntc_compensation ncpXXwb473[] = {
79 { .temp_C = 120, .ohm = 1615 }, 79 { .temp_C = 120, .ohm = 1615 },
80 { .temp_C = 125, .ohm = 1406 }, 80 { .temp_C = 125, .ohm = 1406 },
81}; 81};
82const struct ntc_compensation ncpXXwl333[] = { 82static const struct ntc_compensation ncpXXwl333[] = {
83 { .temp_C = -40, .ohm = 1610154 }, 83 { .temp_C = -40, .ohm = 1610154 },
84 { .temp_C = -35, .ohm = 1130850 }, 84 { .temp_C = -35, .ohm = 1130850 },
85 { .temp_C = -30, .ohm = 802609 }, 85 { .temp_C = -30, .ohm = 802609 },
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 60745a535821..4f9eb0af5229 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -72,7 +72,7 @@ config SENSORS_MAX34440
72 default n 72 default n
73 help 73 help
74 If you say yes here you get hardware monitoring support for Maxim 74 If you say yes here you get hardware monitoring support for Maxim
75 MAX34440, MAX34441, and MAX34446. 75 MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
76 76
77 This driver can also be built as a module. If so, the module will 77 This driver can also be built as a module. If so, the module will
78 be called max34440. 78 be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 2ada7b021fbe..7e930c3ce1ab 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -2,6 +2,7 @@
2 * Hardware monitoring driver for Maxim MAX34440/MAX34441 2 * Hardware monitoring driver for Maxim MAX34440/MAX34441
3 * 3 *
4 * Copyright (c) 2011 Ericsson AB. 4 * Copyright (c) 2011 Ericsson AB.
5 * Copyright (c) 2012 Guenter Roeck
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -25,7 +26,7 @@
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26#include "pmbus.h" 27#include "pmbus.h"
27 28
28enum chips { max34440, max34441, max34446 }; 29enum chips { max34440, max34441, max34446, max34460, max34461 };
29 30
30#define MAX34440_MFR_VOUT_PEAK 0xd4 31#define MAX34440_MFR_VOUT_PEAK 0xd4
31#define MAX34440_MFR_IOUT_PEAK 0xd5 32#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -87,7 +88,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
87 MAX34446_MFR_POUT_PEAK); 88 MAX34446_MFR_POUT_PEAK);
88 break; 89 break;
89 case PMBUS_VIRT_READ_TEMP_AVG: 90 case PMBUS_VIRT_READ_TEMP_AVG:
90 if (data->id != max34446) 91 if (data->id != max34446 && data->id != max34460 &&
92 data->id != max34461)
91 return -ENXIO; 93 return -ENXIO;
92 ret = pmbus_read_word_data(client, page, 94 ret = pmbus_read_word_data(client, page,
93 MAX34446_MFR_TEMPERATURE_AVG); 95 MAX34446_MFR_TEMPERATURE_AVG);
@@ -322,6 +324,73 @@ static struct pmbus_driver_info max34440_info[] = {
322 .read_word_data = max34440_read_word_data, 324 .read_word_data = max34440_read_word_data,
323 .write_word_data = max34440_write_word_data, 325 .write_word_data = max34440_write_word_data,
324 }, 326 },
327 [max34460] = {
328 .pages = 18,
329 .format[PSC_VOLTAGE_OUT] = direct,
330 .format[PSC_TEMPERATURE] = direct,
331 .m[PSC_VOLTAGE_OUT] = 1,
332 .b[PSC_VOLTAGE_OUT] = 0,
333 .R[PSC_VOLTAGE_OUT] = 3,
334 .m[PSC_TEMPERATURE] = 1,
335 .b[PSC_TEMPERATURE] = 0,
336 .R[PSC_TEMPERATURE] = 2,
337 .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
338 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
339 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
340 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
341 .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
342 .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
343 .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
344 .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
345 .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
346 .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
347 .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
348 .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
349 .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
350 .func[14] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
351 .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
352 .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
353 .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
354 .read_byte_data = max34440_read_byte_data,
355 .read_word_data = max34440_read_word_data,
356 .write_word_data = max34440_write_word_data,
357 },
358 [max34461] = {
359 .pages = 23,
360 .format[PSC_VOLTAGE_OUT] = direct,
361 .format[PSC_TEMPERATURE] = direct,
362 .m[PSC_VOLTAGE_OUT] = 1,
363 .b[PSC_VOLTAGE_OUT] = 0,
364 .R[PSC_VOLTAGE_OUT] = 3,
365 .m[PSC_TEMPERATURE] = 1,
366 .b[PSC_TEMPERATURE] = 0,
367 .R[PSC_TEMPERATURE] = 2,
368 .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
369 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
370 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
371 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
372 .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
373 .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
374 .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
375 .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
376 .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
377 .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
378 .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
379 .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
380 .func[12] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
381 .func[13] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
382 .func[14] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
383 .func[15] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
384 /* page 16 is reserved */
385 .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
386 .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
387 .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
388 .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
389 .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
390 .read_byte_data = max34440_read_byte_data,
391 .read_word_data = max34440_read_word_data,
392 .write_word_data = max34440_write_word_data,
393 },
325}; 394};
326 395
327static int max34440_probe(struct i2c_client *client, 396static int max34440_probe(struct i2c_client *client,
@@ -343,6 +412,8 @@ static const struct i2c_device_id max34440_id[] = {
343 {"max34440", max34440}, 412 {"max34440", max34440},
344 {"max34441", max34441}, 413 {"max34441", max34441},
345 {"max34446", max34446}, 414 {"max34446", max34446},
415 {"max34460", max34460},
416 {"max34461", max34461},
346 {} 417 {}
347}; 418};
348MODULE_DEVICE_TABLE(i2c, max34440_id); 419MODULE_DEVICE_TABLE(i2c, max34440_id);
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 3fe03dc47eb7..fa9beb3eb60c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -2,6 +2,7 @@
2 * pmbus.h - Common defines and structures for PMBus devices 2 * pmbus.h - Common defines and structures for PMBus devices
3 * 3 *
4 * Copyright (c) 2010, 2011 Ericsson AB. 4 * Copyright (c) 2010, 2011 Ericsson AB.
5 * Copyright (c) 2012 Guenter Roeck
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -177,6 +178,13 @@
177#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 28) 178#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 28)
178#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29) 179#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29)
179 180
181#define PMBUS_VIRT_READ_VMON (PMBUS_VIRT_BASE + 30)
182#define PMBUS_VIRT_VMON_UV_WARN_LIMIT (PMBUS_VIRT_BASE + 31)
183#define PMBUS_VIRT_VMON_OV_WARN_LIMIT (PMBUS_VIRT_BASE + 32)
184#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT (PMBUS_VIRT_BASE + 33)
185#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT (PMBUS_VIRT_BASE + 34)
186#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35)
187
180/* 188/*
181 * CAPABILITY 189 * CAPABILITY
182 */ 190 */
@@ -317,6 +325,8 @@ enum pmbus_sensor_classes {
317#define PMBUS_HAVE_STATUS_TEMP (1 << 15) 325#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
318#define PMBUS_HAVE_STATUS_FAN12 (1 << 16) 326#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
319#define PMBUS_HAVE_STATUS_FAN34 (1 << 17) 327#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
328#define PMBUS_HAVE_VMON (1 << 18)
329#define PMBUS_HAVE_STATUS_VMON (1 << 19)
320 330
321enum pmbus_data_format { linear = 0, direct, vid }; 331enum pmbus_data_format { linear = 0, direct, vid };
322 332
@@ -359,6 +369,7 @@ struct pmbus_driver_info {
359 369
360/* Function declarations */ 370/* Function declarations */
361 371
372void pmbus_clear_cache(struct i2c_client *client);
362int pmbus_set_page(struct i2c_client *client, u8 page); 373int pmbus_set_page(struct i2c_client *client, u8 page);
363int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); 374int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
364int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); 375int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 7d19b1bb9ce6..80eef50c50fd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2,6 +2,7 @@
2 * Hardware monitoring driver for PMBus devices 2 * Hardware monitoring driver for PMBus devices
3 * 3 *
4 * Copyright (c) 2010, 2011 Ericsson AB. 4 * Copyright (c) 2010, 2011 Ericsson AB.
5 * Copyright (c) 2012 Guenter Roeck
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -31,45 +32,10 @@
31#include "pmbus.h" 32#include "pmbus.h"
32 33
33/* 34/*
34 * Constants needed to determine number of sensors, booleans, and labels. 35 * Number of additional attribute pointers to allocate
36 * with each call to krealloc
35 */ 37 */
36#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */ 38#define PMBUS_ATTR_ALLOC_SIZE 32
37#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
38 crit, lowest, highest, avg,
39 reset */
40#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
41 lowest, highest, avg,
42 reset */
43#define PMBUS_POUT_SENSORS_PER_PAGE 7 /* input, cap, max, crit,
44 * highest, avg, reset
45 */
46#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
47#define PMBUS_MAX_SENSORS_PER_TEMP 9 /* input, min, max, lcrit,
48 * crit, lowest, highest, avg,
49 * reset
50 */
51
52#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
53 lcrit_alarm, crit_alarm;
54 c: alarm, crit_alarm;
55 p: crit_alarm */
56#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
57 lcrit_alarm, crit_alarm */
58#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
59 crit_alarm */
60#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
61 */
62#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
63#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
64 lcrit_alarm, crit_alarm */
65
66#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
67
68/*
69 * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
70 * are paged. status_input is unpaged.
71 */
72#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
73 39
74/* 40/*
75 * Index into status register array, per status register group 41 * Index into status register array, per status register group
@@ -79,14 +45,18 @@
79#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) 45#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
80#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) 46#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
81#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) 47#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
82#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) 48#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
83#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) 49#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
50#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1)
51
52#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1)
84 53
85#define PMBUS_NAME_SIZE 24 54#define PMBUS_NAME_SIZE 24
86 55
87struct pmbus_sensor { 56struct pmbus_sensor {
57 struct pmbus_sensor *next;
88 char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */ 58 char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
89 struct sensor_device_attribute attribute; 59 struct device_attribute attribute;
90 u8 page; /* page number */ 60 u8 page; /* page number */
91 u16 reg; /* register */ 61 u16 reg; /* register */
92 enum pmbus_sensor_classes class; /* sensor class */ 62 enum pmbus_sensor_classes class; /* sensor class */
@@ -94,19 +64,28 @@ struct pmbus_sensor {
94 int data; /* Sensor data. 64 int data; /* Sensor data.
95 Negative if there was a read error */ 65 Negative if there was a read error */
96}; 66};
67#define to_pmbus_sensor(_attr) \
68 container_of(_attr, struct pmbus_sensor, attribute)
97 69
98struct pmbus_boolean { 70struct pmbus_boolean {
99 char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */ 71 char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
100 struct sensor_device_attribute attribute; 72 struct sensor_device_attribute attribute;
73 struct pmbus_sensor *s1;
74 struct pmbus_sensor *s2;
101}; 75};
76#define to_pmbus_boolean(_attr) \
77 container_of(_attr, struct pmbus_boolean, attribute)
102 78
103struct pmbus_label { 79struct pmbus_label {
104 char name[PMBUS_NAME_SIZE]; /* sysfs label name */ 80 char name[PMBUS_NAME_SIZE]; /* sysfs label name */
105 struct sensor_device_attribute attribute; 81 struct device_attribute attribute;
106 char label[PMBUS_NAME_SIZE]; /* label */ 82 char label[PMBUS_NAME_SIZE]; /* label */
107}; 83};
84#define to_pmbus_label(_attr) \
85 container_of(_attr, struct pmbus_label, attribute)
108 86
109struct pmbus_data { 87struct pmbus_data {
88 struct device *dev;
110 struct device *hwmon_dev; 89 struct device *hwmon_dev;
111 90
112 u32 flags; /* from platform data */ 91 u32 flags; /* from platform data */
@@ -117,29 +96,9 @@ struct pmbus_data {
117 96
118 int max_attributes; 97 int max_attributes;
119 int num_attributes; 98 int num_attributes;
120 struct attribute **attributes;
121 struct attribute_group group; 99 struct attribute_group group;
122 100
123 /*
124 * Sensors cover both sensor and limit registers.
125 */
126 int max_sensors;
127 int num_sensors;
128 struct pmbus_sensor *sensors; 101 struct pmbus_sensor *sensors;
129 /*
130 * Booleans are used for alarms.
131 * Values are determined from status registers.
132 */
133 int max_booleans;
134 int num_booleans;
135 struct pmbus_boolean *booleans;
136 /*
137 * Labels are used to map generic names (e.g., "in1")
138 * to PMBus specific names (e.g., "vin" or "vout1").
139 */
140 int max_labels;
141 int num_labels;
142 struct pmbus_label *labels;
143 102
144 struct mutex update_lock; 103 struct mutex update_lock;
145 bool valid; 104 bool valid;
@@ -150,10 +109,19 @@ struct pmbus_data {
150 * so we keep them all together. 109 * so we keep them all together.
151 */ 110 */
152 u8 status[PB_NUM_STATUS_REG]; 111 u8 status[PB_NUM_STATUS_REG];
112 u8 status_register;
153 113
154 u8 currpage; 114 u8 currpage;
155}; 115};
156 116
117void pmbus_clear_cache(struct i2c_client *client)
118{
119 struct pmbus_data *data = i2c_get_clientdata(client);
120
121 data->valid = false;
122}
123EXPORT_SYMBOL_GPL(pmbus_clear_cache);
124
157int pmbus_set_page(struct i2c_client *client, u8 page) 125int pmbus_set_page(struct i2c_client *client, u8 page)
158{ 126{
159 struct pmbus_data *data = i2c_get_clientdata(client); 127 struct pmbus_data *data = i2c_get_clientdata(client);
@@ -318,9 +286,10 @@ EXPORT_SYMBOL_GPL(pmbus_clear_faults);
318 286
319static int pmbus_check_status_cml(struct i2c_client *client) 287static int pmbus_check_status_cml(struct i2c_client *client)
320{ 288{
289 struct pmbus_data *data = i2c_get_clientdata(client);
321 int status, status2; 290 int status, status2;
322 291
323 status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE); 292 status = _pmbus_read_byte_data(client, -1, data->status_register);
324 if (status < 0 || (status & PB_STATUS_CML)) { 293 if (status < 0 || (status & PB_STATUS_CML)) {
325 status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML); 294 status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
326 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) 295 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
@@ -329,29 +298,30 @@ static int pmbus_check_status_cml(struct i2c_client *client)
329 return 0; 298 return 0;
330} 299}
331 300
332bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg) 301static bool pmbus_check_register(struct i2c_client *client,
302 int (*func)(struct i2c_client *client,
303 int page, int reg),
304 int page, int reg)
333{ 305{
334 int rv; 306 int rv;
335 struct pmbus_data *data = i2c_get_clientdata(client); 307 struct pmbus_data *data = i2c_get_clientdata(client);
336 308
337 rv = _pmbus_read_byte_data(client, page, reg); 309 rv = func(client, page, reg);
338 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) 310 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
339 rv = pmbus_check_status_cml(client); 311 rv = pmbus_check_status_cml(client);
340 pmbus_clear_fault_page(client, -1); 312 pmbus_clear_fault_page(client, -1);
341 return rv >= 0; 313 return rv >= 0;
342} 314}
315
316bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
317{
318 return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
319}
343EXPORT_SYMBOL_GPL(pmbus_check_byte_register); 320EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
344 321
345bool pmbus_check_word_register(struct i2c_client *client, int page, int reg) 322bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
346{ 323{
347 int rv; 324 return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
348 struct pmbus_data *data = i2c_get_clientdata(client);
349
350 rv = _pmbus_read_word_data(client, page, reg);
351 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
352 rv = pmbus_check_status_cml(client);
353 pmbus_clear_fault_page(client, -1);
354 return rv >= 0;
355} 325}
356EXPORT_SYMBOL_GPL(pmbus_check_word_register); 326EXPORT_SYMBOL_GPL(pmbus_check_word_register);
357 327
@@ -363,53 +333,43 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
363} 333}
364EXPORT_SYMBOL_GPL(pmbus_get_driver_info); 334EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
365 335
336static struct _pmbus_status {
337 u32 func;
338 u16 base;
339 u16 reg;
340} pmbus_status[] = {
341 { PMBUS_HAVE_STATUS_VOUT, PB_STATUS_VOUT_BASE, PMBUS_STATUS_VOUT },
342 { PMBUS_HAVE_STATUS_IOUT, PB_STATUS_IOUT_BASE, PMBUS_STATUS_IOUT },
343 { PMBUS_HAVE_STATUS_TEMP, PB_STATUS_TEMP_BASE,
344 PMBUS_STATUS_TEMPERATURE },
345 { PMBUS_HAVE_STATUS_FAN12, PB_STATUS_FAN_BASE, PMBUS_STATUS_FAN_12 },
346 { PMBUS_HAVE_STATUS_FAN34, PB_STATUS_FAN34_BASE, PMBUS_STATUS_FAN_34 },
347};
348
366static struct pmbus_data *pmbus_update_device(struct device *dev) 349static struct pmbus_data *pmbus_update_device(struct device *dev)
367{ 350{
368 struct i2c_client *client = to_i2c_client(dev); 351 struct i2c_client *client = to_i2c_client(dev);
369 struct pmbus_data *data = i2c_get_clientdata(client); 352 struct pmbus_data *data = i2c_get_clientdata(client);
370 const struct pmbus_driver_info *info = data->info; 353 const struct pmbus_driver_info *info = data->info;
354 struct pmbus_sensor *sensor;
371 355
372 mutex_lock(&data->update_lock); 356 mutex_lock(&data->update_lock);
373 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { 357 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
374 int i; 358 int i, j;
375 359
376 for (i = 0; i < info->pages; i++) 360 for (i = 0; i < info->pages; i++) {
377 data->status[PB_STATUS_BASE + i] 361 data->status[PB_STATUS_BASE + i]
378 = _pmbus_read_byte_data(client, i, 362 = _pmbus_read_byte_data(client, i,
379 PMBUS_STATUS_BYTE); 363 data->status_register);
380 for (i = 0; i < info->pages; i++) { 364 for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
381 if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT)) 365 struct _pmbus_status *s = &pmbus_status[j];
382 continue; 366
383 data->status[PB_STATUS_VOUT_BASE + i] 367 if (!(info->func[i] & s->func))
384 = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT); 368 continue;
385 } 369 data->status[s->base + i]
386 for (i = 0; i < info->pages; i++) { 370 = _pmbus_read_byte_data(client, i,
387 if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT)) 371 s->reg);
388 continue; 372 }
389 data->status[PB_STATUS_IOUT_BASE + i]
390 = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT);
391 }
392 for (i = 0; i < info->pages; i++) {
393 if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
394 continue;
395 data->status[PB_STATUS_TEMP_BASE + i]
396 = _pmbus_read_byte_data(client, i,
397 PMBUS_STATUS_TEMPERATURE);
398 }
399 for (i = 0; i < info->pages; i++) {
400 if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
401 continue;
402 data->status[PB_STATUS_FAN_BASE + i]
403 = _pmbus_read_byte_data(client, i,
404 PMBUS_STATUS_FAN_12);
405 }
406
407 for (i = 0; i < info->pages; i++) {
408 if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
409 continue;
410 data->status[PB_STATUS_FAN34_BASE + i]
411 = _pmbus_read_byte_data(client, i,
412 PMBUS_STATUS_FAN_34);
413 } 373 }
414 374
415 if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) 375 if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
@@ -417,9 +377,12 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
417 = _pmbus_read_byte_data(client, 0, 377 = _pmbus_read_byte_data(client, 0,
418 PMBUS_STATUS_INPUT); 378 PMBUS_STATUS_INPUT);
419 379
420 for (i = 0; i < data->num_sensors; i++) { 380 if (info->func[0] & PMBUS_HAVE_STATUS_VMON)
421 struct pmbus_sensor *sensor = &data->sensors[i]; 381 data->status[PB_STATUS_VMON_BASE]
382 = _pmbus_read_byte_data(client, 0,
383 PMBUS_VIRT_STATUS_VMON);
422 384
385 for (sensor = data->sensors; sensor; sensor = sensor->next) {
423 if (!data->valid || sensor->update) 386 if (!data->valid || sensor->update)
424 sensor->data 387 sensor->data
425 = _pmbus_read_word_data(client, 388 = _pmbus_read_word_data(client,
@@ -657,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
657static u16 pmbus_data2reg_vid(struct pmbus_data *data, 620static u16 pmbus_data2reg_vid(struct pmbus_data *data,
658 enum pmbus_sensor_classes class, long val) 621 enum pmbus_sensor_classes class, long val)
659{ 622{
660 val = SENSORS_LIMIT(val, 500, 1600); 623 val = clamp_val(val, 500, 1600);
661 624
662 return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625); 625 return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
663} 626}
@@ -684,25 +647,20 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
684 647
685/* 648/*
686 * Return boolean calculated from converted data. 649 * Return boolean calculated from converted data.
687 * <index> defines a status register index and mask, and optionally 650 * <index> defines a status register index and mask.
688 * two sensor indexes. 651 * The mask is in the lower 8 bits, the register index is in bits 8..23.
689 * The upper half-word references the two sensors,
690 * two sensor indices.
691 * The upper half-word references the two optional sensors,
692 * the lower half word references status register and mask.
693 * The function returns true if (status[reg] & mask) is true and,
694 * if specified, if v1 >= v2.
695 * To determine if an object exceeds upper limits, specify <v, limit>.
696 * To determine if an object exceeds lower limits, specify <limit, v>.
697 * 652 *
698 * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of 653 * The associated pmbus_boolean structure contains optional pointers to two
699 * index are set. s1 and s2 (the sensor index values) are zero in this case. 654 * sensor attributes. If specified, those attributes are compared against each
700 * The function returns true if (status[reg] & mask) is true. 655 * other to determine if a limit has been exceeded.
701 * 656 *
702 * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against 657 * If the sensor attribute pointers are NULL, the function returns true if
703 * a specified limit has to be performed to determine the boolean result. 658 * (status[reg] & mask) is true.
659 *
660 * If sensor attribute pointers are provided, a comparison against a specified
661 * limit has to be performed to determine the boolean result.
704 * In this case, the function returns true if v1 >= v2 (where v1 and v2 are 662 * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
705 * sensor values referenced by sensor indices s1 and s2). 663 * sensor values referenced by sensor attribute pointers s1 and s2).
706 * 664 *
707 * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>. 665 * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
708 * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>. 666 * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
@@ -710,11 +668,12 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
710 * If a negative value is stored in any of the referenced registers, this value 668 * If a negative value is stored in any of the referenced registers, this value
711 * reflects an error code which will be returned. 669 * reflects an error code which will be returned.
712 */ 670 */
713static int pmbus_get_boolean(struct pmbus_data *data, int index) 671static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
672 int index)
714{ 673{
715 u8 s1 = (index >> 24) & 0xff; 674 struct pmbus_sensor *s1 = b->s1;
716 u8 s2 = (index >> 16) & 0xff; 675 struct pmbus_sensor *s2 = b->s2;
717 u8 reg = (index >> 8) & 0xff; 676 u16 reg = (index >> 8) & 0xffff;
718 u8 mask = index & 0xff; 677 u8 mask = index & 0xff;
719 int ret, status; 678 int ret, status;
720 u8 regval; 679 u8 regval;
@@ -724,21 +683,21 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index)
724 return status; 683 return status;
725 684
726 regval = status & mask; 685 regval = status & mask;
727 if (!s1 && !s2) 686 if (!s1 && !s2) {
728 ret = !!regval; 687 ret = !!regval;
729 else { 688 } else if (!s1 || !s2) {
689 BUG();
690 return 0;
691 } else {
730 long v1, v2; 692 long v1, v2;
731 struct pmbus_sensor *sensor1, *sensor2;
732 693
733 sensor1 = &data->sensors[s1]; 694 if (s1->data < 0)
734 if (sensor1->data < 0) 695 return s1->data;
735 return sensor1->data; 696 if (s2->data < 0)
736 sensor2 = &data->sensors[s2]; 697 return s2->data;
737 if (sensor2->data < 0)
738 return sensor2->data;
739 698
740 v1 = pmbus_reg2data(data, sensor1); 699 v1 = pmbus_reg2data(data, s1);
741 v2 = pmbus_reg2data(data, sensor2); 700 v2 = pmbus_reg2data(data, s2);
742 ret = !!(regval && v1 >= v2); 701 ret = !!(regval && v1 >= v2);
743 } 702 }
744 return ret; 703 return ret;
@@ -748,23 +707,22 @@ static ssize_t pmbus_show_boolean(struct device *dev,
748 struct device_attribute *da, char *buf) 707 struct device_attribute *da, char *buf)
749{ 708{
750 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 709 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
710 struct pmbus_boolean *boolean = to_pmbus_boolean(attr);
751 struct pmbus_data *data = pmbus_update_device(dev); 711 struct pmbus_data *data = pmbus_update_device(dev);
752 int val; 712 int val;
753 713
754 val = pmbus_get_boolean(data, attr->index); 714 val = pmbus_get_boolean(data, boolean, attr->index);
755 if (val < 0) 715 if (val < 0)
756 return val; 716 return val;
757 return snprintf(buf, PAGE_SIZE, "%d\n", val); 717 return snprintf(buf, PAGE_SIZE, "%d\n", val);
758} 718}
759 719
760static ssize_t pmbus_show_sensor(struct device *dev, 720static ssize_t pmbus_show_sensor(struct device *dev,
761 struct device_attribute *da, char *buf) 721 struct device_attribute *devattr, char *buf)
762{ 722{
763 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
764 struct pmbus_data *data = pmbus_update_device(dev); 723 struct pmbus_data *data = pmbus_update_device(dev);
765 struct pmbus_sensor *sensor; 724 struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
766 725
767 sensor = &data->sensors[attr->index];
768 if (sensor->data < 0) 726 if (sensor->data < 0)
769 return sensor->data; 727 return sensor->data;
770 728
@@ -775,10 +733,9 @@ static ssize_t pmbus_set_sensor(struct device *dev,
775 struct device_attribute *devattr, 733 struct device_attribute *devattr,
776 const char *buf, size_t count) 734 const char *buf, size_t count)
777{ 735{
778 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
779 struct i2c_client *client = to_i2c_client(dev); 736 struct i2c_client *client = to_i2c_client(dev);
780 struct pmbus_data *data = i2c_get_clientdata(client); 737 struct pmbus_data *data = i2c_get_clientdata(client);
781 struct pmbus_sensor *sensor = &data->sensors[attr->index]; 738 struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
782 ssize_t rv = count; 739 ssize_t rv = count;
783 long val = 0; 740 long val = 0;
784 int ret; 741 int ret;
@@ -793,7 +750,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
793 if (ret < 0) 750 if (ret < 0)
794 rv = ret; 751 rv = ret;
795 else 752 else
796 data->sensors[attr->index].data = regval; 753 sensor->data = regval;
797 mutex_unlock(&data->update_lock); 754 mutex_unlock(&data->update_lock);
798 return rv; 755 return rv;
799} 756}
@@ -801,102 +758,130 @@ static ssize_t pmbus_set_sensor(struct device *dev,
801static ssize_t pmbus_show_label(struct device *dev, 758static ssize_t pmbus_show_label(struct device *dev,
802 struct device_attribute *da, char *buf) 759 struct device_attribute *da, char *buf)
803{ 760{
804 struct i2c_client *client = to_i2c_client(dev); 761 struct pmbus_label *label = to_pmbus_label(da);
805 struct pmbus_data *data = i2c_get_clientdata(client);
806 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
807 762
808 return snprintf(buf, PAGE_SIZE, "%s\n", 763 return snprintf(buf, PAGE_SIZE, "%s\n", label->label);
809 data->labels[attr->index].label);
810} 764}
811 765
812#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \ 766static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
813do { \
814 struct sensor_device_attribute *a \
815 = &data->_type##s[data->num_##_type##s].attribute; \
816 BUG_ON(data->num_attributes >= data->max_attributes); \
817 sysfs_attr_init(&a->dev_attr.attr); \
818 a->dev_attr.attr.name = _name; \
819 a->dev_attr.attr.mode = _mode; \
820 a->dev_attr.show = _show; \
821 a->dev_attr.store = _set; \
822 a->index = _idx; \
823 data->attributes[data->num_attributes] = &a->dev_attr.attr; \
824 data->num_attributes++; \
825} while (0)
826
827#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
828 PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
829 pmbus_show_##_type, NULL)
830
831#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
832 PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
833 pmbus_show_##_type, pmbus_set_##_type)
834
835static void pmbus_add_boolean(struct pmbus_data *data,
836 const char *name, const char *type, int seq,
837 int idx)
838{ 767{
839 struct pmbus_boolean *boolean; 768 if (data->num_attributes >= data->max_attributes - 1) {
840 769 data->max_attributes += PMBUS_ATTR_ALLOC_SIZE;
841 BUG_ON(data->num_booleans >= data->max_booleans); 770 data->group.attrs = krealloc(data->group.attrs,
842 771 sizeof(struct attribute *) *
843 boolean = &data->booleans[data->num_booleans]; 772 data->max_attributes, GFP_KERNEL);
773 if (data->group.attrs == NULL)
774 return -ENOMEM;
775 }
844 776
845 snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s", 777 data->group.attrs[data->num_attributes++] = attr;
846 name, seq, type); 778 data->group.attrs[data->num_attributes] = NULL;
847 PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx); 779 return 0;
848 data->num_booleans++;
849} 780}
850 781
851static void pmbus_add_boolean_reg(struct pmbus_data *data, 782static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
852 const char *name, const char *type, 783 const char *name,
853 int seq, int reg, int bit) 784 umode_t mode,
785 ssize_t (*show)(struct device *dev,
786 struct device_attribute *attr,
787 char *buf),
788 ssize_t (*store)(struct device *dev,
789 struct device_attribute *attr,
790 const char *buf, size_t count))
854{ 791{
855 pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit); 792 sysfs_attr_init(&dev_attr->attr);
793 dev_attr->attr.name = name;
794 dev_attr->attr.mode = mode;
795 dev_attr->show = show;
796 dev_attr->store = store;
856} 797}
857 798
858static void pmbus_add_boolean_cmp(struct pmbus_data *data, 799static void pmbus_attr_init(struct sensor_device_attribute *a,
859 const char *name, const char *type, 800 const char *name,
860 int seq, int i1, int i2, int reg, int mask) 801 umode_t mode,
802 ssize_t (*show)(struct device *dev,
803 struct device_attribute *attr,
804 char *buf),
805 ssize_t (*store)(struct device *dev,
806 struct device_attribute *attr,
807 const char *buf, size_t count),
808 int idx)
861{ 809{
862 pmbus_add_boolean(data, name, type, seq, 810 pmbus_dev_attr_init(&a->dev_attr, name, mode, show, store);
863 (i1 << 24) | (i2 << 16) | (reg << 8) | mask); 811 a->index = idx;
864} 812}
865 813
866static void pmbus_add_sensor(struct pmbus_data *data, 814static int pmbus_add_boolean(struct pmbus_data *data,
867 const char *name, const char *type, int seq, 815 const char *name, const char *type, int seq,
868 int page, int reg, enum pmbus_sensor_classes class, 816 struct pmbus_sensor *s1,
869 bool update, bool readonly) 817 struct pmbus_sensor *s2,
818 u16 reg, u8 mask)
819{
820 struct pmbus_boolean *boolean;
821 struct sensor_device_attribute *a;
822
823 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
824 if (!boolean)
825 return -ENOMEM;
826
827 a = &boolean->attribute;
828
829 snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
830 name, seq, type);
831 boolean->s1 = s1;
832 boolean->s2 = s2;
833 pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
834 (reg << 8) | mask);
835
836 return pmbus_add_attribute(data, &a->dev_attr.attr);
837}
838
839static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
840 const char *name, const char *type,
841 int seq, int page, int reg,
842 enum pmbus_sensor_classes class,
843 bool update, bool readonly)
870{ 844{
871 struct pmbus_sensor *sensor; 845 struct pmbus_sensor *sensor;
846 struct device_attribute *a;
872 847
873 BUG_ON(data->num_sensors >= data->max_sensors); 848 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
849 if (!sensor)
850 return NULL;
851 a = &sensor->attribute;
874 852
875 sensor = &data->sensors[data->num_sensors];
876 snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s", 853 snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
877 name, seq, type); 854 name, seq, type);
878 sensor->page = page; 855 sensor->page = page;
879 sensor->reg = reg; 856 sensor->reg = reg;
880 sensor->class = class; 857 sensor->class = class;
881 sensor->update = update; 858 sensor->update = update;
882 if (readonly) 859 pmbus_dev_attr_init(a, sensor->name,
883 PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, 860 readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
884 data->num_sensors); 861 pmbus_show_sensor, pmbus_set_sensor);
885 else 862
886 PMBUS_ADD_SET_ATTR(data, sensor->name, sensor, 863 if (pmbus_add_attribute(data, &a->attr))
887 data->num_sensors); 864 return NULL;
888 data->num_sensors++; 865
866 sensor->next = data->sensors;
867 data->sensors = sensor;
868
869 return sensor;
889} 870}
890 871
891static void pmbus_add_label(struct pmbus_data *data, 872static int pmbus_add_label(struct pmbus_data *data,
892 const char *name, int seq, 873 const char *name, int seq,
893 const char *lstring, int index) 874 const char *lstring, int index)
894{ 875{
895 struct pmbus_label *label; 876 struct pmbus_label *label;
877 struct device_attribute *a;
896 878
897 BUG_ON(data->num_labels >= data->max_labels); 879 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
880 if (!label)
881 return -ENOMEM;
882
883 a = &label->attribute;
898 884
899 label = &data->labels[data->num_labels];
900 snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq); 885 snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
901 if (!index) 886 if (!index)
902 strncpy(label->label, lstring, sizeof(label->label) - 1); 887 strncpy(label->label, lstring, sizeof(label->label) - 1);
@@ -904,65 +889,8 @@ static void pmbus_add_label(struct pmbus_data *data,
904 snprintf(label->label, sizeof(label->label), "%s%d", lstring, 889 snprintf(label->label, sizeof(label->label), "%s%d", lstring,
905 index); 890 index);
906 891
907 PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels); 892 pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
908 data->num_labels++; 893 return pmbus_add_attribute(data, &a->attr);
909}
910
911/*
912 * Determine maximum number of sensors, booleans, and labels.
913 * To keep things simple, only make a rough high estimate.
914 */
915static void pmbus_find_max_attr(struct i2c_client *client,
916 struct pmbus_data *data)
917{
918 const struct pmbus_driver_info *info = data->info;
919 int page, max_sensors, max_booleans, max_labels;
920
921 max_sensors = PMBUS_MAX_INPUT_SENSORS;
922 max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
923 max_labels = PMBUS_MAX_INPUT_LABELS;
924
925 for (page = 0; page < info->pages; page++) {
926 if (info->func[page] & PMBUS_HAVE_VOUT) {
927 max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
928 max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
929 max_labels++;
930 }
931 if (info->func[page] & PMBUS_HAVE_IOUT) {
932 max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
933 max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
934 max_labels++;
935 }
936 if (info->func[page] & PMBUS_HAVE_POUT) {
937 max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
938 max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
939 max_labels++;
940 }
941 if (info->func[page] & PMBUS_HAVE_FAN12) {
942 max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
943 max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
944 }
945 if (info->func[page] & PMBUS_HAVE_FAN34) {
946 max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
947 max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
948 }
949 if (info->func[page] & PMBUS_HAVE_TEMP) {
950 max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
951 max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
952 }
953 if (info->func[page] & PMBUS_HAVE_TEMP2) {
954 max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
955 max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
956 }
957 if (info->func[page] & PMBUS_HAVE_TEMP3) {
958 max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
959 max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
960 }
961 }
962 data->max_sensors = max_sensors;
963 data->max_booleans = max_booleans;
964 data->max_labels = max_labels;
965 data->max_attributes = max_sensors + max_booleans + max_labels;
966} 894}
967 895
968/* 896/*
@@ -975,12 +903,12 @@ static void pmbus_find_max_attr(struct i2c_client *client,
975 */ 903 */
976struct pmbus_limit_attr { 904struct pmbus_limit_attr {
977 u16 reg; /* Limit register */ 905 u16 reg; /* Limit register */
906 u16 sbit; /* Alarm attribute status bit */
978 bool update; /* True if register needs updates */ 907 bool update; /* True if register needs updates */
979 bool low; /* True if low limit; for limits with compare 908 bool low; /* True if low limit; for limits with compare
980 functions only */ 909 functions only */
981 const char *attr; /* Attribute name */ 910 const char *attr; /* Attribute name */
982 const char *alarm; /* Alarm attribute name */ 911 const char *alarm; /* Alarm attribute name */
983 u32 sbit; /* Alarm attribute status bit */
984}; 912};
985 913
986/* 914/*
@@ -988,7 +916,9 @@ struct pmbus_limit_attr {
988 * description includes a reference to the associated limit attributes. 916 * description includes a reference to the associated limit attributes.
989 */ 917 */
990struct pmbus_sensor_attr { 918struct pmbus_sensor_attr {
991 u8 reg; /* sensor register */ 919 u16 reg; /* sensor register */
920 u8 gbit; /* generic status bit */
921 u8 nlimit; /* # of limit registers */
992 enum pmbus_sensor_classes class;/* sensor class */ 922 enum pmbus_sensor_classes class;/* sensor class */
993 const char *label; /* sensor label */ 923 const char *label; /* sensor label */
994 bool paged; /* true if paged sensor */ 924 bool paged; /* true if paged sensor */
@@ -997,47 +927,47 @@ struct pmbus_sensor_attr {
997 u32 func; /* sensor mask */ 927 u32 func; /* sensor mask */
998 u32 sfunc; /* sensor status mask */ 928 u32 sfunc; /* sensor status mask */
999 int sbase; /* status base register */ 929 int sbase; /* status base register */
1000 u32 gbit; /* generic status bit */
1001 const struct pmbus_limit_attr *limit;/* limit registers */ 930 const struct pmbus_limit_attr *limit;/* limit registers */
1002 int nlimit; /* # of limit registers */
1003}; 931};
1004 932
1005/* 933/*
1006 * Add a set of limit attributes and, if supported, the associated 934 * Add a set of limit attributes and, if supported, the associated
1007 * alarm attributes. 935 * alarm attributes.
936 * returns 0 if no alarm register found, 1 if an alarm register was found,
937 * < 0 on errors.
1008 */ 938 */
1009static bool pmbus_add_limit_attrs(struct i2c_client *client, 939static int pmbus_add_limit_attrs(struct i2c_client *client,
1010 struct pmbus_data *data, 940 struct pmbus_data *data,
1011 const struct pmbus_driver_info *info, 941 const struct pmbus_driver_info *info,
1012 const char *name, int index, int page, 942 const char *name, int index, int page,
1013 int cbase, 943 struct pmbus_sensor *base,
1014 const struct pmbus_sensor_attr *attr) 944 const struct pmbus_sensor_attr *attr)
1015{ 945{
1016 const struct pmbus_limit_attr *l = attr->limit; 946 const struct pmbus_limit_attr *l = attr->limit;
1017 int nlimit = attr->nlimit; 947 int nlimit = attr->nlimit;
1018 bool have_alarm = false; 948 int have_alarm = 0;
1019 int i, cindex; 949 int i, ret;
950 struct pmbus_sensor *curr;
1020 951
1021 for (i = 0; i < nlimit; i++) { 952 for (i = 0; i < nlimit; i++) {
1022 if (pmbus_check_word_register(client, page, l->reg)) { 953 if (pmbus_check_word_register(client, page, l->reg)) {
1023 cindex = data->num_sensors; 954 curr = pmbus_add_sensor(data, name, l->attr, index,
1024 pmbus_add_sensor(data, name, l->attr, index, page, 955 page, l->reg, attr->class,
1025 l->reg, attr->class, 956 attr->update || l->update,
1026 attr->update || l->update, 957 false);
1027 false); 958 if (!curr)
959 return -ENOMEM;
1028 if (l->sbit && (info->func[page] & attr->sfunc)) { 960 if (l->sbit && (info->func[page] & attr->sfunc)) {
1029 if (attr->compare) { 961 ret = pmbus_add_boolean(data, name,
1030 pmbus_add_boolean_cmp(data, name, 962 l->alarm, index,
1031 l->alarm, index, 963 attr->compare ? l->low ? curr : base
1032 l->low ? cindex : cbase, 964 : NULL,
1033 l->low ? cbase : cindex, 965 attr->compare ? l->low ? base : curr
1034 attr->sbase + page, l->sbit); 966 : NULL,
1035 } else { 967 attr->sbase + page, l->sbit);
1036 pmbus_add_boolean_reg(data, name, 968 if (ret)
1037 l->alarm, index, 969 return ret;
1038 attr->sbase + page, l->sbit); 970 have_alarm = 1;
1039 }
1040 have_alarm = true;
1041 } 971 }
1042 } 972 }
1043 l++; 973 l++;
@@ -1045,45 +975,59 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
1045 return have_alarm; 975 return have_alarm;
1046} 976}
1047 977
1048static void pmbus_add_sensor_attrs_one(struct i2c_client *client, 978static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1049 struct pmbus_data *data, 979 struct pmbus_data *data,
1050 const struct pmbus_driver_info *info, 980 const struct pmbus_driver_info *info,
1051 const char *name, 981 const char *name,
1052 int index, int page, 982 int index, int page,
1053 const struct pmbus_sensor_attr *attr) 983 const struct pmbus_sensor_attr *attr)
1054{ 984{
1055 bool have_alarm; 985 struct pmbus_sensor *base;
1056 int cbase = data->num_sensors; 986 int ret;
1057 987
1058 if (attr->label) 988 if (attr->label) {
1059 pmbus_add_label(data, name, index, attr->label, 989 ret = pmbus_add_label(data, name, index, attr->label,
1060 attr->paged ? page + 1 : 0); 990 attr->paged ? page + 1 : 0);
1061 pmbus_add_sensor(data, name, "input", index, page, attr->reg, 991 if (ret)
1062 attr->class, true, true); 992 return ret;
993 }
994 base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
995 attr->class, true, true);
996 if (!base)
997 return -ENOMEM;
1063 if (attr->sfunc) { 998 if (attr->sfunc) {
1064 have_alarm = pmbus_add_limit_attrs(client, data, info, name, 999 ret = pmbus_add_limit_attrs(client, data, info, name,
1065 index, page, cbase, attr); 1000 index, page, base, attr);
1001 if (ret < 0)
1002 return ret;
1066 /* 1003 /*
1067 * Add generic alarm attribute only if there are no individual 1004 * Add generic alarm attribute only if there are no individual
1068 * alarm attributes, if there is a global alarm bit, and if 1005 * alarm attributes, if there is a global alarm bit, and if
1069 * the generic status register for this page is accessible. 1006 * the generic status register for this page is accessible.
1070 */ 1007 */
1071 if (!have_alarm && attr->gbit && 1008 if (!ret && attr->gbit &&
1072 pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE)) 1009 pmbus_check_byte_register(client, page,
1073 pmbus_add_boolean_reg(data, name, "alarm", index, 1010 data->status_register)) {
1074 PB_STATUS_BASE + page, 1011 ret = pmbus_add_boolean(data, name, "alarm", index,
1075 attr->gbit); 1012 NULL, NULL,
1013 PB_STATUS_BASE + page,
1014 attr->gbit);
1015 if (ret)
1016 return ret;
1017 }
1076 } 1018 }
1019 return 0;
1077} 1020}
1078 1021
1079static void pmbus_add_sensor_attrs(struct i2c_client *client, 1022static int pmbus_add_sensor_attrs(struct i2c_client *client,
1080 struct pmbus_data *data, 1023 struct pmbus_data *data,
1081 const char *name, 1024 const char *name,
1082 const struct pmbus_sensor_attr *attrs, 1025 const struct pmbus_sensor_attr *attrs,
1083 int nattrs) 1026 int nattrs)
1084{ 1027{
1085 const struct pmbus_driver_info *info = data->info; 1028 const struct pmbus_driver_info *info = data->info;
1086 int index, i; 1029 int index, i;
1030 int ret;
1087 1031
1088 index = 1; 1032 index = 1;
1089 for (i = 0; i < nattrs; i++) { 1033 for (i = 0; i < nattrs; i++) {
@@ -1093,12 +1037,16 @@ static void pmbus_add_sensor_attrs(struct i2c_client *client,
1093 for (page = 0; page < pages; page++) { 1037 for (page = 0; page < pages; page++) {
1094 if (!(info->func[page] & attrs->func)) 1038 if (!(info->func[page] & attrs->func))
1095 continue; 1039 continue;
1096 pmbus_add_sensor_attrs_one(client, data, info, name, 1040 ret = pmbus_add_sensor_attrs_one(client, data, info,
1097 index, page, attrs); 1041 name, index, page,
1042 attrs);
1043 if (ret)
1044 return ret;
1098 index++; 1045 index++;
1099 } 1046 }
1100 attrs++; 1047 attrs++;
1101 } 1048 }
1049 return 0;
1102} 1050}
1103 1051
1104static const struct pmbus_limit_attr vin_limit_attrs[] = { 1052static const struct pmbus_limit_attr vin_limit_attrs[] = {
@@ -1140,6 +1088,30 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
1140 }, 1088 },
1141}; 1089};
1142 1090
1091static const struct pmbus_limit_attr vmon_limit_attrs[] = {
1092 {
1093 .reg = PMBUS_VIRT_VMON_UV_WARN_LIMIT,
1094 .attr = "min",
1095 .alarm = "min_alarm",
1096 .sbit = PB_VOLTAGE_UV_WARNING,
1097 }, {
1098 .reg = PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
1099 .attr = "lcrit",
1100 .alarm = "lcrit_alarm",
1101 .sbit = PB_VOLTAGE_UV_FAULT,
1102 }, {
1103 .reg = PMBUS_VIRT_VMON_OV_WARN_LIMIT,
1104 .attr = "max",
1105 .alarm = "max_alarm",
1106 .sbit = PB_VOLTAGE_OV_WARNING,
1107 }, {
1108 .reg = PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
1109 .attr = "crit",
1110 .alarm = "crit_alarm",
1111 .sbit = PB_VOLTAGE_OV_FAULT,
1112 }
1113};
1114
1143static const struct pmbus_limit_attr vout_limit_attrs[] = { 1115static const struct pmbus_limit_attr vout_limit_attrs[] = {
1144 { 1116 {
1145 .reg = PMBUS_VOUT_UV_WARN_LIMIT, 1117 .reg = PMBUS_VOUT_UV_WARN_LIMIT,
@@ -1191,6 +1163,15 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
1191 .limit = vin_limit_attrs, 1163 .limit = vin_limit_attrs,
1192 .nlimit = ARRAY_SIZE(vin_limit_attrs), 1164 .nlimit = ARRAY_SIZE(vin_limit_attrs),
1193 }, { 1165 }, {
1166 .reg = PMBUS_VIRT_READ_VMON,
1167 .class = PSC_VOLTAGE_IN,
1168 .label = "vmon",
1169 .func = PMBUS_HAVE_VMON,
1170 .sfunc = PMBUS_HAVE_STATUS_VMON,
1171 .sbase = PB_STATUS_VMON_BASE,
1172 .limit = vmon_limit_attrs,
1173 .nlimit = ARRAY_SIZE(vmon_limit_attrs),
1174 }, {
1194 .reg = PMBUS_READ_VCAP, 1175 .reg = PMBUS_READ_VCAP,
1195 .class = PSC_VOLTAGE_IN, 1176 .class = PSC_VOLTAGE_IN,
1196 .label = "vcap", 1177 .label = "vcap",
@@ -1553,12 +1534,13 @@ static const u32 pmbus_fan_status_flags[] = {
1553}; 1534};
1554 1535
1555/* Fans */ 1536/* Fans */
1556static void pmbus_add_fan_attributes(struct i2c_client *client, 1537static int pmbus_add_fan_attributes(struct i2c_client *client,
1557 struct pmbus_data *data) 1538 struct pmbus_data *data)
1558{ 1539{
1559 const struct pmbus_driver_info *info = data->info; 1540 const struct pmbus_driver_info *info = data->info;
1560 int index = 1; 1541 int index = 1;
1561 int page; 1542 int page;
1543 int ret;
1562 1544
1563 for (page = 0; page < info->pages; page++) { 1545 for (page = 0; page < info->pages; page++) {
1564 int f; 1546 int f;
@@ -1584,9 +1566,10 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
1584 (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4))))) 1566 (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
1585 continue; 1567 continue;
1586 1568
1587 pmbus_add_sensor(data, "fan", "input", index, page, 1569 if (pmbus_add_sensor(data, "fan", "input", index,
1588 pmbus_fan_registers[f], PSC_FAN, true, 1570 page, pmbus_fan_registers[f],
1589 true); 1571 PSC_FAN, true, true) == NULL)
1572 return -ENOMEM;
1590 1573
1591 /* 1574 /*
1592 * Each fan status register covers multiple fans, 1575 * Each fan status register covers multiple fans,
@@ -1601,39 +1584,55 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
1601 base = PB_STATUS_FAN34_BASE + page; 1584 base = PB_STATUS_FAN34_BASE + page;
1602 else 1585 else
1603 base = PB_STATUS_FAN_BASE + page; 1586 base = PB_STATUS_FAN_BASE + page;
1604 pmbus_add_boolean_reg(data, "fan", "alarm", 1587 ret = pmbus_add_boolean(data, "fan",
1605 index, base, 1588 "alarm", index, NULL, NULL, base,
1606 PB_FAN_FAN1_WARNING >> (f & 1)); 1589 PB_FAN_FAN1_WARNING >> (f & 1));
1607 pmbus_add_boolean_reg(data, "fan", "fault", 1590 if (ret)
1608 index, base, 1591 return ret;
1592 ret = pmbus_add_boolean(data, "fan",
1593 "fault", index, NULL, NULL, base,
1609 PB_FAN_FAN1_FAULT >> (f & 1)); 1594 PB_FAN_FAN1_FAULT >> (f & 1));
1595 if (ret)
1596 return ret;
1610 } 1597 }
1611 index++; 1598 index++;
1612 } 1599 }
1613 } 1600 }
1601 return 0;
1614} 1602}
1615 1603
1616static void pmbus_find_attributes(struct i2c_client *client, 1604static int pmbus_find_attributes(struct i2c_client *client,
1617 struct pmbus_data *data) 1605 struct pmbus_data *data)
1618{ 1606{
1607 int ret;
1608
1619 /* Voltage sensors */ 1609 /* Voltage sensors */
1620 pmbus_add_sensor_attrs(client, data, "in", voltage_attributes, 1610 ret = pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
1621 ARRAY_SIZE(voltage_attributes)); 1611 ARRAY_SIZE(voltage_attributes));
1612 if (ret)
1613 return ret;
1622 1614
1623 /* Current sensors */ 1615 /* Current sensors */
1624 pmbus_add_sensor_attrs(client, data, "curr", current_attributes, 1616 ret = pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
1625 ARRAY_SIZE(current_attributes)); 1617 ARRAY_SIZE(current_attributes));
1618 if (ret)
1619 return ret;
1626 1620
1627 /* Power sensors */ 1621 /* Power sensors */
1628 pmbus_add_sensor_attrs(client, data, "power", power_attributes, 1622 ret = pmbus_add_sensor_attrs(client, data, "power", power_attributes,
1629 ARRAY_SIZE(power_attributes)); 1623 ARRAY_SIZE(power_attributes));
1624 if (ret)
1625 return ret;
1630 1626
1631 /* Temperature sensors */ 1627 /* Temperature sensors */
1632 pmbus_add_sensor_attrs(client, data, "temp", temp_attributes, 1628 ret = pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
1633 ARRAY_SIZE(temp_attributes)); 1629 ARRAY_SIZE(temp_attributes));
1630 if (ret)
1631 return ret;
1634 1632
1635 /* Fans */ 1633 /* Fans */
1636 pmbus_add_fan_attributes(client, data); 1634 ret = pmbus_add_fan_attributes(client, data);
1635 return ret;
1637} 1636}
1638 1637
1639/* 1638/*
@@ -1672,127 +1671,119 @@ static int pmbus_identify_common(struct i2c_client *client,
1672 } 1671 }
1673 } 1672 }
1674 1673
1675 /* Determine maximum number of sensors, booleans, and labels */
1676 pmbus_find_max_attr(client, data);
1677 pmbus_clear_fault_page(client, 0); 1674 pmbus_clear_fault_page(client, 0);
1678 return 0; 1675 return 0;
1679} 1676}
1680 1677
1681int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, 1678static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1682 struct pmbus_driver_info *info) 1679 struct pmbus_driver_info *info)
1683{ 1680{
1684 const struct pmbus_platform_data *pdata = client->dev.platform_data; 1681 struct device *dev = &client->dev;
1685 struct pmbus_data *data;
1686 int ret; 1682 int ret;
1687 1683
1688 if (!info) { 1684 /*
1689 dev_err(&client->dev, "Missing chip information"); 1685 * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
1690 return -ENODEV; 1686 * to use PMBUS_STATUS_WORD instead if that is the case.
1691 } 1687 * Bail out if both registers are not supported.
1692 1688 */
1693 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE 1689 data->status_register = PMBUS_STATUS_BYTE;
1694 | I2C_FUNC_SMBUS_BYTE_DATA 1690 ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
1695 | I2C_FUNC_SMBUS_WORD_DATA)) 1691 if (ret < 0 || ret == 0xff) {
1696 return -ENODEV; 1692 data->status_register = PMBUS_STATUS_WORD;
1697 1693 ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
1698 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); 1694 if (ret < 0 || ret == 0xffff) {
1699 if (!data) { 1695 dev_err(dev, "PMBus status register not found\n");
1700 dev_err(&client->dev, "No memory to allocate driver data\n"); 1696 return -ENODEV;
1701 return -ENOMEM; 1697 }
1702 }
1703
1704 i2c_set_clientdata(client, data);
1705 mutex_init(&data->update_lock);
1706
1707 /* Bail out if PMBus status register does not exist. */
1708 if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
1709 dev_err(&client->dev, "PMBus status register not found\n");
1710 return -ENODEV;
1711 } 1698 }
1712 1699
1713 if (pdata)
1714 data->flags = pdata->flags;
1715 data->info = info;
1716
1717 pmbus_clear_faults(client); 1700 pmbus_clear_faults(client);
1718 1701
1719 if (info->identify) { 1702 if (info->identify) {
1720 ret = (*info->identify)(client, info); 1703 ret = (*info->identify)(client, info);
1721 if (ret < 0) { 1704 if (ret < 0) {
1722 dev_err(&client->dev, "Chip identification failed\n"); 1705 dev_err(dev, "Chip identification failed\n");
1723 return ret; 1706 return ret;
1724 } 1707 }
1725 } 1708 }
1726 1709
1727 if (info->pages <= 0 || info->pages > PMBUS_PAGES) { 1710 if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
1728 dev_err(&client->dev, "Bad number of PMBus pages: %d\n", 1711 dev_err(dev, "Bad number of PMBus pages: %d\n", info->pages);
1729 info->pages);
1730 return -ENODEV; 1712 return -ENODEV;
1731 } 1713 }
1732 1714
1733 ret = pmbus_identify_common(client, data); 1715 ret = pmbus_identify_common(client, data);
1734 if (ret < 0) { 1716 if (ret < 0) {
1735 dev_err(&client->dev, "Failed to identify chip capabilities\n"); 1717 dev_err(dev, "Failed to identify chip capabilities\n");
1736 return ret; 1718 return ret;
1737 } 1719 }
1720 return 0;
1721}
1738 1722
1739 ret = -ENOMEM; 1723int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
1740 data->sensors = devm_kzalloc(&client->dev, sizeof(struct pmbus_sensor) 1724 struct pmbus_driver_info *info)
1741 * data->max_sensors, GFP_KERNEL); 1725{
1742 if (!data->sensors) { 1726 struct device *dev = &client->dev;
1743 dev_err(&client->dev, "No memory to allocate sensor data\n"); 1727 const struct pmbus_platform_data *pdata = dev->platform_data;
1744 return -ENOMEM; 1728 struct pmbus_data *data;
1745 } 1729 int ret;
1746 1730
1747 data->booleans = devm_kzalloc(&client->dev, sizeof(struct pmbus_boolean) 1731 if (!info)
1748 * data->max_booleans, GFP_KERNEL); 1732 return -ENODEV;
1749 if (!data->booleans) {
1750 dev_err(&client->dev, "No memory to allocate boolean data\n");
1751 return -ENOMEM;
1752 }
1753 1733
1754 data->labels = devm_kzalloc(&client->dev, sizeof(struct pmbus_label) 1734 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
1755 * data->max_labels, GFP_KERNEL); 1735 | I2C_FUNC_SMBUS_BYTE_DATA
1756 if (!data->labels) { 1736 | I2C_FUNC_SMBUS_WORD_DATA))
1757 dev_err(&client->dev, "No memory to allocate label data\n"); 1737 return -ENODEV;
1758 return -ENOMEM;
1759 }
1760 1738
1761 data->attributes = devm_kzalloc(&client->dev, sizeof(struct attribute *) 1739 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
1762 * data->max_attributes, GFP_KERNEL); 1740 if (!data)
1763 if (!data->attributes) {
1764 dev_err(&client->dev, "No memory to allocate attribute data\n");
1765 return -ENOMEM; 1741 return -ENOMEM;
1766 }
1767 1742
1768 pmbus_find_attributes(client, data); 1743 i2c_set_clientdata(client, data);
1744 mutex_init(&data->update_lock);
1745 data->dev = dev;
1746
1747 if (pdata)
1748 data->flags = pdata->flags;
1749 data->info = info;
1750
1751 ret = pmbus_init_common(client, data, info);
1752 if (ret < 0)
1753 return ret;
1754
1755 ret = pmbus_find_attributes(client, data);
1756 if (ret)
1757 goto out_kfree;
1769 1758
1770 /* 1759 /*
1771 * If there are no attributes, something is wrong. 1760 * If there are no attributes, something is wrong.
1772 * Bail out instead of trying to register nothing. 1761 * Bail out instead of trying to register nothing.
1773 */ 1762 */
1774 if (!data->num_attributes) { 1763 if (!data->num_attributes) {
1775 dev_err(&client->dev, "No attributes found\n"); 1764 dev_err(dev, "No attributes found\n");
1776 return -ENODEV; 1765 ret = -ENODEV;
1766 goto out_kfree;
1777 } 1767 }
1778 1768
1779 /* Register sysfs hooks */ 1769 /* Register sysfs hooks */
1780 data->group.attrs = data->attributes; 1770 ret = sysfs_create_group(&dev->kobj, &data->group);
1781 ret = sysfs_create_group(&client->dev.kobj, &data->group);
1782 if (ret) { 1771 if (ret) {
1783 dev_err(&client->dev, "Failed to create sysfs entries\n"); 1772 dev_err(dev, "Failed to create sysfs entries\n");
1784 return ret; 1773 goto out_kfree;
1785 } 1774 }
1786 data->hwmon_dev = hwmon_device_register(&client->dev); 1775 data->hwmon_dev = hwmon_device_register(dev);
1787 if (IS_ERR(data->hwmon_dev)) { 1776 if (IS_ERR(data->hwmon_dev)) {
1788 ret = PTR_ERR(data->hwmon_dev); 1777 ret = PTR_ERR(data->hwmon_dev);
1789 dev_err(&client->dev, "Failed to register hwmon device\n"); 1778 dev_err(dev, "Failed to register hwmon device\n");
1790 goto out_hwmon_device_register; 1779 goto out_hwmon_device_register;
1791 } 1780 }
1792 return 0; 1781 return 0;
1793 1782
1794out_hwmon_device_register: 1783out_hwmon_device_register:
1795 sysfs_remove_group(&client->dev.kobj, &data->group); 1784 sysfs_remove_group(&dev->kobj, &data->group);
1785out_kfree:
1786 kfree(data->group.attrs);
1796 return ret; 1787 return ret;
1797} 1788}
1798EXPORT_SYMBOL_GPL(pmbus_do_probe); 1789EXPORT_SYMBOL_GPL(pmbus_do_probe);
@@ -1802,6 +1793,7 @@ int pmbus_do_remove(struct i2c_client *client)
1802 struct pmbus_data *data = i2c_get_clientdata(client); 1793 struct pmbus_data *data = i2c_get_clientdata(client);
1803 hwmon_device_unregister(data->hwmon_dev); 1794 hwmon_device_unregister(data->hwmon_dev);
1804 sysfs_remove_group(&client->dev.kobj, &data->group); 1795 sysfs_remove_group(&client->dev.kobj, &data->group);
1796 kfree(data->group.attrs);
1805 return 0; 1797 return 0;
1806} 1798}
1807EXPORT_SYMBOL_GPL(pmbus_do_remove); 1799EXPORT_SYMBOL_GPL(pmbus_do_remove);
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index fc5eed8e85bb..819644121259 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -2,6 +2,7 @@
2 * Hardware monitoring driver for ZL6100 and compatibles 2 * Hardware monitoring driver for ZL6100 and compatibles
3 * 3 *
4 * Copyright (c) 2011 Ericsson AB. 4 * Copyright (c) 2011 Ericsson AB.
5 * Copyright (c) 2012 Guenter Roeck
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -45,12 +46,87 @@ struct zl6100_data {
45 46
46#define ZL6100_MFR_XTEMP_ENABLE (1 << 7) 47#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
47 48
49#define MFR_VMON_OV_FAULT_LIMIT 0xf5
50#define MFR_VMON_UV_FAULT_LIMIT 0xf6
51#define MFR_READ_VMON 0xf7
52
53#define VMON_UV_WARNING (1 << 5)
54#define VMON_OV_WARNING (1 << 4)
55#define VMON_UV_FAULT (1 << 1)
56#define VMON_OV_FAULT (1 << 0)
57
48#define ZL6100_WAIT_TIME 1000 /* uS */ 58#define ZL6100_WAIT_TIME 1000 /* uS */
49 59
50static ushort delay = ZL6100_WAIT_TIME; 60static ushort delay = ZL6100_WAIT_TIME;
51module_param(delay, ushort, 0644); 61module_param(delay, ushort, 0644);
52MODULE_PARM_DESC(delay, "Delay between chip accesses in uS"); 62MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
53 63
64/* Convert linear sensor value to milli-units */
65static long zl6100_l2d(s16 l)
66{
67 s16 exponent;
68 s32 mantissa;
69 long val;
70
71 exponent = l >> 11;
72 mantissa = ((s16)((l & 0x7ff) << 5)) >> 5;
73
74 val = mantissa;
75
76 /* scale result to milli-units */
77 val = val * 1000L;
78
79 if (exponent >= 0)
80 val <<= exponent;
81 else
82 val >>= -exponent;
83
84 return val;
85}
86
87#define MAX_MANTISSA (1023 * 1000)
88#define MIN_MANTISSA (511 * 1000)
89
90static u16 zl6100_d2l(long val)
91{
92 s16 exponent = 0, mantissa;
93 bool negative = false;
94
95 /* simple case */
96 if (val == 0)
97 return 0;
98
99 if (val < 0) {
100 negative = true;
101 val = -val;
102 }
103
104 /* Reduce large mantissa until it fits into 10 bit */
105 while (val >= MAX_MANTISSA && exponent < 15) {
106 exponent++;
107 val >>= 1;
108 }
109 /* Increase small mantissa to improve precision */
110 while (val < MIN_MANTISSA && exponent > -15) {
111 exponent--;
112 val <<= 1;
113 }
114
115 /* Convert mantissa from milli-units to units */
116 mantissa = DIV_ROUND_CLOSEST(val, 1000);
117
118 /* Ensure that resulting number is within range */
119 if (mantissa > 0x3ff)
120 mantissa = 0x3ff;
121
122 /* restore sign */
123 if (negative)
124 mantissa = -mantissa;
125
126 /* Convert to 5 bit exponent, 11 bit mantissa */
127 return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
128}
129
54/* Some chips need a delay between accesses */ 130/* Some chips need a delay between accesses */
55static inline void zl6100_wait(const struct zl6100_data *data) 131static inline void zl6100_wait(const struct zl6100_data *data)
56{ 132{
@@ -65,9 +141,9 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
65{ 141{
66 const struct pmbus_driver_info *info = pmbus_get_driver_info(client); 142 const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
67 struct zl6100_data *data = to_zl6100_data(info); 143 struct zl6100_data *data = to_zl6100_data(info);
68 int ret; 144 int ret, vreg;
69 145
70 if (page || reg >= PMBUS_VIRT_BASE) 146 if (page > 0)
71 return -ENXIO; 147 return -ENXIO;
72 148
73 if (data->id == zl2005) { 149 if (data->id == zl2005) {
@@ -83,9 +159,39 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
83 } 159 }
84 } 160 }
85 161
162 switch (reg) {
163 case PMBUS_VIRT_READ_VMON:
164 vreg = MFR_READ_VMON;
165 break;
166 case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
167 case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
168 vreg = MFR_VMON_OV_FAULT_LIMIT;
169 break;
170 case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
171 case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
172 vreg = MFR_VMON_UV_FAULT_LIMIT;
173 break;
174 default:
175 if (reg >= PMBUS_VIRT_BASE)
176 return -ENXIO;
177 vreg = reg;
178 break;
179 }
180
86 zl6100_wait(data); 181 zl6100_wait(data);
87 ret = pmbus_read_word_data(client, page, reg); 182 ret = pmbus_read_word_data(client, page, vreg);
88 data->access = ktime_get(); 183 data->access = ktime_get();
184 if (ret < 0)
185 return ret;
186
187 switch (reg) {
188 case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
189 ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 9, 10));
190 break;
191 case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
192 ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 11, 10));
193 break;
194 }
89 195
90 return ret; 196 return ret;
91} 197}
@@ -94,13 +200,35 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
94{ 200{
95 const struct pmbus_driver_info *info = pmbus_get_driver_info(client); 201 const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
96 struct zl6100_data *data = to_zl6100_data(info); 202 struct zl6100_data *data = to_zl6100_data(info);
97 int ret; 203 int ret, status;
98 204
99 if (page > 0) 205 if (page > 0)
100 return -ENXIO; 206 return -ENXIO;
101 207
102 zl6100_wait(data); 208 zl6100_wait(data);
103 ret = pmbus_read_byte_data(client, page, reg); 209
210 switch (reg) {
211 case PMBUS_VIRT_STATUS_VMON:
212 ret = pmbus_read_byte_data(client, 0,
213 PMBUS_STATUS_MFR_SPECIFIC);
214 if (ret < 0)
215 break;
216
217 status = 0;
218 if (ret & VMON_UV_WARNING)
219 status |= PB_VOLTAGE_UV_WARNING;
220 if (ret & VMON_OV_WARNING)
221 status |= PB_VOLTAGE_OV_WARNING;
222 if (ret & VMON_UV_FAULT)
223 status |= PB_VOLTAGE_UV_FAULT;
224 if (ret & VMON_OV_FAULT)
225 status |= PB_VOLTAGE_OV_FAULT;
226 ret = status;
227 break;
228 default:
229 ret = pmbus_read_byte_data(client, page, reg);
230 break;
231 }
104 data->access = ktime_get(); 232 data->access = ktime_get();
105 233
106 return ret; 234 return ret;
@@ -111,13 +239,38 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
111{ 239{
112 const struct pmbus_driver_info *info = pmbus_get_driver_info(client); 240 const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
113 struct zl6100_data *data = to_zl6100_data(info); 241 struct zl6100_data *data = to_zl6100_data(info);
114 int ret; 242 int ret, vreg;
115 243
116 if (page || reg >= PMBUS_VIRT_BASE) 244 if (page > 0)
117 return -ENXIO; 245 return -ENXIO;
118 246
247 switch (reg) {
248 case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
249 word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 9));
250 vreg = MFR_VMON_OV_FAULT_LIMIT;
251 pmbus_clear_cache(client);
252 break;
253 case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
254 vreg = MFR_VMON_OV_FAULT_LIMIT;
255 pmbus_clear_cache(client);
256 break;
257 case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
258 word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 11));
259 vreg = MFR_VMON_UV_FAULT_LIMIT;
260 pmbus_clear_cache(client);
261 break;
262 case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
263 vreg = MFR_VMON_UV_FAULT_LIMIT;
264 pmbus_clear_cache(client);
265 break;
266 default:
267 if (reg >= PMBUS_VIRT_BASE)
268 return -ENXIO;
269 vreg = reg;
270 }
271
119 zl6100_wait(data); 272 zl6100_wait(data);
120 ret = pmbus_write_word_data(client, page, reg, word); 273 ret = pmbus_write_word_data(client, page, vreg, word);
121 data->access = ktime_get(); 274 data->access = ktime_get();
122 275
123 return ret; 276 return ret;
@@ -225,6 +378,13 @@ static int zl6100_probe(struct i2c_client *client,
225 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT 378 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
226 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP; 379 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
227 380
381 /*
382 * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
383 * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
384 */
385 if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
386 info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
387
228 ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG); 388 ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
229 if (ret < 0) 389 if (ret < 0)
230 return ret; 390 return ret;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 1c85d39df171..bfe326e896df 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -139,12 +139,12 @@ static const u8 sht15_crc8_table[] = {
139 * @reg: associated regulator (if specified). 139 * @reg: associated regulator (if specified).
140 * @nb: notifier block to handle notifications of voltage 140 * @nb: notifier block to handle notifications of voltage
141 * changes. 141 * changes.
142 * @supply_uV: local copy of supply voltage used to allow use of 142 * @supply_uv: local copy of supply voltage used to allow use of
143 * regulator consumer if available. 143 * regulator consumer if available.
144 * @supply_uV_valid: indicates that an updated value has not yet been 144 * @supply_uv_valid: indicates that an updated value has not yet been
145 * obtained from the regulator and so any calculations 145 * obtained from the regulator and so any calculations
146 * based upon it will be invalid. 146 * based upon it will be invalid.
147 * @update_supply_work: work struct that is used to update the supply_uV. 147 * @update_supply_work: work struct that is used to update the supply_uv.
148 * @interrupt_handled: flag used to indicate a handler has been scheduled. 148 * @interrupt_handled: flag used to indicate a handler has been scheduled.
149 */ 149 */
150struct sht15_data { 150struct sht15_data {
@@ -166,8 +166,8 @@ struct sht15_data {
166 struct device *hwmon_dev; 166 struct device *hwmon_dev;
167 struct regulator *reg; 167 struct regulator *reg;
168 struct notifier_block nb; 168 struct notifier_block nb;
169 int supply_uV; 169 int supply_uv;
170 bool supply_uV_valid; 170 bool supply_uv_valid;
171 struct work_struct update_supply_work; 171 struct work_struct update_supply_work;
172 atomic_t interrupt_handled; 172 atomic_t interrupt_handled;
173}; 173};
@@ -212,11 +212,13 @@ static u8 sht15_crc8(struct sht15_data *data,
212 * 212 *
213 * This implements section 3.4 of the data sheet 213 * This implements section 3.4 of the data sheet
214 */ 214 */
215static void sht15_connection_reset(struct sht15_data *data) 215static int sht15_connection_reset(struct sht15_data *data)
216{ 216{
217 int i; 217 int i, err;
218 218
219 gpio_direction_output(data->pdata->gpio_data, 1); 219 err = gpio_direction_output(data->pdata->gpio_data, 1);
220 if (err)
221 return err;
220 ndelay(SHT15_TSCKL); 222 ndelay(SHT15_TSCKL);
221 gpio_set_value(data->pdata->gpio_sck, 0); 223 gpio_set_value(data->pdata->gpio_sck, 0);
222 ndelay(SHT15_TSCKL); 224 ndelay(SHT15_TSCKL);
@@ -226,6 +228,7 @@ static void sht15_connection_reset(struct sht15_data *data)
226 gpio_set_value(data->pdata->gpio_sck, 0); 228 gpio_set_value(data->pdata->gpio_sck, 0);
227 ndelay(SHT15_TSCKL); 229 ndelay(SHT15_TSCKL);
228 } 230 }
231 return 0;
229} 232}
230 233
231/** 234/**
@@ -251,10 +254,14 @@ static inline void sht15_send_bit(struct sht15_data *data, int val)
251 * conservative ones used in implementation. This implements 254 * conservative ones used in implementation. This implements
252 * figure 12 on the data sheet. 255 * figure 12 on the data sheet.
253 */ 256 */
254static void sht15_transmission_start(struct sht15_data *data) 257static int sht15_transmission_start(struct sht15_data *data)
255{ 258{
259 int err;
260
256 /* ensure data is high and output */ 261 /* ensure data is high and output */
257 gpio_direction_output(data->pdata->gpio_data, 1); 262 err = gpio_direction_output(data->pdata->gpio_data, 1);
263 if (err)
264 return err;
258 ndelay(SHT15_TSU); 265 ndelay(SHT15_TSU);
259 gpio_set_value(data->pdata->gpio_sck, 0); 266 gpio_set_value(data->pdata->gpio_sck, 0);
260 ndelay(SHT15_TSCKL); 267 ndelay(SHT15_TSCKL);
@@ -270,6 +277,7 @@ static void sht15_transmission_start(struct sht15_data *data)
270 ndelay(SHT15_TSU); 277 ndelay(SHT15_TSU);
271 gpio_set_value(data->pdata->gpio_sck, 0); 278 gpio_set_value(data->pdata->gpio_sck, 0);
272 ndelay(SHT15_TSCKL); 279 ndelay(SHT15_TSCKL);
280 return 0;
273} 281}
274 282
275/** 283/**
@@ -293,13 +301,19 @@ static void sht15_send_byte(struct sht15_data *data, u8 byte)
293 */ 301 */
294static int sht15_wait_for_response(struct sht15_data *data) 302static int sht15_wait_for_response(struct sht15_data *data)
295{ 303{
296 gpio_direction_input(data->pdata->gpio_data); 304 int err;
305
306 err = gpio_direction_input(data->pdata->gpio_data);
307 if (err)
308 return err;
297 gpio_set_value(data->pdata->gpio_sck, 1); 309 gpio_set_value(data->pdata->gpio_sck, 1);
298 ndelay(SHT15_TSCKH); 310 ndelay(SHT15_TSCKH);
299 if (gpio_get_value(data->pdata->gpio_data)) { 311 if (gpio_get_value(data->pdata->gpio_data)) {
300 gpio_set_value(data->pdata->gpio_sck, 0); 312 gpio_set_value(data->pdata->gpio_sck, 0);
301 dev_err(data->dev, "Command not acknowledged\n"); 313 dev_err(data->dev, "Command not acknowledged\n");
302 sht15_connection_reset(data); 314 err = sht15_connection_reset(data);
315 if (err)
316 return err;
303 return -EIO; 317 return -EIO;
304 } 318 }
305 gpio_set_value(data->pdata->gpio_sck, 0); 319 gpio_set_value(data->pdata->gpio_sck, 0);
@@ -317,12 +331,13 @@ static int sht15_wait_for_response(struct sht15_data *data)
317 */ 331 */
318static int sht15_send_cmd(struct sht15_data *data, u8 cmd) 332static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
319{ 333{
320 int ret = 0; 334 int err;
321 335
322 sht15_transmission_start(data); 336 err = sht15_transmission_start(data);
337 if (err)
338 return err;
323 sht15_send_byte(data, cmd); 339 sht15_send_byte(data, cmd);
324 ret = sht15_wait_for_response(data); 340 return sht15_wait_for_response(data);
325 return ret;
326} 341}
327 342
328/** 343/**
@@ -352,9 +367,13 @@ static int sht15_soft_reset(struct sht15_data *data)
352 * Each byte of data is acknowledged by pulling the data line 367 * Each byte of data is acknowledged by pulling the data line
353 * low for one clock pulse. 368 * low for one clock pulse.
354 */ 369 */
355static void sht15_ack(struct sht15_data *data) 370static int sht15_ack(struct sht15_data *data)
356{ 371{
357 gpio_direction_output(data->pdata->gpio_data, 0); 372 int err;
373
374 err = gpio_direction_output(data->pdata->gpio_data, 0);
375 if (err)
376 return err;
358 ndelay(SHT15_TSU); 377 ndelay(SHT15_TSU);
359 gpio_set_value(data->pdata->gpio_sck, 1); 378 gpio_set_value(data->pdata->gpio_sck, 1);
360 ndelay(SHT15_TSU); 379 ndelay(SHT15_TSU);
@@ -362,7 +381,7 @@ static void sht15_ack(struct sht15_data *data)
362 ndelay(SHT15_TSU); 381 ndelay(SHT15_TSU);
363 gpio_set_value(data->pdata->gpio_data, 1); 382 gpio_set_value(data->pdata->gpio_data, 1);
364 383
365 gpio_direction_input(data->pdata->gpio_data); 384 return gpio_direction_input(data->pdata->gpio_data);
366} 385}
367 386
368/** 387/**
@@ -371,14 +390,19 @@ static void sht15_ack(struct sht15_data *data)
371 * 390 *
372 * This is basically a NAK (single clock pulse, data high). 391 * This is basically a NAK (single clock pulse, data high).
373 */ 392 */
374static void sht15_end_transmission(struct sht15_data *data) 393static int sht15_end_transmission(struct sht15_data *data)
375{ 394{
376 gpio_direction_output(data->pdata->gpio_data, 1); 395 int err;
396
397 err = gpio_direction_output(data->pdata->gpio_data, 1);
398 if (err)
399 return err;
377 ndelay(SHT15_TSU); 400 ndelay(SHT15_TSU);
378 gpio_set_value(data->pdata->gpio_sck, 1); 401 gpio_set_value(data->pdata->gpio_sck, 1);
379 ndelay(SHT15_TSCKH); 402 ndelay(SHT15_TSCKH);
380 gpio_set_value(data->pdata->gpio_sck, 0); 403 gpio_set_value(data->pdata->gpio_sck, 0);
381 ndelay(SHT15_TSCKL); 404 ndelay(SHT15_TSCKL);
405 return 0;
382} 406}
383 407
384/** 408/**
@@ -410,17 +434,19 @@ static u8 sht15_read_byte(struct sht15_data *data)
410 */ 434 */
411static int sht15_send_status(struct sht15_data *data, u8 status) 435static int sht15_send_status(struct sht15_data *data, u8 status)
412{ 436{
413 int ret; 437 int err;
414 438
415 ret = sht15_send_cmd(data, SHT15_WRITE_STATUS); 439 err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
416 if (ret) 440 if (err)
417 return ret; 441 return err;
418 gpio_direction_output(data->pdata->gpio_data, 1); 442 err = gpio_direction_output(data->pdata->gpio_data, 1);
443 if (err)
444 return err;
419 ndelay(SHT15_TSU); 445 ndelay(SHT15_TSU);
420 sht15_send_byte(data, status); 446 sht15_send_byte(data, status);
421 ret = sht15_wait_for_response(data); 447 err = sht15_wait_for_response(data);
422 if (ret) 448 if (err)
423 return ret; 449 return err;
424 450
425 data->val_status = status; 451 data->val_status = status;
426 return 0; 452 return 0;
@@ -446,7 +472,7 @@ static int sht15_update_status(struct sht15_data *data)
446 || !data->status_valid) { 472 || !data->status_valid) {
447 ret = sht15_send_cmd(data, SHT15_READ_STATUS); 473 ret = sht15_send_cmd(data, SHT15_READ_STATUS);
448 if (ret) 474 if (ret)
449 goto error_ret; 475 goto unlock;
450 status = sht15_read_byte(data); 476 status = sht15_read_byte(data);
451 477
452 if (data->checksumming) { 478 if (data->checksumming) {
@@ -458,7 +484,9 @@ static int sht15_update_status(struct sht15_data *data)
458 == dev_checksum); 484 == dev_checksum);
459 } 485 }
460 486
461 sht15_end_transmission(data); 487 ret = sht15_end_transmission(data);
488 if (ret)
489 goto unlock;
462 490
463 /* 491 /*
464 * Perform checksum validation on the received data. 492 * Perform checksum validation on the received data.
@@ -469,27 +497,27 @@ static int sht15_update_status(struct sht15_data *data)
469 previous_config = data->val_status & 0x07; 497 previous_config = data->val_status & 0x07;
470 ret = sht15_soft_reset(data); 498 ret = sht15_soft_reset(data);
471 if (ret) 499 if (ret)
472 goto error_ret; 500 goto unlock;
473 if (previous_config) { 501 if (previous_config) {
474 ret = sht15_send_status(data, previous_config); 502 ret = sht15_send_status(data, previous_config);
475 if (ret) { 503 if (ret) {
476 dev_err(data->dev, 504 dev_err(data->dev,
477 "CRC validation failed, unable " 505 "CRC validation failed, unable "
478 "to restore device settings\n"); 506 "to restore device settings\n");
479 goto error_ret; 507 goto unlock;
480 } 508 }
481 } 509 }
482 ret = -EAGAIN; 510 ret = -EAGAIN;
483 goto error_ret; 511 goto unlock;
484 } 512 }
485 513
486 data->val_status = status; 514 data->val_status = status;
487 data->status_valid = true; 515 data->status_valid = true;
488 data->last_status = jiffies; 516 data->last_status = jiffies;
489 } 517 }
490error_ret:
491 mutex_unlock(&data->read_lock);
492 518
519unlock:
520 mutex_unlock(&data->read_lock);
493 return ret; 521 return ret;
494} 522}
495 523
@@ -511,7 +539,9 @@ static int sht15_measurement(struct sht15_data *data,
511 if (ret) 539 if (ret)
512 return ret; 540 return ret;
513 541
514 gpio_direction_input(data->pdata->gpio_data); 542 ret = gpio_direction_input(data->pdata->gpio_data);
543 if (ret)
544 return ret;
515 atomic_set(&data->interrupt_handled, 0); 545 atomic_set(&data->interrupt_handled, 0);
516 546
517 enable_irq(gpio_to_irq(data->pdata->gpio_data)); 547 enable_irq(gpio_to_irq(data->pdata->gpio_data));
@@ -524,9 +554,14 @@ static int sht15_measurement(struct sht15_data *data,
524 ret = wait_event_timeout(data->wait_queue, 554 ret = wait_event_timeout(data->wait_queue,
525 (data->state == SHT15_READING_NOTHING), 555 (data->state == SHT15_READING_NOTHING),
526 msecs_to_jiffies(timeout_msecs)); 556 msecs_to_jiffies(timeout_msecs));
527 if (ret == 0) {/* timeout occurred */ 557 if (data->state != SHT15_READING_NOTHING) { /* I/O error occurred */
558 data->state = SHT15_READING_NOTHING;
559 return -EIO;
560 } else if (ret == 0) { /* timeout occurred */
528 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); 561 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
529 sht15_connection_reset(data); 562 ret = sht15_connection_reset(data);
563 if (ret)
564 return ret;
530 return -ETIME; 565 return -ETIME;
531 } 566 }
532 567
@@ -570,17 +605,17 @@ static int sht15_update_measurements(struct sht15_data *data)
570 data->state = SHT15_READING_HUMID; 605 data->state = SHT15_READING_HUMID;
571 ret = sht15_measurement(data, SHT15_MEASURE_RH, 160); 606 ret = sht15_measurement(data, SHT15_MEASURE_RH, 160);
572 if (ret) 607 if (ret)
573 goto error_ret; 608 goto unlock;
574 data->state = SHT15_READING_TEMP; 609 data->state = SHT15_READING_TEMP;
575 ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400); 610 ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400);
576 if (ret) 611 if (ret)
577 goto error_ret; 612 goto unlock;
578 data->measurements_valid = true; 613 data->measurements_valid = true;
579 data->last_measurement = jiffies; 614 data->last_measurement = jiffies;
580 } 615 }
581error_ret:
582 mutex_unlock(&data->read_lock);
583 616
617unlock:
618 mutex_unlock(&data->read_lock);
584 return ret; 619 return ret;
585} 620}
586 621
@@ -598,8 +633,8 @@ static inline int sht15_calc_temp(struct sht15_data *data)
598 633
599 for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) 634 for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
600 /* Find pointer to interpolate */ 635 /* Find pointer to interpolate */
601 if (data->supply_uV > temppoints[i - 1].vdd) { 636 if (data->supply_uv > temppoints[i - 1].vdd) {
602 d1 = (data->supply_uV - temppoints[i - 1].vdd) 637 d1 = (data->supply_uv - temppoints[i - 1].vdd)
603 * (temppoints[i].d1 - temppoints[i - 1].d1) 638 * (temppoints[i].d1 - temppoints[i - 1].d1)
604 / (temppoints[i].vdd - temppoints[i - 1].vdd) 639 / (temppoints[i].vdd - temppoints[i - 1].vdd)
605 + temppoints[i - 1].d1; 640 + temppoints[i - 1].d1;
@@ -818,7 +853,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
818 /* Read the data back from the device */ 853 /* Read the data back from the device */
819 val = sht15_read_byte(data); 854 val = sht15_read_byte(data);
820 val <<= 8; 855 val <<= 8;
821 sht15_ack(data); 856 if (sht15_ack(data))
857 goto wakeup;
822 val |= sht15_read_byte(data); 858 val |= sht15_read_byte(data);
823 859
824 if (data->checksumming) { 860 if (data->checksumming) {
@@ -826,7 +862,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
826 * Ask the device for a checksum and read it back. 862 * Ask the device for a checksum and read it back.
827 * Note: the device sends the checksum byte reversed. 863 * Note: the device sends the checksum byte reversed.
828 */ 864 */
829 sht15_ack(data); 865 if (sht15_ack(data))
866 goto wakeup;
830 dev_checksum = sht15_reverse(sht15_read_byte(data)); 867 dev_checksum = sht15_reverse(sht15_read_byte(data));
831 checksum_vals[0] = (data->state == SHT15_READING_TEMP) ? 868 checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
832 SHT15_MEASURE_TEMP : SHT15_MEASURE_RH; 869 SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
@@ -837,7 +874,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
837 } 874 }
838 875
839 /* Tell the device we are done */ 876 /* Tell the device we are done */
840 sht15_end_transmission(data); 877 if (sht15_end_transmission(data))
878 goto wakeup;
841 879
842 switch (data->state) { 880 switch (data->state) {
843 case SHT15_READING_TEMP: 881 case SHT15_READING_TEMP:
@@ -851,6 +889,7 @@ static void sht15_bh_read_data(struct work_struct *work_s)
851 } 889 }
852 890
853 data->state = SHT15_READING_NOTHING; 891 data->state = SHT15_READING_NOTHING;
892wakeup:
854 wake_up(&data->wait_queue); 893 wake_up(&data->wait_queue);
855} 894}
856 895
@@ -859,7 +898,7 @@ static void sht15_update_voltage(struct work_struct *work_s)
859 struct sht15_data *data 898 struct sht15_data *data
860 = container_of(work_s, struct sht15_data, 899 = container_of(work_s, struct sht15_data,
861 update_supply_work); 900 update_supply_work);
862 data->supply_uV = regulator_get_voltage(data->reg); 901 data->supply_uv = regulator_get_voltage(data->reg);
863} 902}
864 903
865/** 904/**
@@ -878,7 +917,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
878 struct sht15_data *data = container_of(nb, struct sht15_data, nb); 917 struct sht15_data *data = container_of(nb, struct sht15_data, nb);
879 918
880 if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) 919 if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
881 data->supply_uV_valid = false; 920 data->supply_uv_valid = false;
882 schedule_work(&data->update_supply_work); 921 schedule_work(&data->update_supply_work);
883 922
884 return NOTIFY_OK; 923 return NOTIFY_OK;
@@ -906,7 +945,7 @@ static int sht15_probe(struct platform_device *pdev)
906 return -EINVAL; 945 return -EINVAL;
907 } 946 }
908 data->pdata = pdev->dev.platform_data; 947 data->pdata = pdev->dev.platform_data;
909 data->supply_uV = data->pdata->supply_mv * 1000; 948 data->supply_uv = data->pdata->supply_mv * 1000;
910 if (data->pdata->checksum) 949 if (data->pdata->checksum)
911 data->checksumming = true; 950 data->checksumming = true;
912 if (data->pdata->no_otp_reload) 951 if (data->pdata->no_otp_reload)
@@ -924,7 +963,7 @@ static int sht15_probe(struct platform_device *pdev)
924 963
925 voltage = regulator_get_voltage(data->reg); 964 voltage = regulator_get_voltage(data->reg);
926 if (voltage) 965 if (voltage)
927 data->supply_uV = voltage; 966 data->supply_uv = voltage;
928 967
929 regulator_enable(data->reg); 968 regulator_enable(data->reg);
930 /* 969 /*
@@ -942,17 +981,17 @@ static int sht15_probe(struct platform_device *pdev)
942 } 981 }
943 982
944 /* Try requesting the GPIOs */ 983 /* Try requesting the GPIOs */
945 ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_sck, "SHT15 sck"); 984 ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
985 GPIOF_OUT_INIT_LOW, "SHT15 sck");
946 if (ret) { 986 if (ret) {
947 dev_err(&pdev->dev, "gpio request failed\n"); 987 dev_err(&pdev->dev, "clock line GPIO request failed\n");
948 goto err_release_reg; 988 goto err_release_reg;
949 } 989 }
950 gpio_direction_output(data->pdata->gpio_sck, 0);
951 990
952 ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data, 991 ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
953 "SHT15 data"); 992 "SHT15 data");
954 if (ret) { 993 if (ret) {
955 dev_err(&pdev->dev, "gpio request failed\n"); 994 dev_err(&pdev->dev, "data line GPIO request failed\n");
956 goto err_release_reg; 995 goto err_release_reg;
957 } 996 }
958 997
@@ -966,7 +1005,9 @@ static int sht15_probe(struct platform_device *pdev)
966 goto err_release_reg; 1005 goto err_release_reg;
967 } 1006 }
968 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); 1007 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
969 sht15_connection_reset(data); 1008 ret = sht15_connection_reset(data);
1009 if (ret)
1010 goto err_release_reg;
970 ret = sht15_soft_reset(data); 1011 ret = sht15_soft_reset(data);
971 if (ret) 1012 if (ret)
972 goto err_release_reg; 1013 goto err_release_reg;
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 06ce3c911db9..c35847a1a0a3 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -132,7 +132,7 @@ static struct platform_device *pdev;
132 */ 132 */
133static inline u8 IN_TO_REG(unsigned long val) 133static inline u8 IN_TO_REG(unsigned long val)
134{ 134{
135 unsigned long nval = SENSORS_LIMIT(val, 0, 4080); 135 unsigned long nval = clamp_val(val, 0, 4080);
136 return (nval + 8) / 16; 136 return (nval + 8) / 16;
137} 137}
138#define IN_FROM_REG(val) ((val) * 16) 138#define IN_FROM_REG(val) ((val) * 16)
@@ -141,7 +141,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
141{ 141{
142 if (rpm <= 0) 142 if (rpm <= 0)
143 return 255; 143 return 255;
144 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 144 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
145} 145}
146 146
147static inline int FAN_FROM_REG(u8 val, int div) 147static inline int FAN_FROM_REG(u8 val, int div)
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
159} 159}
160static inline s8 TEMP_TO_REG(int val) 160static inline s8 TEMP_TO_REG(int val)
161{ 161{
162 int nval = SENSORS_LIMIT(val, -54120, 157530) ; 162 int nval = clamp_val(val, -54120, 157530) ;
163 return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830; 163 return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
164} 164}
165 165
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index dba0c567e7a1..6d8255ccf07a 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -326,7 +326,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
326 /* Preserve fan min */ 326 /* Preserve fan min */
327 tmp = 192 - (old_div * (192 - data->fan_preload[nr]) 327 tmp = 192 - (old_div * (192 - data->fan_preload[nr])
328 + new_div / 2) / new_div; 328 + new_div / 2) / new_div;
329 data->fan_preload[nr] = SENSORS_LIMIT(tmp, 0, 191); 329 data->fan_preload[nr] = clamp_val(tmp, 0, 191);
330 smsc47m1_write_value(data, SMSC47M1_REG_FAN_PRELOAD[nr], 330 smsc47m1_write_value(data, SMSC47M1_REG_FAN_PRELOAD[nr],
331 data->fan_preload[nr]); 331 data->fan_preload[nr]);
332 mutex_unlock(&data->update_lock); 332 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 36a3478d0799..efee4c59239f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,7 +77,7 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
77 77
78static inline u8 IN_TO_REG(unsigned long val, int n) 78static inline u8 IN_TO_REG(unsigned long val, int n)
79{ 79{
80 return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255); 80 return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
81} 81}
82 82
83/* 83/*
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
86 */ 86 */
87static inline s8 TEMP_TO_REG(int val) 87static inline s8 TEMP_TO_REG(int val)
88{ 88{
89 return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000); 89 return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
90} 90}
91 91
92static inline int TEMP_FROM_REG(s8 val) 92static inline int TEMP_FROM_REG(s8 val)
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 3c2c48d904e6..4b59eb53b18a 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -134,7 +134,7 @@ static ssize_t set_analog_out(struct device *dev,
134 return err; 134 return err;
135 135
136 mutex_lock(&data->update_lock); 136 mutex_lock(&data->update_lock);
137 data->analog_out = SENSORS_LIMIT(tmp, 0, 255); 137 data->analog_out = clamp_val(tmp, 0, 255);
138 i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT, 138 i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
139 data->analog_out); 139 data->analog_out);
140 140
@@ -187,7 +187,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
187 return err; 187 return err;
188 188
189 mutex_lock(&data->update_lock); 189 mutex_lock(&data->update_lock);
190 data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127); 190 data->temp_min[nr] = clamp_val(val / 1000, -128, 127);
191 i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr], 191 i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
192 data->temp_min[nr]); 192 data->temp_min[nr]);
193 mutex_unlock(&data->update_lock); 193 mutex_unlock(&data->update_lock);
@@ -216,7 +216,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
216 return err; 216 return err;
217 217
218 mutex_lock(&data->update_lock); 218 mutex_lock(&data->update_lock);
219 data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127); 219 data->temp_max[nr] = clamp_val(val / 1000, -128, 127);
220 i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr], 220 i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
221 data->temp_max[nr]); 221 data->temp_max[nr]);
222 mutex_unlock(&data->update_lock); 222 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index b10c3d36ccbc..523dd89ba498 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -115,7 +115,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
115 115
116 if (kstrtol(buf, 10, &val) < 0) 116 if (kstrtol(buf, 10, &val) < 0)
117 return -EINVAL; 117 return -EINVAL;
118 val = SENSORS_LIMIT(val, -256000, 255000); 118 val = clamp_val(val, -256000, 255000);
119 119
120 mutex_lock(&tmp102->lock); 120 mutex_lock(&tmp102->lock);
121 tmp102->temp[sda->index] = val; 121 tmp102->temp[sda->index] = val;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index e62054875164..c85f6967ccc3 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -142,10 +142,10 @@ static int tmp401_register_to_temp(u16 reg, u8 config)
142static u16 tmp401_temp_to_register(long temp, u8 config) 142static u16 tmp401_temp_to_register(long temp, u8 config)
143{ 143{
144 if (config & TMP401_CONFIG_RANGE) { 144 if (config & TMP401_CONFIG_RANGE) {
145 temp = SENSORS_LIMIT(temp, -64000, 191000); 145 temp = clamp_val(temp, -64000, 191000);
146 temp += 64000; 146 temp += 64000;
147 } else 147 } else
148 temp = SENSORS_LIMIT(temp, 0, 127000); 148 temp = clamp_val(temp, 0, 127000);
149 149
150 return (temp * 160 + 312) / 625; 150 return (temp * 160 + 312) / 625;
151} 151}
@@ -163,10 +163,10 @@ static int tmp401_crit_register_to_temp(u8 reg, u8 config)
163static u8 tmp401_crit_temp_to_register(long temp, u8 config) 163static u8 tmp401_crit_temp_to_register(long temp, u8 config)
164{ 164{
165 if (config & TMP401_CONFIG_RANGE) { 165 if (config & TMP401_CONFIG_RANGE) {
166 temp = SENSORS_LIMIT(temp, -64000, 191000); 166 temp = clamp_val(temp, -64000, 191000);
167 temp += 64000; 167 temp += 64000;
168 } else 168 } else
169 temp = SENSORS_LIMIT(temp, 0, 127000); 169 temp = clamp_val(temp, 0, 127000);
170 170
171 return (temp + 500) / 1000; 171 return (temp + 500) / 1000;
172} 172}
@@ -417,14 +417,14 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
417 return -EINVAL; 417 return -EINVAL;
418 418
419 if (data->config & TMP401_CONFIG_RANGE) 419 if (data->config & TMP401_CONFIG_RANGE)
420 val = SENSORS_LIMIT(val, -64000, 191000); 420 val = clamp_val(val, -64000, 191000);
421 else 421 else
422 val = SENSORS_LIMIT(val, 0, 127000); 422 val = clamp_val(val, 0, 127000);
423 423
424 mutex_lock(&data->update_lock); 424 mutex_lock(&data->update_lock);
425 temp = tmp401_crit_register_to_temp(data->temp_crit[index], 425 temp = tmp401_crit_register_to_temp(data->temp_crit[index],
426 data->config); 426 data->config);
427 val = SENSORS_LIMIT(val, temp - 255000, temp); 427 val = clamp_val(val, temp - 255000, temp);
428 reg = ((temp - val) + 500) / 1000; 428 reg = ((temp - val) + 500) / 1000;
429 429
430 i2c_smbus_write_byte_data(to_i2c_client(dev), 430 i2c_smbus_write_byte_data(to_i2c_client(dev),
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index e0e14a9f1658..3123b30208c5 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -135,17 +135,14 @@ static inline u8 IN_TO_REG(long val, int inNum)
135 * for the constants. 135 * for the constants.
136 */ 136 */
137 if (inNum <= 1) 137 if (inNum <= 1)
138 return (u8) 138 return (u8) clamp_val((val * 21024 - 1205000) / 250000, 0, 255);
139 SENSORS_LIMIT((val * 21024 - 1205000) / 250000, 0, 255);
140 else if (inNum == 2) 139 else if (inNum == 2)
141 return (u8) 140 return (u8) clamp_val((val * 15737 - 1205000) / 250000, 0, 255);
142 SENSORS_LIMIT((val * 15737 - 1205000) / 250000, 0, 255);
143 else if (inNum == 3) 141 else if (inNum == 3)
144 return (u8) 142 return (u8) clamp_val((val * 10108 - 1205000) / 250000, 0, 255);
145 SENSORS_LIMIT((val * 10108 - 1205000) / 250000, 0, 255);
146 else 143 else
147 return (u8) 144 return (u8) clamp_val((val * 41714 - 12050000) / 2500000, 0,
148 SENSORS_LIMIT((val * 41714 - 12050000) / 2500000, 0, 255); 145 255);
149} 146}
150 147
151static inline long IN_FROM_REG(u8 val, int inNum) 148static inline long IN_FROM_REG(u8 val, int inNum)
@@ -175,8 +172,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
175{ 172{
176 if (rpm == 0) 173 if (rpm == 0)
177 return 0; 174 return 0;
178 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 175 rpm = clamp_val(rpm, 1, 1000000);
179 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 255); 176 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
180} 177}
181 178
182#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (val) == 255 ? 0 : 1350000 / \ 179#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (val) == 255 ? 0 : 1350000 / \
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 751703059fae..dcc62f80f67b 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -158,7 +158,7 @@ struct vt1211_data {
158#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \ 158#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \
159 (((reg) - 3) * 15882 + 479) / 958 : \ 159 (((reg) - 3) * 15882 + 479) / 958 : \
160 (((reg) - 3) * 10000 + 479) / 958) 160 (((reg) - 3) * 10000 + 479) / 958)
161#define IN_TO_REG(ix, val) (SENSORS_LIMIT((ix) == 5 ? \ 161#define IN_TO_REG(ix, val) (clamp_val((ix) == 5 ? \
162 ((val) * 958 + 7941) / 15882 + 3 : \ 162 ((val) * 958 + 7941) / 15882 + 3 : \
163 ((val) * 958 + 5000) / 10000 + 3, 0, 255)) 163 ((val) * 958 + 5000) / 10000 + 3, 0, 255))
164 164
@@ -173,7 +173,7 @@ struct vt1211_data {
173 (ix) == 1 ? (reg) < 51 ? 0 : \ 173 (ix) == 1 ? (reg) < 51 ? 0 : \
174 ((reg) - 51) * 1000 : \ 174 ((reg) - 51) * 1000 : \
175 ((253 - (reg)) * 2200 + 105) / 210) 175 ((253 - (reg)) * 2200 + 105) / 210)
176#define TEMP_TO_REG(ix, val) SENSORS_LIMIT( \ 176#define TEMP_TO_REG(ix, val) clamp_val( \
177 ((ix) == 0 ? ((val) + 500) / 1000 : \ 177 ((ix) == 0 ? ((val) + 500) / 1000 : \
178 (ix) == 1 ? ((val) + 500) / 1000 + 51 : \ 178 (ix) == 1 ? ((val) + 500) / 1000 + 51 : \
179 253 - ((val) * 210 + 1100) / 2200), 0, 255) 179 253 - ((val) * 210 + 1100) / 2200), 0, 255)
@@ -183,7 +183,7 @@ struct vt1211_data {
183#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \ 183#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
184 1310720 / (reg) / DIV_FROM_REG(div)) 184 1310720 / (reg) / DIV_FROM_REG(div))
185#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \ 185#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \
186 SENSORS_LIMIT((1310720 / (val) / \ 186 clamp_val((1310720 / (val) / \
187 DIV_FROM_REG(div)), 1, 254)) 187 DIV_FROM_REG(div)), 1, 254))
188 188
189/* --------------------------------------------------------------------- 189/* ---------------------------------------------------------------------
@@ -687,7 +687,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
687 data->fan_ctl)); 687 data->fan_ctl));
688 break; 688 break;
689 case SHOW_SET_PWM_FREQ: 689 case SHOW_SET_PWM_FREQ:
690 val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000); 690 val = 135000 / clamp_val(val, 135000 >> 7, 135000);
691 /* calculate tmp = log2(val) */ 691 /* calculate tmp = log2(val) */
692 tmp = 0; 692 tmp = 0;
693 for (val >>= 1; val > 0; val >>= 1) 693 for (val >>= 1; val > 0; val >>= 1)
@@ -845,7 +845,7 @@ static ssize_t set_pwm_auto_point_pwm(struct device *dev,
845 return err; 845 return err;
846 846
847 mutex_lock(&data->update_lock); 847 mutex_lock(&data->update_lock);
848 data->pwm_auto_pwm[ix][ap] = SENSORS_LIMIT(val, 0, 255); 848 data->pwm_auto_pwm[ix][ap] = clamp_val(val, 0, 255);
849 vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap), 849 vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap),
850 data->pwm_auto_pwm[ix][ap]); 850 data->pwm_auto_pwm[ix][ap]);
851 mutex_unlock(&data->update_lock); 851 mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a56355cef184..988a2a796764 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -147,7 +147,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
147{ 147{
148 if (rpm == 0) 148 if (rpm == 0)
149 return 0; 149 return 0;
150 return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255); 150 return clamp_val(1310720 / (rpm * div), 1, 255);
151} 151}
152 152
153#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div))) 153#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
@@ -236,7 +236,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
236 return err; 236 return err;
237 237
238 mutex_lock(&data->update_lock); 238 mutex_lock(&data->update_lock);
239 data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); 239 data->in_min[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
240 vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]); 240 vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
241 mutex_unlock(&data->update_lock); 241 mutex_unlock(&data->update_lock);
242 return count; 242 return count;
@@ -256,7 +256,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
256 return err; 256 return err;
257 257
258 mutex_lock(&data->update_lock); 258 mutex_lock(&data->update_lock);
259 data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); 259 data->in_max[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
260 vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]); 260 vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
261 mutex_unlock(&data->update_lock); 261 mutex_unlock(&data->update_lock);
262 return count; 262 return count;
@@ -302,8 +302,8 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
302 return err; 302 return err;
303 303
304 mutex_lock(&data->update_lock); 304 mutex_lock(&data->update_lock);
305 data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 305 data->in_min[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
306 0, 255); 306 0, 255);
307 vt8231_write_value(data, regvoltmin[5], data->in_min[5]); 307 vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
308 mutex_unlock(&data->update_lock); 308 mutex_unlock(&data->update_lock);
309 return count; 309 return count;
@@ -321,8 +321,8 @@ static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
321 return err; 321 return err;
322 322
323 mutex_lock(&data->update_lock); 323 mutex_lock(&data->update_lock);
324 data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 324 data->in_max[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
325 0, 255); 325 0, 255);
326 vt8231_write_value(data, regvoltmax[5], data->in_max[5]); 326 vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
327 mutex_unlock(&data->update_lock); 327 mutex_unlock(&data->update_lock);
328 return count; 328 return count;
@@ -380,7 +380,7 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
380 return err; 380 return err;
381 381
382 mutex_lock(&data->update_lock); 382 mutex_lock(&data->update_lock);
383 data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); 383 data->temp_max[0] = clamp_val((val + 500) / 1000, 0, 255);
384 vt8231_write_value(data, regtempmax[0], data->temp_max[0]); 384 vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
385 mutex_unlock(&data->update_lock); 385 mutex_unlock(&data->update_lock);
386 return count; 386 return count;
@@ -397,7 +397,7 @@ static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
397 return err; 397 return err;
398 398
399 mutex_lock(&data->update_lock); 399 mutex_lock(&data->update_lock);
400 data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); 400 data->temp_min[0] = clamp_val((val + 500) / 1000, 0, 255);
401 vt8231_write_value(data, regtempmin[0], data->temp_min[0]); 401 vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
402 mutex_unlock(&data->update_lock); 402 mutex_unlock(&data->update_lock);
403 return count; 403 return count;
@@ -444,7 +444,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
444 return err; 444 return err;
445 445
446 mutex_lock(&data->update_lock); 446 mutex_lock(&data->update_lock);
447 data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); 447 data->temp_max[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
448 vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]); 448 vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
449 mutex_unlock(&data->update_lock); 449 mutex_unlock(&data->update_lock);
450 return count; 450 return count;
@@ -463,7 +463,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
463 return err; 463 return err;
464 464
465 mutex_lock(&data->update_lock); 465 mutex_lock(&data->update_lock);
466 data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); 466 data->temp_min[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
467 vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]); 467 vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
468 mutex_unlock(&data->update_lock); 468 mutex_unlock(&data->update_lock);
469 return count; 469 return count;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 0e8ffd6059a0..0a89211c25f6 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -354,8 +354,8 @@ static inline unsigned int step_time_from_reg(u8 reg, u8 mode)
354 354
355static inline u8 step_time_to_reg(unsigned int msec, u8 mode) 355static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
356{ 356{
357 return SENSORS_LIMIT((mode ? (msec + 50) / 100 : 357 return clamp_val((mode ? (msec + 50) / 100 : (msec + 200) / 400),
358 (msec + 200) / 400), 1, 255); 358 1, 255);
359} 359}
360 360
361static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) 361static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
@@ -414,8 +414,7 @@ static inline long in_from_reg(u8 reg, u8 nr, const u16 *scale_in)
414 414
415static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in) 415static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in)
416{ 416{
417 return SENSORS_LIMIT(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 417 return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
418 255);
419} 418}
420 419
421/* 420/*
@@ -1267,7 +1266,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
1267 if (err < 0) 1266 if (err < 0)
1268 return err; 1267 return err;
1269 1268
1270 val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), -128, 127); 1269 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
1271 1270
1272 mutex_lock(&data->update_lock); 1271 mutex_lock(&data->update_lock);
1273 data->temp_offset[nr] = val; 1272 data->temp_offset[nr] = val;
@@ -1435,7 +1434,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
1435 if (err < 0) 1434 if (err < 0)
1436 return err; 1435 return err;
1437 1436
1438 val = SENSORS_LIMIT(val, 0, 255); 1437 val = clamp_val(val, 0, 255);
1439 1438
1440 mutex_lock(&data->update_lock); 1439 mutex_lock(&data->update_lock);
1441 data->pwm[nr] = val; 1440 data->pwm[nr] = val;
@@ -1514,7 +1513,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
1514 if (err < 0) 1513 if (err < 0)
1515 return err; 1514 return err;
1516 1515
1517 val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127); 1516 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
1518 1517
1519 mutex_lock(&data->update_lock); 1518 mutex_lock(&data->update_lock);
1520 data->target_temp[nr] = val; 1519 data->target_temp[nr] = val;
@@ -1540,7 +1539,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
1540 return err; 1539 return err;
1541 1540
1542 /* Limit the temp to 0C - 15C */ 1541 /* Limit the temp to 0C - 15C */
1543 val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15); 1542 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
1544 1543
1545 mutex_lock(&data->update_lock); 1544 mutex_lock(&data->update_lock);
1546 if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { 1545 if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
@@ -1639,7 +1638,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
1639 err = kstrtoul(buf, 10, &val); \ 1638 err = kstrtoul(buf, 10, &val); \
1640 if (err < 0) \ 1639 if (err < 0) \
1641 return err; \ 1640 return err; \
1642 val = SENSORS_LIMIT(val, 1, 255); \ 1641 val = clamp_val(val, 1, 255); \
1643 mutex_lock(&data->update_lock); \ 1642 mutex_lock(&data->update_lock); \
1644 data->reg[nr] = val; \ 1643 data->reg[nr] = val; \
1645 w83627ehf_write_value(data, data->REG_##REG[nr], val); \ 1644 w83627ehf_write_value(data, data->REG_##REG[nr], val); \
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 81f486520cea..3b9ef2d23452 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -254,16 +254,15 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
254 * these macros are called: arguments may be evaluated more than once. 254 * these macros are called: arguments may be evaluated more than once.
255 * Fixing this is just not worth it. 255 * Fixing this is just not worth it.
256 */ 256 */
257#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255)) 257#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
258#define IN_FROM_REG(val) ((val) * 16) 258#define IN_FROM_REG(val) ((val) * 16)
259 259
260static inline u8 FAN_TO_REG(long rpm, int div) 260static inline u8 FAN_TO_REG(long rpm, int div)
261{ 261{
262 if (rpm == 0) 262 if (rpm == 0)
263 return 255; 263 return 255;
264 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 264 rpm = clamp_val(rpm, 1, 1000000);
265 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 265 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
266 254);
267} 266}
268 267
269#define TEMP_MIN (-128000) 268#define TEMP_MIN (-128000)
@@ -275,9 +274,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
275 */ 274 */
276static u8 TEMP_TO_REG(long temp) 275static u8 TEMP_TO_REG(long temp)
277{ 276{
278 int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX); 277 int ntemp = clamp_val(temp, TEMP_MIN, TEMP_MAX);
279 ntemp += (ntemp<0 ? -500 : 500); 278 ntemp += (ntemp < 0 ? -500 : 500);
280 return (u8)(ntemp / 1000); 279 return (u8)(ntemp / 1000);
281} 280}
282 281
283static int TEMP_FROM_REG(u8 reg) 282static int TEMP_FROM_REG(u8 reg)
@@ -287,7 +286,7 @@ static int TEMP_FROM_REG(u8 reg)
287 286
288#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div))) 287#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))
289 288
290#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255)) 289#define PWM_TO_REG(val) (clamp_val((val), 0, 255))
291 290
292static inline unsigned long pwm_freq_from_reg_627hf(u8 reg) 291static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
293{ 292{
@@ -342,7 +341,7 @@ static inline u8 pwm_freq_to_reg(unsigned long val)
342static inline u8 DIV_TO_REG(long val) 341static inline u8 DIV_TO_REG(long val)
343{ 342{
344 int i; 343 int i;
345 val = SENSORS_LIMIT(val, 1, 128) >> 1; 344 val = clamp_val(val, 1, 128) >> 1;
346 for (i = 0; i < 7; i++) { 345 for (i = 0; i < 7; i++) {
347 if (val == 0) 346 if (val == 0)
348 break; 347 break;
@@ -614,8 +613,7 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
614 613
615 /* use VRM9 calculation */ 614 /* use VRM9 calculation */
616 data->in_min[0] = 615 data->in_min[0] =
617 SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0, 616 clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
618 255);
619 else 617 else
620 /* use VRM8 (standard) calculation */ 618 /* use VRM8 (standard) calculation */
621 data->in_min[0] = IN_TO_REG(val); 619 data->in_min[0] = IN_TO_REG(val);
@@ -644,8 +642,7 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
644 642
645 /* use VRM9 calculation */ 643 /* use VRM9 calculation */
646 data->in_max[0] = 644 data->in_max[0] =
647 SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0, 645 clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
648 255);
649 else 646 else
650 /* use VRM8 (standard) calculation */ 647 /* use VRM8 (standard) calculation */
651 data->in_max[0] = IN_TO_REG(val); 648 data->in_max[0] = IN_TO_REG(val);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 93bd28639595..aeec5b1d81c9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -159,7 +159,7 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
159#define W83781D_DEFAULT_BETA 3435 159#define W83781D_DEFAULT_BETA 3435
160 160
161/* Conversions */ 161/* Conversions */
162#define IN_TO_REG(val) SENSORS_LIMIT(((val) + 8) / 16, 0, 255) 162#define IN_TO_REG(val) clamp_val(((val) + 8) / 16, 0, 255)
163#define IN_FROM_REG(val) ((val) * 16) 163#define IN_FROM_REG(val) ((val) * 16)
164 164
165static inline u8 165static inline u8
@@ -167,8 +167,8 @@ FAN_TO_REG(long rpm, int div)
167{ 167{
168 if (rpm == 0) 168 if (rpm == 0)
169 return 255; 169 return 255;
170 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 170 rpm = clamp_val(rpm, 1, 1000000);
171 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 171 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
172} 172}
173 173
174static inline long 174static inline long
@@ -181,7 +181,7 @@ FAN_FROM_REG(u8 val, int div)
181 return 1350000 / (val * div); 181 return 1350000 / (val * div);
182} 182}
183 183
184#define TEMP_TO_REG(val) SENSORS_LIMIT((val) / 1000, -127, 128) 184#define TEMP_TO_REG(val) clamp_val((val) / 1000, -127, 128)
185#define TEMP_FROM_REG(val) ((val) * 1000) 185#define TEMP_FROM_REG(val) ((val) * 1000)
186 186
187#define BEEP_MASK_FROM_REG(val, type) ((type) == as99127f ? \ 187#define BEEP_MASK_FROM_REG(val, type) ((type) == as99127f ? \
@@ -195,9 +195,8 @@ static inline u8
195DIV_TO_REG(long val, enum chips type) 195DIV_TO_REG(long val, enum chips type)
196{ 196{
197 int i; 197 int i;
198 val = SENSORS_LIMIT(val, 1, 198 val = clamp_val(val, 1,
199 ((type == w83781d 199 ((type == w83781d || type == as99127f) ? 8 : 128)) >> 1;
200 || type == as99127f) ? 8 : 128)) >> 1;
201 for (i = 0; i < 7; i++) { 200 for (i = 0; i < 7; i++) {
202 if (val == 0) 201 if (val == 0)
203 break; 202 break;
@@ -443,7 +442,7 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
443 err = kstrtoul(buf, 10, &val); 442 err = kstrtoul(buf, 10, &val);
444 if (err) 443 if (err)
445 return err; 444 return err;
446 data->vrm = SENSORS_LIMIT(val, 0, 255); 445 data->vrm = clamp_val(val, 0, 255);
447 446
448 return count; 447 return count;
449} 448}
@@ -730,7 +729,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
730 return err; 729 return err;
731 730
732 mutex_lock(&data->update_lock); 731 mutex_lock(&data->update_lock);
733 data->pwm[nr] = SENSORS_LIMIT(val, 0, 255); 732 data->pwm[nr] = clamp_val(val, 0, 255);
734 w83781d_write_value(data, W83781D_REG_PWM[nr], data->pwm[nr]); 733 w83781d_write_value(data, W83781D_REG_PWM[nr], data->pwm[nr]);
735 mutex_unlock(&data->update_lock); 734 mutex_unlock(&data->update_lock);
736 return count; 735 return count;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index ed397c645198..38dddddf8875 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -220,15 +220,15 @@ static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
220 * in mV as would be measured on the chip input pin, need to just 220 * in mV as would be measured on the chip input pin, need to just
221 * multiply/divide by 16 to translate from/to register values. 221 * multiply/divide by 16 to translate from/to register values.
222 */ 222 */
223#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255)) 223#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
224#define IN_FROM_REG(val) ((val) * 16) 224#define IN_FROM_REG(val) ((val) * 16)
225 225
226static u8 fan_to_reg(long rpm, int div) 226static u8 fan_to_reg(long rpm, int div)
227{ 227{
228 if (rpm == 0) 228 if (rpm == 0)
229 return 255; 229 return 255;
230 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 230 rpm = clamp_val(rpm, 1, 1000000);
231 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 231 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
232} 232}
233 233
234#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ 234#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -273,7 +273,7 @@ static u8 div_to_reg(int nr, long val)
273 int i; 273 int i;
274 274
275 /* fan divisors max out at 128 */ 275 /* fan divisors max out at 128 */
276 val = SENSORS_LIMIT(val, 1, 128) >> 1; 276 val = clamp_val(val, 1, 128) >> 1;
277 for (i = 0; i < 7; i++) { 277 for (i = 0; i < 7; i++) {
278 if (val == 0) 278 if (val == 0)
279 break; 279 break;
@@ -747,7 +747,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
747 return -EINVAL; 747 return -EINVAL;
748 748
749 mutex_lock(&data->update_lock); 749 mutex_lock(&data->update_lock);
750 data->pwm[nr] = SENSORS_LIMIT(val, 0, 255); 750 data->pwm[nr] = clamp_val(val, 0, 255);
751 w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]); 751 w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
752 mutex_unlock(&data->update_lock); 752 mutex_unlock(&data->update_lock);
753 return count; 753 return count;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 301942d08453..5cb83ddf2cc6 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -235,8 +235,8 @@ FAN_TO_REG(long rpm, int div)
235{ 235{
236 if (rpm == 0) 236 if (rpm == 0)
237 return 255; 237 return 255;
238 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 238 rpm = clamp_val(rpm, 1, 1000000);
239 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 239 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
240} 240}
241 241
242#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ 242#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -244,16 +244,15 @@ FAN_TO_REG(long rpm, int div)
244 1350000 / ((val) * (div)))) 244 1350000 / ((val) * (div))))
245 245
246/* for temp1 */ 246/* for temp1 */
247#define TEMP1_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \ 247#define TEMP1_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
248 : (val)) / 1000, 0, 0xff)) 248 : (val)) / 1000, 0, 0xff))
249#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000) 249#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
250/* for temp2 and temp3, because they need additional resolution */ 250/* for temp2 and temp3, because they need additional resolution */
251#define TEMP_ADD_FROM_REG(val1, val2) \ 251#define TEMP_ADD_FROM_REG(val1, val2) \
252 ((((val1) & 0x80 ? (val1)-0x100 \ 252 ((((val1) & 0x80 ? (val1)-0x100 \
253 : (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0)) 253 : (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
254#define TEMP_ADD_TO_REG_HIGH(val) \ 254#define TEMP_ADD_TO_REG_HIGH(val) \
255 (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \ 255 (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 : (val)) / 1000, 0, 0xff))
256 : (val)) / 1000, 0, 0xff))
257#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00) 256#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00)
258 257
259#define DIV_FROM_REG(val) (1 << (val)) 258#define DIV_FROM_REG(val) (1 << (val))
@@ -262,7 +261,7 @@ static inline u8
262DIV_TO_REG(long val) 261DIV_TO_REG(long val)
263{ 262{
264 int i; 263 int i;
265 val = SENSORS_LIMIT(val, 1, 128) >> 1; 264 val = clamp_val(val, 1, 128) >> 1;
266 for (i = 0; i < 7; i++) { 265 for (i = 0; i < 7; i++) {
267 if (val == 0) 266 if (val == 0)
268 break; 267 break;
@@ -397,7 +396,7 @@ static ssize_t store_in_##reg(struct device *dev, \
397 if (err) \ 396 if (err) \
398 return err; \ 397 return err; \
399 mutex_lock(&data->update_lock); \ 398 mutex_lock(&data->update_lock); \
400 data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \ 399 data->in_##reg[nr] = clamp_val(IN_TO_REG(nr, val) / 4, 0, 255); \
401 w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \ 400 w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
402 data->in_##reg[nr]); \ 401 data->in_##reg[nr]); \
403 mutex_unlock(&data->update_lock); \ 402 mutex_unlock(&data->update_lock); \
@@ -645,7 +644,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
645 err = kstrtoul(buf, 10, &val); 644 err = kstrtoul(buf, 10, &val);
646 if (err) 645 if (err)
647 return err; 646 return err;
648 val = SENSORS_LIMIT(val, 0, 255) >> 4; 647 val = clamp_val(val, 0, 255) >> 4;
649 648
650 mutex_lock(&data->update_lock); 649 mutex_lock(&data->update_lock);
651 val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0; 650 val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
@@ -799,7 +798,7 @@ store_thermal_cruise(struct device *dev, struct device_attribute *attr,
799 mutex_lock(&data->update_lock); 798 mutex_lock(&data->update_lock);
800 target_mask = w83792d_read_value(client, 799 target_mask = w83792d_read_value(client,
801 W83792D_REG_THERMAL[nr]) & 0x80; 800 W83792D_REG_THERMAL[nr]) & 0x80;
802 data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255); 801 data->thermal_cruise[nr] = clamp_val(target_tmp, 0, 255);
803 w83792d_write_value(client, W83792D_REG_THERMAL[nr], 802 w83792d_write_value(client, W83792D_REG_THERMAL[nr],
804 (data->thermal_cruise[nr]) | target_mask); 803 (data->thermal_cruise[nr]) | target_mask);
805 mutex_unlock(&data->update_lock); 804 mutex_unlock(&data->update_lock);
@@ -837,7 +836,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
837 mutex_lock(&data->update_lock); 836 mutex_lock(&data->update_lock);
838 tol_mask = w83792d_read_value(client, 837 tol_mask = w83792d_read_value(client,
839 W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0); 838 W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
840 tol_tmp = SENSORS_LIMIT(val, 0, 15); 839 tol_tmp = clamp_val(val, 0, 15);
841 tol_tmp &= 0x0f; 840 tol_tmp &= 0x0f;
842 data->tolerance[nr] = tol_tmp; 841 data->tolerance[nr] = tol_tmp;
843 if (nr == 1) 842 if (nr == 1)
@@ -881,7 +880,7 @@ store_sf2_point(struct device *dev, struct device_attribute *attr,
881 return err; 880 return err;
882 881
883 mutex_lock(&data->update_lock); 882 mutex_lock(&data->update_lock);
884 data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127); 883 data->sf2_points[index][nr] = clamp_val(val, 0, 127);
885 mask_tmp = w83792d_read_value(client, 884 mask_tmp = w83792d_read_value(client,
886 W83792D_REG_POINTS[index][nr]) & 0x80; 885 W83792D_REG_POINTS[index][nr]) & 0x80;
887 w83792d_write_value(client, W83792D_REG_POINTS[index][nr], 886 w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
@@ -923,7 +922,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
923 return err; 922 return err;
924 923
925 mutex_lock(&data->update_lock); 924 mutex_lock(&data->update_lock);
926 data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15); 925 data->sf2_levels[index][nr] = clamp_val((val * 15) / 100, 0, 15);
927 mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr]) 926 mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
928 & ((nr == 3) ? 0xf0 : 0x0f); 927 & ((nr == 3) ? 0xf0 : 0x0f);
929 if (nr == 3) 928 if (nr == 3)
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 99799fd1d917..660427520c53 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -191,7 +191,7 @@ static inline u16 FAN_TO_REG(long rpm)
191{ 191{
192 if (rpm <= 0) 192 if (rpm <= 0)
193 return 0x0fff; 193 return 0x0fff;
194 return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe); 194 return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
195} 195}
196 196
197static inline unsigned long TIME_FROM_REG(u8 reg) 197static inline unsigned long TIME_FROM_REG(u8 reg)
@@ -201,7 +201,7 @@ static inline unsigned long TIME_FROM_REG(u8 reg)
201 201
202static inline u8 TIME_TO_REG(unsigned long val) 202static inline u8 TIME_TO_REG(unsigned long val)
203{ 203{
204 return SENSORS_LIMIT((val + 50) / 100, 0, 0xff); 204 return clamp_val((val + 50) / 100, 0, 0xff);
205} 205}
206 206
207static inline long TEMP_FROM_REG(s8 reg) 207static inline long TEMP_FROM_REG(s8 reg)
@@ -211,7 +211,7 @@ static inline long TEMP_FROM_REG(s8 reg)
211 211
212static inline s8 TEMP_TO_REG(long val, s8 min, s8 max) 212static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
213{ 213{
214 return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max); 214 return clamp_val((val + (val < 0 ? -500 : 500)) / 1000, min, max);
215} 215}
216 216
217struct w83793_data { 217struct w83793_data {
@@ -558,7 +558,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
558 w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index), 558 w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
559 val); 559 val);
560 } else { 560 } else {
561 val = SENSORS_LIMIT(val, 0, 0xff) >> 2; 561 val = clamp_val(val, 0, 0xff) >> 2;
562 data->pwm[index][nr] = 562 data->pwm[index][nr] =
563 w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0; 563 w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
564 data->pwm[index][nr] |= val; 564 data->pwm[index][nr] |= val;
@@ -739,7 +739,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
739 if (nr == SETUP_PWM_DEFAULT) { 739 if (nr == SETUP_PWM_DEFAULT) {
740 data->pwm_default = 740 data->pwm_default =
741 w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0; 741 w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
742 data->pwm_default |= SENSORS_LIMIT(val, 0, 0xff) >> 2; 742 data->pwm_default |= clamp_val(val, 0, 0xff) >> 2;
743 w83793_write_value(client, W83793_REG_PWM_DEFAULT, 743 w83793_write_value(client, W83793_REG_PWM_DEFAULT,
744 data->pwm_default); 744 data->pwm_default);
745 } else if (nr == SETUP_PWM_UPTIME) { 745 } else if (nr == SETUP_PWM_UPTIME) {
@@ -838,7 +838,7 @@ store_sf_ctrl(struct device *dev, struct device_attribute *attr,
838 838
839 mutex_lock(&data->update_lock); 839 mutex_lock(&data->update_lock);
840 if (nr == TEMP_FAN_MAP) { 840 if (nr == TEMP_FAN_MAP) {
841 val = SENSORS_LIMIT(val, 0, 255); 841 val = clamp_val(val, 0, 255);
842 w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val); 842 w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
843 data->temp_fan_map[index] = val; 843 data->temp_fan_map[index] = val;
844 } else if (nr == TEMP_PWM_ENABLE) { 844 } else if (nr == TEMP_PWM_ENABLE) {
@@ -907,7 +907,7 @@ store_sf2_pwm(struct device *dev, struct device_attribute *attr,
907 err = kstrtoul(buf, 10, &val); 907 err = kstrtoul(buf, 10, &val);
908 if (err) 908 if (err)
909 return err; 909 return err;
910 val = SENSORS_LIMIT(val, 0, 0xff) >> 2; 910 val = clamp_val(val, 0, 0xff) >> 2;
911 911
912 mutex_lock(&data->update_lock); 912 mutex_lock(&data->update_lock);
913 data->sf2_pwm[index][nr] = 913 data->sf2_pwm[index][nr] =
@@ -1003,9 +1003,9 @@ store_in(struct device *dev, struct device_attribute *attr,
1003 /* fix the limit values of 5VDD and 5VSB to ALARM mechanism */ 1003 /* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
1004 if (nr == 1 || nr == 2) 1004 if (nr == 1 || nr == 2)
1005 val -= scale_in_add[index] / scale_in[index]; 1005 val -= scale_in_add[index] / scale_in[index];
1006 val = SENSORS_LIMIT(val, 0, 255); 1006 val = clamp_val(val, 0, 255);
1007 } else { 1007 } else {
1008 val = SENSORS_LIMIT(val, 0, 0x3FF); 1008 val = clamp_val(val, 0, 0x3FF);
1009 data->in_low_bits[nr] = 1009 data->in_low_bits[nr] =
1010 w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]); 1010 w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
1011 data->in_low_bits[nr] &= ~(0x03 << (2 * index)); 1011 data->in_low_bits[nr] &= ~(0x03 << (2 * index));
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 55a4f4894531..e226096148eb 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -262,7 +262,7 @@ static inline u16 fan_to_reg(long rpm)
262{ 262{
263 if (rpm <= 0) 263 if (rpm <= 0)
264 return 0x0fff; 264 return 0x0fff;
265 return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe); 265 return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
266} 266}
267 267
268static inline unsigned long time_from_reg(u8 reg) 268static inline unsigned long time_from_reg(u8 reg)
@@ -272,7 +272,7 @@ static inline unsigned long time_from_reg(u8 reg)
272 272
273static inline u8 time_to_reg(unsigned long val) 273static inline u8 time_to_reg(unsigned long val)
274{ 274{
275 return SENSORS_LIMIT((val + 50) / 100, 0, 0xff); 275 return clamp_val((val + 50) / 100, 0, 0xff);
276} 276}
277 277
278static inline long temp_from_reg(s8 reg) 278static inline long temp_from_reg(s8 reg)
@@ -282,7 +282,7 @@ static inline long temp_from_reg(s8 reg)
282 282
283static inline s8 temp_to_reg(long val, s8 min, s8 max) 283static inline s8 temp_to_reg(long val, s8 min, s8 max)
284{ 284{
285 return SENSORS_LIMIT(val / 1000, min, max); 285 return clamp_val(val / 1000, min, max);
286} 286}
287 287
288static const u16 pwm_freq_cksel0[16] = { 288static const u16 pwm_freq_cksel0[16] = {
@@ -319,7 +319,7 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
319 319
320 /* Best fit for cksel = 1 */ 320 /* Best fit for cksel = 1 */
321 base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256); 321 base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
322 reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128); 322 reg1 = clamp_val(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
323 best1 = base_clock / reg1; 323 best1 = base_clock / reg1;
324 reg1 = 0x80 | (reg1 - 1); 324 reg1 = 0x80 | (reg1 - 1);
325 325
@@ -889,7 +889,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
889 val = pwm_freq_to_reg(val, data->clkin); 889 val = pwm_freq_to_reg(val, data->clkin);
890 break; 890 break;
891 default: 891 default:
892 val = SENSORS_LIMIT(val, 0, 0xff); 892 val = clamp_val(val, 0, 0xff);
893 break; 893 break;
894 } 894 }
895 w83795_write(client, W83795_REG_PWM(index, nr), val); 895 w83795_write(client, W83795_REG_PWM(index, nr), val);
@@ -1126,7 +1126,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
1126 break; 1126 break;
1127 case TEMP_PWM_FAN_MAP: 1127 case TEMP_PWM_FAN_MAP:
1128 mutex_lock(&data->update_lock); 1128 mutex_lock(&data->update_lock);
1129 tmp = SENSORS_LIMIT(tmp, 0, 0xff); 1129 tmp = clamp_val(tmp, 0, 0xff);
1130 w83795_write(client, W83795_REG_TFMR(index), tmp); 1130 w83795_write(client, W83795_REG_TFMR(index), tmp);
1131 data->pwm_tfmr[index] = tmp; 1131 data->pwm_tfmr[index] = tmp;
1132 mutex_unlock(&data->update_lock); 1132 mutex_unlock(&data->update_lock);
@@ -1177,13 +1177,13 @@ store_fanin(struct device *dev, struct device_attribute *attr,
1177 mutex_lock(&data->update_lock); 1177 mutex_lock(&data->update_lock);
1178 switch (nr) { 1178 switch (nr) {
1179 case FANIN_TARGET: 1179 case FANIN_TARGET:
1180 val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff)); 1180 val = fan_to_reg(clamp_val(val, 0, 0xfff));
1181 w83795_write(client, W83795_REG_FTSH(index), val >> 4); 1181 w83795_write(client, W83795_REG_FTSH(index), val >> 4);
1182 w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0); 1182 w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
1183 data->target_speed[index] = val; 1183 data->target_speed[index] = val;
1184 break; 1184 break;
1185 case FANIN_TOL: 1185 case FANIN_TOL:
1186 val = SENSORS_LIMIT(val, 0, 0x3f); 1186 val = clamp_val(val, 0, 0x3f);
1187 w83795_write(client, W83795_REG_TFTS, val); 1187 w83795_write(client, W83795_REG_TFTS, val);
1188 data->tol_speed = val; 1188 data->tol_speed = val;
1189 break; 1189 break;
@@ -1227,22 +1227,22 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
1227 mutex_lock(&data->update_lock); 1227 mutex_lock(&data->update_lock);
1228 switch (nr) { 1228 switch (nr) {
1229 case TEMP_PWM_TTTI: 1229 case TEMP_PWM_TTTI:
1230 val = SENSORS_LIMIT(val, 0, 0x7f); 1230 val = clamp_val(val, 0, 0x7f);
1231 w83795_write(client, W83795_REG_TTTI(index), val); 1231 w83795_write(client, W83795_REG_TTTI(index), val);
1232 break; 1232 break;
1233 case TEMP_PWM_CTFS: 1233 case TEMP_PWM_CTFS:
1234 val = SENSORS_LIMIT(val, 0, 0x7f); 1234 val = clamp_val(val, 0, 0x7f);
1235 w83795_write(client, W83795_REG_CTFS(index), val); 1235 w83795_write(client, W83795_REG_CTFS(index), val);
1236 break; 1236 break;
1237 case TEMP_PWM_HCT: 1237 case TEMP_PWM_HCT:
1238 val = SENSORS_LIMIT(val, 0, 0x0f); 1238 val = clamp_val(val, 0, 0x0f);
1239 tmp = w83795_read(client, W83795_REG_HT(index)); 1239 tmp = w83795_read(client, W83795_REG_HT(index));
1240 tmp &= 0x0f; 1240 tmp &= 0x0f;
1241 tmp |= (val << 4) & 0xf0; 1241 tmp |= (val << 4) & 0xf0;
1242 w83795_write(client, W83795_REG_HT(index), tmp); 1242 w83795_write(client, W83795_REG_HT(index), tmp);
1243 break; 1243 break;
1244 case TEMP_PWM_HOT: 1244 case TEMP_PWM_HOT:
1245 val = SENSORS_LIMIT(val, 0, 0x0f); 1245 val = clamp_val(val, 0, 0x0f);
1246 tmp = w83795_read(client, W83795_REG_HT(index)); 1246 tmp = w83795_read(client, W83795_REG_HT(index));
1247 tmp &= 0xf0; 1247 tmp &= 0xf0;
1248 tmp |= val & 0x0f; 1248 tmp |= val & 0x0f;
@@ -1541,7 +1541,7 @@ store_in(struct device *dev, struct device_attribute *attr,
1541 if ((index >= 17) && 1541 if ((index >= 17) &&
1542 !((data->has_gain >> (index - 17)) & 1)) 1542 !((data->has_gain >> (index - 17)) & 1))
1543 val /= 8; 1543 val /= 8;
1544 val = SENSORS_LIMIT(val, 0, 0x3FF); 1544 val = clamp_val(val, 0, 0x3FF);
1545 mutex_lock(&data->update_lock); 1545 mutex_lock(&data->update_lock);
1546 1546
1547 lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX]; 1547 lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
@@ -1596,7 +1596,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1596 1596
1597 switch (nr) { 1597 switch (nr) {
1598 case SETUP_PWM_DEFAULT: 1598 case SETUP_PWM_DEFAULT:
1599 val = SENSORS_LIMIT(val, 0, 0xff); 1599 val = clamp_val(val, 0, 0xff);
1600 break; 1600 break;
1601 case SETUP_PWM_UPTIME: 1601 case SETUP_PWM_UPTIME:
1602 case SETUP_PWM_DOWNTIME: 1602 case SETUP_PWM_DOWNTIME:
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 79710bcac2f7..edb06cda5a68 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -86,8 +86,8 @@ FAN_TO_REG(long rpm, int div)
86{ 86{
87 if (rpm == 0) 87 if (rpm == 0)
88 return 255; 88 return 255;
89 rpm = SENSORS_LIMIT(rpm, 1, 1000000); 89 rpm = clamp_val(rpm, 1, 1000000);
90 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); 90 return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
91} 91}
92 92
93#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ 93#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -95,9 +95,8 @@ FAN_TO_REG(long rpm, int div)
95 1350000 / ((val) * (div)))) 95 1350000 / ((val) * (div))))
96 96
97/* for temp */ 97/* for temp */
98#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? \ 98#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
99 (val) + 0x100 * 1000 \ 99 : (val)) / 1000, 0, 0xff))
100 : (val)) / 1000, 0, 0xff))
101#define TEMP_FROM_REG(val) (((val) & 0x80 ? \ 100#define TEMP_FROM_REG(val) (((val) & 0x80 ? \
102 (val) - 0x100 : (val)) * 1000) 101 (val) - 0x100 : (val)) * 1000)
103 102
@@ -106,7 +105,7 @@ FAN_TO_REG(long rpm, int div)
106 * in mV as would be measured on the chip input pin, need to just 105 * in mV as would be measured on the chip input pin, need to just
107 * multiply/divide by 8 to translate from/to register values. 106 * multiply/divide by 8 to translate from/to register values.
108 */ 107 */
109#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 4) / 8), 0, 255)) 108#define IN_TO_REG(val) (clamp_val((((val) + 4) / 8), 0, 255))
110#define IN_FROM_REG(val) ((val) * 8) 109#define IN_FROM_REG(val) ((val) * 8)
111 110
112#define DIV_FROM_REG(val) (1 << (val)) 111#define DIV_FROM_REG(val) (1 << (val))
@@ -115,7 +114,7 @@ static inline u8
115DIV_TO_REG(long val) 114DIV_TO_REG(long val)
116{ 115{
117 int i; 116 int i;
118 val = SENSORS_LIMIT(val, 1, 128) >> 1; 117 val = clamp_val(val, 1, 128) >> 1;
119 for (i = 0; i < 7; i++) { 118 for (i = 0; i < 7; i++) {
120 if (val == 0) 119 if (val == 0)
121 break; 120 break;
@@ -481,7 +480,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
481 err = kstrtoul(buf, 10, &val); 480 err = kstrtoul(buf, 10, &val);
482 if (err) 481 if (err)
483 return err; 482 return err;
484 val = SENSORS_LIMIT(val, 0, 255); 483 val = clamp_val(val, 0, 255);
485 484
486 mutex_lock(&data->update_lock); 485 mutex_lock(&data->update_lock);
487 data->pwm[nr] = val; 486 data->pwm[nr] = val;
@@ -564,7 +563,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
564 mutex_lock(&data->update_lock); 563 mutex_lock(&data->update_lock);
565 tol_mask = w83l786ng_read_value(client, 564 tol_mask = w83l786ng_read_value(client,
566 W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0); 565 W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0);
567 tol_tmp = SENSORS_LIMIT(val, 0, 15); 566 tol_tmp = clamp_val(val, 0, 15);
568 tol_tmp &= 0x0f; 567 tol_tmp &= 0x0f;
569 data->tolerance[nr] = tol_tmp; 568 data->tolerance[nr] = tol_tmp;
570 if (nr == 1) 569 if (nr == 1)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index cbba7db9ad59..f5258c205de5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -34,6 +34,7 @@
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/module.h>
37#include "i2c-designware-core.h" 38#include "i2c-designware-core.h"
38 39
39/* 40/*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
725 return dw_readl(dev, DW_IC_COMP_PARAM_1); 726 return dw_readl(dev, DW_IC_COMP_PARAM_1);
726} 727}
727EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); 728EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
729
730MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
731MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 1b1a936eccc9..d6abaf2cf2e3 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
127 struct device *dev; 127 struct device *dev;
128 void __iomem *regs; 128 void __iomem *regs;
129 struct completion cmd_complete; 129 struct completion cmd_complete;
130 u32 cmd_err; 130 int cmd_err;
131 struct i2c_adapter adapter; 131 struct i2c_adapter adapter;
132 const struct mxs_i2c_speed_config *speed; 132 const struct mxs_i2c_speed_config *speed;
133 133
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
316 if (msg->len == 0) 316 if (msg->len == 0)
317 return -EINVAL; 317 return -EINVAL;
318 318
319 init_completion(&i2c->cmd_complete); 319 INIT_COMPLETION(i2c->cmd_complete);
320 i2c->cmd_err = 0; 320 i2c->cmd_err = 0;
321 321
322 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); 322 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -473,6 +473,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
473 i2c->dev = dev; 473 i2c->dev = dev;
474 i2c->speed = &mxs_i2c_95kHz_config; 474 i2c->speed = &mxs_i2c_95kHz_config;
475 475
476 init_completion(&i2c->cmd_complete);
477
476 if (dev->of_node) { 478 if (dev->of_node) {
477 err = mxs_i2c_get_ofdata(i2c); 479 err = mxs_i2c_get_ofdata(i2c);
478 if (err) 480 if (err)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 20d41bfa7c19..4cc2f0528c88 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -803,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
803 if (stat & OMAP_I2C_STAT_AL) { 803 if (stat & OMAP_I2C_STAT_AL) {
804 dev_err(dev->dev, "Arbitration lost\n"); 804 dev_err(dev->dev, "Arbitration lost\n");
805 dev->cmd_err |= OMAP_I2C_STAT_AL; 805 dev->cmd_err |= OMAP_I2C_STAT_AL;
806 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); 806 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
807 } 807 }
808 808
809 return -EIO; 809 return -EIO;
@@ -963,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
963 i2c_omap_errata_i207(dev, stat); 963 i2c_omap_errata_i207(dev, stat);
964 964
965 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); 965 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
966 break; 966 continue;
967 } 967 }
968 968
969 if (stat & OMAP_I2C_STAT_RRDY) { 969 if (stat & OMAP_I2C_STAT_RRDY) {
@@ -989,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
989 break; 989 break;
990 990
991 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR); 991 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
992 break; 992 continue;
993 } 993 }
994 994
995 if (stat & OMAP_I2C_STAT_XRDY) { 995 if (stat & OMAP_I2C_STAT_XRDY) {
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 6aafa3d88ff0..c447e8d40b78 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -406,7 +406,7 @@ err:
406 return -EIO; 406 return -EIO;
407} 407}
408 408
409static int acpi_smbus_cmi_remove(struct acpi_device *device, int type) 409static int acpi_smbus_cmi_remove(struct acpi_device *device)
410{ 410{
411 struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device); 411 struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
412 412
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 3f1818b87974..e03381aee34f 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -12,6 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/of_i2c.h>
15#include <linux/clk.h> 16#include <linux/clk.h>
16#include <linux/err.h> 17#include <linux/err.h>
17#include <linux/io.h> 18#include <linux/io.h>
@@ -328,6 +329,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
328 adap->algo = &i2c_sirfsoc_algo; 329 adap->algo = &i2c_sirfsoc_algo;
329 adap->algo_data = siic; 330 adap->algo_data = siic;
330 331
332 adap->dev.of_node = pdev->dev.of_node;
331 adap->dev.parent = &pdev->dev; 333 adap->dev.parent = &pdev->dev;
332 adap->nr = pdev->id; 334 adap->nr = pdev->id;
333 335
@@ -371,6 +373,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
371 373
372 clk_disable(clk); 374 clk_disable(clk);
373 375
376 of_i2c_register_devices(adap);
377
374 dev_info(&pdev->dev, " I2C adapter ready to operate\n"); 378 dev_info(&pdev->dev, " I2C adapter ready to operate\n");
375 379
376 return 0; 380 return 0;
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 1e44d04d1b22..a43c0ce5e3d8 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -167,7 +167,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
167 } 167 }
168 168
169 mux->busses = devm_kzalloc(&pdev->dev, 169 mux->busses = devm_kzalloc(&pdev->dev,
170 sizeof(mux->busses) * mux->pdata->bus_count, 170 sizeof(*mux->busses) * mux->pdata->bus_count,
171 GFP_KERNEL); 171 GFP_KERNEL);
172 if (!mux->busses) { 172 if (!mux->busses) {
173 dev_err(&pdev->dev, "Cannot allocate busses\n"); 173 dev_err(&pdev->dev, "Cannot allocate busses\n");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4ba384f1ab54..5d6675013864 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -74,7 +74,7 @@ static struct cpuidle_driver intel_idle_driver = {
74 .en_core_tk_irqen = 1, 74 .en_core_tk_irqen = 1,
75}; 75};
76/* intel_idle.max_cstate=0 disables driver */ 76/* intel_idle.max_cstate=0 disables driver */
77static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; 77static int max_cstate = CPUIDLE_STATE_MAX - 1;
78 78
79static unsigned int mwait_substates; 79static unsigned int mwait_substates;
80 80
@@ -90,6 +90,7 @@ struct idle_cpu {
90 * Indicate which enable bits to clear here. 90 * Indicate which enable bits to clear here.
91 */ 91 */
92 unsigned long auto_demotion_disable_flags; 92 unsigned long auto_demotion_disable_flags;
93 bool disable_promotion_to_c1e;
93}; 94};
94 95
95static const struct idle_cpu *icpu; 96static const struct idle_cpu *icpu;
@@ -109,162 +110,206 @@ static struct cpuidle_state *cpuidle_state_table;
109#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 110#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
110 111
111/* 112/*
113 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
114 * the C-state (top nibble) and sub-state (bottom nibble)
115 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
116 *
117 * We store the hint at the top of our "flags" for each state.
118 */
119#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
120#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
121
122/*
112 * States are indexed by the cstate number, 123 * States are indexed by the cstate number,
113 * which is also the index into the MWAIT hint array. 124 * which is also the index into the MWAIT hint array.
114 * Thus C0 is a dummy. 125 * Thus C0 is a dummy.
115 */ 126 */
116static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { 127static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
117 { /* MWAIT C0 */ }, 128 {
118 { /* MWAIT C1 */
119 .name = "C1-NHM", 129 .name = "C1-NHM",
120 .desc = "MWAIT 0x00", 130 .desc = "MWAIT 0x00",
121 .flags = CPUIDLE_FLAG_TIME_VALID, 131 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
122 .exit_latency = 3, 132 .exit_latency = 3,
123 .target_residency = 6, 133 .target_residency = 6,
124 .enter = &intel_idle }, 134 .enter = &intel_idle },
125 { /* MWAIT C2 */ 135 {
136 .name = "C1E-NHM",
137 .desc = "MWAIT 0x01",
138 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
139 .exit_latency = 10,
140 .target_residency = 20,
141 .enter = &intel_idle },
142 {
126 .name = "C3-NHM", 143 .name = "C3-NHM",
127 .desc = "MWAIT 0x10", 144 .desc = "MWAIT 0x10",
128 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 145 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
129 .exit_latency = 20, 146 .exit_latency = 20,
130 .target_residency = 80, 147 .target_residency = 80,
131 .enter = &intel_idle }, 148 .enter = &intel_idle },
132 { /* MWAIT C3 */ 149 {
133 .name = "C6-NHM", 150 .name = "C6-NHM",
134 .desc = "MWAIT 0x20", 151 .desc = "MWAIT 0x20",
135 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 152 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
136 .exit_latency = 200, 153 .exit_latency = 200,
137 .target_residency = 800, 154 .target_residency = 800,
138 .enter = &intel_idle }, 155 .enter = &intel_idle },
156 {
157 .enter = NULL }
139}; 158};
140 159
141static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { 160static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
142 { /* MWAIT C0 */ }, 161 {
143 { /* MWAIT C1 */
144 .name = "C1-SNB", 162 .name = "C1-SNB",
145 .desc = "MWAIT 0x00", 163 .desc = "MWAIT 0x00",
146 .flags = CPUIDLE_FLAG_TIME_VALID, 164 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
147 .exit_latency = 1, 165 .exit_latency = 2,
148 .target_residency = 1, 166 .target_residency = 2,
167 .enter = &intel_idle },
168 {
169 .name = "C1E-SNB",
170 .desc = "MWAIT 0x01",
171 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
172 .exit_latency = 10,
173 .target_residency = 20,
149 .enter = &intel_idle }, 174 .enter = &intel_idle },
150 { /* MWAIT C2 */ 175 {
151 .name = "C3-SNB", 176 .name = "C3-SNB",
152 .desc = "MWAIT 0x10", 177 .desc = "MWAIT 0x10",
153 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 178 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
154 .exit_latency = 80, 179 .exit_latency = 80,
155 .target_residency = 211, 180 .target_residency = 211,
156 .enter = &intel_idle }, 181 .enter = &intel_idle },
157 { /* MWAIT C3 */ 182 {
158 .name = "C6-SNB", 183 .name = "C6-SNB",
159 .desc = "MWAIT 0x20", 184 .desc = "MWAIT 0x20",
160 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 185 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
161 .exit_latency = 104, 186 .exit_latency = 104,
162 .target_residency = 345, 187 .target_residency = 345,
163 .enter = &intel_idle }, 188 .enter = &intel_idle },
164 { /* MWAIT C4 */ 189 {
165 .name = "C7-SNB", 190 .name = "C7-SNB",
166 .desc = "MWAIT 0x30", 191 .desc = "MWAIT 0x30",
167 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 192 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
168 .exit_latency = 109, 193 .exit_latency = 109,
169 .target_residency = 345, 194 .target_residency = 345,
170 .enter = &intel_idle }, 195 .enter = &intel_idle },
196 {
197 .enter = NULL }
171}; 198};
172 199
173static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = { 200static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
174 { /* MWAIT C0 */ }, 201 {
175 { /* MWAIT C1 */
176 .name = "C1-IVB", 202 .name = "C1-IVB",
177 .desc = "MWAIT 0x00", 203 .desc = "MWAIT 0x00",
178 .flags = CPUIDLE_FLAG_TIME_VALID, 204 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
179 .exit_latency = 1, 205 .exit_latency = 1,
180 .target_residency = 1, 206 .target_residency = 1,
181 .enter = &intel_idle }, 207 .enter = &intel_idle },
182 { /* MWAIT C2 */ 208 {
209 .name = "C1E-IVB",
210 .desc = "MWAIT 0x01",
211 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
212 .exit_latency = 10,
213 .target_residency = 20,
214 .enter = &intel_idle },
215 {
183 .name = "C3-IVB", 216 .name = "C3-IVB",
184 .desc = "MWAIT 0x10", 217 .desc = "MWAIT 0x10",
185 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 218 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
186 .exit_latency = 59, 219 .exit_latency = 59,
187 .target_residency = 156, 220 .target_residency = 156,
188 .enter = &intel_idle }, 221 .enter = &intel_idle },
189 { /* MWAIT C3 */ 222 {
190 .name = "C6-IVB", 223 .name = "C6-IVB",
191 .desc = "MWAIT 0x20", 224 .desc = "MWAIT 0x20",
192 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 225 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
193 .exit_latency = 80, 226 .exit_latency = 80,
194 .target_residency = 300, 227 .target_residency = 300,
195 .enter = &intel_idle }, 228 .enter = &intel_idle },
196 { /* MWAIT C4 */ 229 {
197 .name = "C7-IVB", 230 .name = "C7-IVB",
198 .desc = "MWAIT 0x30", 231 .desc = "MWAIT 0x30",
199 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 232 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
200 .exit_latency = 87, 233 .exit_latency = 87,
201 .target_residency = 300, 234 .target_residency = 300,
202 .enter = &intel_idle }, 235 .enter = &intel_idle },
236 {
237 .enter = NULL }
203}; 238};
204 239
205static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { 240static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
206 { /* MWAIT C0 */ }, 241 {
207 { /* MWAIT C1 */ 242 .name = "C1-HSW",
208 .name = "C1-ATM",
209 .desc = "MWAIT 0x00", 243 .desc = "MWAIT 0x00",
210 .flags = CPUIDLE_FLAG_TIME_VALID, 244 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
211 .exit_latency = 1, 245 .exit_latency = 2,
212 .target_residency = 4, 246 .target_residency = 2,
213 .enter = &intel_idle }, 247 .enter = &intel_idle },
214 { /* MWAIT C2 */ 248 {
249 .name = "C1E-HSW",
250 .desc = "MWAIT 0x01",
251 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
252 .exit_latency = 10,
253 .target_residency = 20,
254 .enter = &intel_idle },
255 {
256 .name = "C3-HSW",
257 .desc = "MWAIT 0x10",
258 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
259 .exit_latency = 33,
260 .target_residency = 100,
261 .enter = &intel_idle },
262 {
263 .name = "C6-HSW",
264 .desc = "MWAIT 0x20",
265 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
266 .exit_latency = 133,
267 .target_residency = 400,
268 .enter = &intel_idle },
269 {
270 .name = "C7s-HSW",
271 .desc = "MWAIT 0x32",
272 .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
273 .exit_latency = 166,
274 .target_residency = 500,
275 .enter = &intel_idle },
276 {
277 .enter = NULL }
278};
279
280static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
281 {
282 .name = "C1E-ATM",
283 .desc = "MWAIT 0x00",
284 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
285 .exit_latency = 10,
286 .target_residency = 20,
287 .enter = &intel_idle },
288 {
215 .name = "C2-ATM", 289 .name = "C2-ATM",
216 .desc = "MWAIT 0x10", 290 .desc = "MWAIT 0x10",
217 .flags = CPUIDLE_FLAG_TIME_VALID, 291 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
218 .exit_latency = 20, 292 .exit_latency = 20,
219 .target_residency = 80, 293 .target_residency = 80,
220 .enter = &intel_idle }, 294 .enter = &intel_idle },
221 { /* MWAIT C3 */ }, 295 {
222 { /* MWAIT C4 */
223 .name = "C4-ATM", 296 .name = "C4-ATM",
224 .desc = "MWAIT 0x30", 297 .desc = "MWAIT 0x30",
225 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 298 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
226 .exit_latency = 100, 299 .exit_latency = 100,
227 .target_residency = 400, 300 .target_residency = 400,
228 .enter = &intel_idle }, 301 .enter = &intel_idle },
229 { /* MWAIT C5 */ }, 302 {
230 { /* MWAIT C6 */
231 .name = "C6-ATM", 303 .name = "C6-ATM",
232 .desc = "MWAIT 0x52", 304 .desc = "MWAIT 0x52",
233 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 305 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
234 .exit_latency = 140, 306 .exit_latency = 140,
235 .target_residency = 560, 307 .target_residency = 560,
236 .enter = &intel_idle }, 308 .enter = &intel_idle },
309 {
310 .enter = NULL }
237}; 311};
238 312
239static long get_driver_data(int cstate)
240{
241 int driver_data;
242 switch (cstate) {
243
244 case 1: /* MWAIT C1 */
245 driver_data = 0x00;
246 break;
247 case 2: /* MWAIT C2 */
248 driver_data = 0x10;
249 break;
250 case 3: /* MWAIT C3 */
251 driver_data = 0x20;
252 break;
253 case 4: /* MWAIT C4 */
254 driver_data = 0x30;
255 break;
256 case 5: /* MWAIT C5 */
257 driver_data = 0x40;
258 break;
259 case 6: /* MWAIT C6 */
260 driver_data = 0x52;
261 break;
262 default:
263 driver_data = 0x00;
264 }
265 return driver_data;
266}
267
268/** 313/**
269 * intel_idle 314 * intel_idle
270 * @dev: cpuidle_device 315 * @dev: cpuidle_device
@@ -278,8 +323,7 @@ static int intel_idle(struct cpuidle_device *dev,
278{ 323{
279 unsigned long ecx = 1; /* break on interrupt flag */ 324 unsigned long ecx = 1; /* break on interrupt flag */
280 struct cpuidle_state *state = &drv->states[index]; 325 struct cpuidle_state *state = &drv->states[index];
281 struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; 326 unsigned long eax = flg2MWAIT(state->flags);
282 unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
283 unsigned int cstate; 327 unsigned int cstate;
284 int cpu = smp_processor_id(); 328 int cpu = smp_processor_id();
285 329
@@ -362,10 +406,19 @@ static void auto_demotion_disable(void *dummy)
362 msr_bits &= ~(icpu->auto_demotion_disable_flags); 406 msr_bits &= ~(icpu->auto_demotion_disable_flags);
363 wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); 407 wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
364} 408}
409static void c1e_promotion_disable(void *dummy)
410{
411 unsigned long long msr_bits;
412
413 rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
414 msr_bits &= ~0x2;
415 wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
416}
365 417
366static const struct idle_cpu idle_cpu_nehalem = { 418static const struct idle_cpu idle_cpu_nehalem = {
367 .state_table = nehalem_cstates, 419 .state_table = nehalem_cstates,
368 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, 420 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
421 .disable_promotion_to_c1e = true,
369}; 422};
370 423
371static const struct idle_cpu idle_cpu_atom = { 424static const struct idle_cpu idle_cpu_atom = {
@@ -379,10 +432,17 @@ static const struct idle_cpu idle_cpu_lincroft = {
379 432
380static const struct idle_cpu idle_cpu_snb = { 433static const struct idle_cpu idle_cpu_snb = {
381 .state_table = snb_cstates, 434 .state_table = snb_cstates,
435 .disable_promotion_to_c1e = true,
382}; 436};
383 437
384static const struct idle_cpu idle_cpu_ivb = { 438static const struct idle_cpu idle_cpu_ivb = {
385 .state_table = ivb_cstates, 439 .state_table = ivb_cstates,
440 .disable_promotion_to_c1e = true,
441};
442
443static const struct idle_cpu idle_cpu_hsw = {
444 .state_table = hsw_cstates,
445 .disable_promotion_to_c1e = true,
386}; 446};
387 447
388#define ICPU(model, cpu) \ 448#define ICPU(model, cpu) \
@@ -402,6 +462,9 @@ static const struct x86_cpu_id intel_idle_ids[] = {
402 ICPU(0x2d, idle_cpu_snb), 462 ICPU(0x2d, idle_cpu_snb),
403 ICPU(0x3a, idle_cpu_ivb), 463 ICPU(0x3a, idle_cpu_ivb),
404 ICPU(0x3e, idle_cpu_ivb), 464 ICPU(0x3e, idle_cpu_ivb),
465 ICPU(0x3c, idle_cpu_hsw),
466 ICPU(0x3f, idle_cpu_hsw),
467 ICPU(0x45, idle_cpu_hsw),
405 {} 468 {}
406}; 469};
407MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); 470MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -448,8 +511,6 @@ static int intel_idle_probe(void)
448 else 511 else
449 on_each_cpu(__setup_broadcast_timer, (void *)true, 1); 512 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
450 513
451 register_cpu_notifier(&cpu_hotplug_notifier);
452
453 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 514 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
454 " model 0x%X\n", boot_cpu_data.x86_model); 515 " model 0x%X\n", boot_cpu_data.x86_model);
455 516
@@ -486,32 +547,31 @@ static int intel_idle_cpuidle_driver_init(void)
486 547
487 drv->state_count = 1; 548 drv->state_count = 1;
488 549
489 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { 550 for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
490 int num_substates; 551 int num_substates, mwait_hint, mwait_cstate, mwait_substate;
491 552
492 if (cstate > max_cstate) { 553 if (cpuidle_state_table[cstate].enter == NULL)
554 break;
555
556 if (cstate + 1 > max_cstate) {
493 printk(PREFIX "max_cstate %d reached\n", 557 printk(PREFIX "max_cstate %d reached\n",
494 max_cstate); 558 max_cstate);
495 break; 559 break;
496 } 560 }
497 561
562 mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
563 mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
564 mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
565
498 /* does the state exist in CPUID.MWAIT? */ 566 /* does the state exist in CPUID.MWAIT? */
499 num_substates = (mwait_substates >> ((cstate) * 4)) 567 num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
500 & MWAIT_SUBSTATE_MASK; 568 & MWAIT_SUBSTATE_MASK;
501 if (num_substates == 0) 569
502 continue; 570 /* if sub-state in table is not enumerated by CPUID */
503 /* is the state not enabled? */ 571 if ((mwait_substate + 1) > num_substates)
504 if (cpuidle_state_table[cstate].enter == NULL) {
505 /* does the driver not know about the state? */
506 if (*cpuidle_state_table[cstate].name == '\0')
507 pr_debug(PREFIX "unaware of model 0x%x"
508 " MWAIT %d please"
509 " contact lenb@kernel.org\n",
510 boot_cpu_data.x86_model, cstate);
511 continue; 572 continue;
512 }
513 573
514 if ((cstate > 2) && 574 if (((mwait_cstate + 1) > 2) &&
515 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 575 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
516 mark_tsc_unstable("TSC halts in idle" 576 mark_tsc_unstable("TSC halts in idle"
517 " states deeper than C2"); 577 " states deeper than C2");
@@ -525,6 +585,9 @@ static int intel_idle_cpuidle_driver_init(void)
525 if (icpu->auto_demotion_disable_flags) 585 if (icpu->auto_demotion_disable_flags)
526 on_each_cpu(auto_demotion_disable, NULL, 1); 586 on_each_cpu(auto_demotion_disable, NULL, 1);
527 587
588 if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
589 on_each_cpu(c1e_promotion_disable, NULL, 1);
590
528 return 0; 591 return 0;
529} 592}
530 593
@@ -543,25 +606,28 @@ static int intel_idle_cpu_init(int cpu)
543 606
544 dev->state_count = 1; 607 dev->state_count = 1;
545 608
546 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { 609 for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
547 int num_substates; 610 int num_substates, mwait_hint, mwait_cstate, mwait_substate;
611
612 if (cpuidle_state_table[cstate].enter == NULL)
613 continue;
548 614
549 if (cstate > max_cstate) { 615 if (cstate + 1 > max_cstate) {
550 printk(PREFIX "max_cstate %d reached\n", max_cstate); 616 printk(PREFIX "max_cstate %d reached\n", max_cstate);
551 break; 617 break;
552 } 618 }
553 619
620 mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
621 mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
622 mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
623
554 /* does the state exist in CPUID.MWAIT? */ 624 /* does the state exist in CPUID.MWAIT? */
555 num_substates = (mwait_substates >> ((cstate) * 4)) 625 num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
556 & MWAIT_SUBSTATE_MASK; 626 & MWAIT_SUBSTATE_MASK;
557 if (num_substates == 0)
558 continue;
559 /* is the state not enabled? */
560 if (cpuidle_state_table[cstate].enter == NULL)
561 continue;
562 627
563 dev->states_usage[dev->state_count].driver_data = 628 /* if sub-state in table is not enumerated by CPUID */
564 (void *)get_driver_data(cstate); 629 if ((mwait_substate + 1) > num_substates)
630 continue;
565 631
566 dev->state_count += 1; 632 dev->state_count += 1;
567 } 633 }
@@ -612,6 +678,7 @@ static int __init intel_idle_init(void)
612 return retval; 678 return retval;
613 } 679 }
614 } 680 }
681 register_cpu_notifier(&cpu_hotplug_notifier);
615 682
616 return 0; 683 return 0;
617} 684}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03870c2..35275099cafd 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
263 struct qib_qp __rcu **qpp; 263 struct qib_qp __rcu **qpp;
264 264
265 qpp = &dev->qp_table[n]; 265 qpp = &dev->qp_table[n];
266 q = rcu_dereference_protected(*qpp, 266 for (; (q = rcu_dereference_protected(*qpp,
267 lockdep_is_held(&dev->qpt_lock)); 267 lockdep_is_held(&dev->qpt_lock))) != NULL;
268 for (; q; qpp = &q->next) { 268 qpp = &q->next)
269 if (q == qp) { 269 if (q == qp) {
270 atomic_dec(&qp->refcount); 270 atomic_dec(&qp->refcount);
271 *qpp = qp->next; 271 *qpp = qp->next;
272 rcu_assign_pointer(qp->next, NULL); 272 rcu_assign_pointer(qp->next, NULL);
273 q = rcu_dereference_protected(*qpp,
274 lockdep_is_held(&dev->qpt_lock));
275 break; 273 break;
276 } 274 }
277 q = rcu_dereference_protected(*qpp,
278 lockdep_is_held(&dev->qpt_lock));
279 }
280 } 275 }
281 276
282 spin_unlock_irqrestore(&dev->qpt_lock, flags); 277 spin_unlock_irqrestore(&dev->qpt_lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 03103d2bd641..67b0c1d23678 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
741 741
742 tx_req->mapping = addr; 742 tx_req->mapping = addr;
743 743
744 skb_orphan(skb);
745 skb_dst_drop(skb);
746
744 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), 747 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
745 addr, skb->len); 748 addr, skb->len);
746 if (unlikely(rc)) { 749 if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
752 dev->trans_start = jiffies; 755 dev->trans_start = jiffies;
753 ++tx->tx_head; 756 ++tx->tx_head;
754 757
755 skb_orphan(skb);
756 skb_dst_drop(skb);
757
758 if (++priv->tx_outstanding == ipoib_sendq_size) { 758 if (++priv->tx_outstanding == ipoib_sendq_size) {
759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
760 tx->qp->qp_num); 760 tx->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a1bca70e20aa..2cfa76f5d99e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
600 netif_stop_queue(dev); 600 netif_stop_queue(dev);
601 } 601 }
602 602
603 skb_orphan(skb);
604 skb_dst_drop(skb);
605
603 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 606 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
604 address->ah, qpn, tx_req, phead, hlen); 607 address->ah, qpn, tx_req, phead, hlen);
605 if (unlikely(rc)) { 608 if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
615 618
616 address->last_send = priv->tx_head; 619 address->last_send = priv->tx_head;
617 ++priv->tx_head; 620 ++priv->tx_head;
618
619 skb_orphan(skb);
620 skb_dst_drop(skb);
621 } 621 }
622 622
623 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) 623 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 55f7e57d4e42..38b523a1ece0 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menu "Input device support" 5menu "Input device support"
6 depends on !S390 && !UML 6 depends on !UML
7 7
8config INPUT 8config INPUT
9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT 9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 47a6009dbf43..71db1930573f 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -18,6 +18,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
18{ 18{
19 if (dev->absinfo && test_bit(src, dev->absbit)) { 19 if (dev->absinfo && test_bit(src, dev->absbit)) {
20 dev->absinfo[dst] = dev->absinfo[src]; 20 dev->absinfo[dst] = dev->absinfo[src];
21 dev->absinfo[dst].fuzz = 0;
21 dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst); 22 dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
22 } 23 }
23} 24}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ce01332f7b3a..c04469928925 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1785,12 +1785,13 @@ static void devm_input_device_release(struct device *dev, void *res)
1785 * its driver (or binding fails). Once managed input device is allocated, 1785 * its driver (or binding fails). Once managed input device is allocated,
1786 * it is ready to be set up and registered in the same fashion as regular 1786 * it is ready to be set up and registered in the same fashion as regular
1787 * input device. There are no special devm_input_device_[un]register() 1787 * input device. There are no special devm_input_device_[un]register()
1788 * variants, regular ones work with both managed and unmanaged devices. 1788 * variants, regular ones work with both managed and unmanaged devices,
1789 * should you need them. In most cases however, managed input device need
1790 * not be explicitly unregistered or freed.
1789 * 1791 *
1790 * NOTE: the owner device is set up as parent of input device and users 1792 * NOTE: the owner device is set up as parent of input device and users
1791 * should not override it. 1793 * should not override it.
1792 */ 1794 */
1793
1794struct input_dev *devm_input_allocate_device(struct device *dev) 1795struct input_dev *devm_input_allocate_device(struct device *dev)
1795{ 1796{
1796 struct input_dev *input; 1797 struct input_dev *input;
@@ -2004,6 +2005,17 @@ static void devm_input_device_unregister(struct device *dev, void *res)
2004 * Once device has been successfully registered it can be unregistered 2005 * Once device has been successfully registered it can be unregistered
2005 * with input_unregister_device(); input_free_device() should not be 2006 * with input_unregister_device(); input_free_device() should not be
2006 * called in this case. 2007 * called in this case.
2008 *
2009 * Note that this function is also used to register managed input devices
2010 * (ones allocated with devm_input_allocate_device()). Such managed input
2011 * devices need not be explicitly unregistered or freed, their tear down
2012 * is controlled by the devres infrastructure. It is also worth noting
2013 * that tear down of managed input devices is internally a 2-step process:
2014 * registered managed input device is first unregistered, but stays in
2015 * memory and can still handle input_event() calls (although events will
2016 * not be delivered anywhere). The freeing of managed input device will
2017 * happen later, when devres stack is unwound to the point where device
2018 * allocation was made.
2007 */ 2019 */
2008int input_register_device(struct input_dev *dev) 2020int input_register_device(struct input_dev *dev)
2009{ 2021{
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 358cd7ee905b..7cd74e29cbc8 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
162#define GET_TIME(x) do { x = get_cycles(); } while (0) 162#define GET_TIME(x) do { x = get_cycles(); } while (0)
163#define DELTA(x,y) ((y)-(x)) 163#define DELTA(x,y) ((y)-(x))
164#define TIME_NAME "PCC" 164#define TIME_NAME "PCC"
165#elif defined(CONFIG_MN10300) 165#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
166#define GET_TIME(x) do { x = get_cycles(); } while (0) 166#define GET_TIME(x) do { x = get_cycles(); } while (0)
167#define DELTA(x, y) ((x) - (y)) 167#define DELTA(x, y) ((x) - (y))
168#define TIME_NAME "TSC" 168#define TIME_NAME "TSC"
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index f8f892b076e8..b76ac580703c 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -12,7 +12,7 @@
12 * the Free Software Foundation. 12 * the Free Software Foundation.
13*/ 13*/
14 14
15/* #define WK0701_DEBUG */ 15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 16
17#define RESERVE 20000 17#define RESERVE 20000
18#define SYNC_PULSE 1306000 18#define SYNC_PULSE 1306000
@@ -67,6 +67,7 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
67{ 67{
68 int i; 68 int i;
69 int val1, val2, val3, val4, val5, val6, val7, val8; 69 int val1, val2, val3, val4, val5, val6, val7, val8;
70 int magic, magic_bit;
70 int crc1, crc2; 71 int crc1, crc2;
71 72
72 for (crc1 = crc2 = i = 0; i < 10; i++) { 73 for (crc1 = crc2 = i = 0; i < 10; i++) {
@@ -102,17 +103,12 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
102 val8 = (w->buf[18] & 1) << 8 | (w->buf[19] << 4) | w->buf[20]; 103 val8 = (w->buf[18] & 1) << 8 | (w->buf[19] << 4) | w->buf[20];
103 val8 *= (w->buf[18] & 2) - 1; /*sign */ 104 val8 *= (w->buf[18] & 2) - 1; /*sign */
104 105
105#ifdef WK0701_DEBUG 106 magic = (w->buf[21] << 4) | w->buf[22];
106 { 107 magic_bit = (w->buf[24] & 8) >> 3;
107 int magic, magic_bit; 108 pr_debug("%4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
108 magic = (w->buf[21] << 4) | w->buf[22]; 109 val1, val2, val3, val4, val5, val6, val7, val8,
109 magic_bit = (w->buf[24] & 8) >> 3; 110 magic, magic_bit);
110 printk(KERN_DEBUG 111
111 "walkera0701: %4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
112 val1, val2, val3, val4, val5, val6, val7, val8, magic,
113 magic_bit);
114 }
115#endif
116 input_report_abs(w->input_dev, ABS_X, val2); 112 input_report_abs(w->input_dev, ABS_X, val2);
117 input_report_abs(w->input_dev, ABS_Y, val1); 113 input_report_abs(w->input_dev, ABS_Y, val1);
118 input_report_abs(w->input_dev, ABS_Z, val6); 114 input_report_abs(w->input_dev, ABS_Z, val6);
@@ -187,6 +183,9 @@ static int walkera0701_open(struct input_dev *dev)
187{ 183{
188 struct walkera_dev *w = input_get_drvdata(dev); 184 struct walkera_dev *w = input_get_drvdata(dev);
189 185
186 if (parport_claim(w->pardevice))
187 return -EBUSY;
188
190 parport_enable_irq(w->parport); 189 parport_enable_irq(w->parport);
191 return 0; 190 return 0;
192} 191}
@@ -197,40 +196,51 @@ static void walkera0701_close(struct input_dev *dev)
197 196
198 parport_disable_irq(w->parport); 197 parport_disable_irq(w->parport);
199 hrtimer_cancel(&w->timer); 198 hrtimer_cancel(&w->timer);
199
200 parport_release(w->pardevice);
200} 201}
201 202
202static int walkera0701_connect(struct walkera_dev *w, int parport) 203static int walkera0701_connect(struct walkera_dev *w, int parport)
203{ 204{
204 int err = -ENODEV; 205 int error;
205 206
206 w->parport = parport_find_number(parport); 207 w->parport = parport_find_number(parport);
207 if (w->parport == NULL) 208 if (!w->parport) {
209 pr_err("parport %d does not exist\n", parport);
208 return -ENODEV; 210 return -ENODEV;
211 }
209 212
210 if (w->parport->irq == -1) { 213 if (w->parport->irq == -1) {
211 printk(KERN_ERR "walkera0701: parport without interrupt\n"); 214 pr_err("parport %d does not have interrupt assigned\n",
212 goto init_err; 215 parport);
216 error = -EINVAL;
217 goto err_put_parport;
213 } 218 }
214 219
215 err = -EBUSY;
216 w->pardevice = parport_register_device(w->parport, "walkera0701", 220 w->pardevice = parport_register_device(w->parport, "walkera0701",
217 NULL, NULL, walkera0701_irq_handler, 221 NULL, NULL, walkera0701_irq_handler,
218 PARPORT_DEV_EXCL, w); 222 PARPORT_DEV_EXCL, w);
219 if (!w->pardevice) 223 if (!w->pardevice) {
220 goto init_err; 224 pr_err("failed to register parport device\n");
221 225 error = -EIO;
222 if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT)) 226 goto err_put_parport;
223 goto init_err1; 227 }
224 228
225 if (parport_claim(w->pardevice)) 229 if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT)) {
226 goto init_err1; 230 pr_err("failed to negotiate parport mode\n");
231 error = -EIO;
232 goto err_unregister_device;
233 }
227 234
228 hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 235 hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
229 w->timer.function = timer_handler; 236 w->timer.function = timer_handler;
230 237
231 w->input_dev = input_allocate_device(); 238 w->input_dev = input_allocate_device();
232 if (!w->input_dev) 239 if (!w->input_dev) {
233 goto init_err2; 240 pr_err("failed to allocate input device\n");
241 error = -ENOMEM;
242 goto err_unregister_device;
243 }
234 244
235 input_set_drvdata(w->input_dev, w); 245 input_set_drvdata(w->input_dev, w);
236 w->input_dev->name = "Walkera WK-0701 TX"; 246 w->input_dev->name = "Walkera WK-0701 TX";
@@ -241,6 +251,7 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
241 w->input_dev->id.vendor = 0x0001; 251 w->input_dev->id.vendor = 0x0001;
242 w->input_dev->id.product = 0x0001; 252 w->input_dev->id.product = 0x0001;
243 w->input_dev->id.version = 0x0100; 253 w->input_dev->id.version = 0x0100;
254 w->input_dev->dev.parent = w->parport->dev;
244 w->input_dev->open = walkera0701_open; 255 w->input_dev->open = walkera0701_open;
245 w->input_dev->close = walkera0701_close; 256 w->input_dev->close = walkera0701_close;
246 257
@@ -254,27 +265,26 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
254 input_set_abs_params(w->input_dev, ABS_RUDDER, -512, 512, 0, 0); 265 input_set_abs_params(w->input_dev, ABS_RUDDER, -512, 512, 0, 0);
255 input_set_abs_params(w->input_dev, ABS_MISC, -512, 512, 0, 0); 266 input_set_abs_params(w->input_dev, ABS_MISC, -512, 512, 0, 0);
256 267
257 err = input_register_device(w->input_dev); 268 error = input_register_device(w->input_dev);
258 if (err) 269 if (error) {
259 goto init_err3; 270 pr_err("failed to register input device\n");
271 goto err_free_input_dev;
272 }
260 273
261 return 0; 274 return 0;
262 275
263 init_err3: 276err_free_input_dev:
264 input_free_device(w->input_dev); 277 input_free_device(w->input_dev);
265 init_err2: 278err_unregister_device:
266 parport_release(w->pardevice);
267 init_err1:
268 parport_unregister_device(w->pardevice); 279 parport_unregister_device(w->pardevice);
269 init_err: 280err_put_parport:
270 parport_put_port(w->parport); 281 parport_put_port(w->parport);
271 return err; 282 return error;
272} 283}
273 284
274static void walkera0701_disconnect(struct walkera_dev *w) 285static void walkera0701_disconnect(struct walkera_dev *w)
275{ 286{
276 input_unregister_device(w->input_dev); 287 input_unregister_device(w->input_dev);
277 parport_release(w->pardevice);
278 parport_unregister_device(w->pardevice); 288 parport_unregister_device(w->pardevice);
279 parport_put_port(w->parport); 289 parport_put_port(w->parport);
280} 290}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 5a240c60342d..ac0500667000 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -224,7 +224,7 @@ config KEYBOARD_TCA6416
224 224
225config KEYBOARD_TCA8418 225config KEYBOARD_TCA8418
226 tristate "TCA8418 Keypad Support" 226 tristate "TCA8418 Keypad Support"
227 depends on I2C 227 depends on I2C && GENERIC_HARDIRQS
228 select INPUT_MATRIXKMAP 228 select INPUT_MATRIXKMAP
229 help 229 help
230 This driver implements basic keypad functionality 230 This driver implements basic keypad functionality
@@ -303,7 +303,7 @@ config KEYBOARD_HP7XX
303 303
304config KEYBOARD_LM8323 304config KEYBOARD_LM8323
305 tristate "LM8323 keypad chip" 305 tristate "LM8323 keypad chip"
306 depends on I2C 306 depends on I2C && GENERIC_HARDIRQS
307 depends on LEDS_CLASS 307 depends on LEDS_CLASS
308 help 308 help
309 If you say yes here you get support for the National Semiconductor 309 If you say yes here you get support for the National Semiconductor
@@ -420,7 +420,7 @@ config KEYBOARD_NOMADIK
420 420
421config KEYBOARD_TEGRA 421config KEYBOARD_TEGRA
422 tristate "NVIDIA Tegra internal matrix keyboard controller support" 422 tristate "NVIDIA Tegra internal matrix keyboard controller support"
423 depends on ARCH_TEGRA 423 depends on ARCH_TEGRA && OF
424 select INPUT_MATRIXKMAP 424 select INPUT_MATRIXKMAP
425 help 425 help
426 Say Y here if you want to use a matrix keyboard connected directly 426 Say Y here if you want to use a matrix keyboard connected directly
@@ -479,6 +479,16 @@ config KEYBOARD_SAMSUNG
479 To compile this driver as a module, choose M here: the 479 To compile this driver as a module, choose M here: the
480 module will be called samsung-keypad. 480 module will be called samsung-keypad.
481 481
482config KEYBOARD_GOLDFISH_EVENTS
483 depends on GOLDFISH
484 tristate "Generic Input Event device for Goldfish"
485 help
486 Say Y here to get an input event device for the Goldfish virtual
487 device emulator.
488
489 To compile this driver as a module, choose M here: the
490 module will be called goldfish-events.
491
482config KEYBOARD_STOWAWAY 492config KEYBOARD_STOWAWAY
483 tristate "Stowaway keyboard" 493 tristate "Stowaway keyboard"
484 select SERIO 494 select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 44e76002f54b..49b16453d00e 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o 13obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
14obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o 14obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
15obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o 15obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
16obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
16obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o 17obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
17obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o 18obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
18obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o 19obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index add5ffd9fe26..2626773ff29b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -676,6 +676,39 @@ static inline void atkbd_disable(struct atkbd *atkbd)
676 serio_continue_rx(atkbd->ps2dev.serio); 676 serio_continue_rx(atkbd->ps2dev.serio);
677} 677}
678 678
679static int atkbd_activate(struct atkbd *atkbd)
680{
681 struct ps2dev *ps2dev = &atkbd->ps2dev;
682
683/*
684 * Enable the keyboard to receive keystrokes.
685 */
686
687 if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
688 dev_err(&ps2dev->serio->dev,
689 "Failed to enable keyboard on %s\n",
690 ps2dev->serio->phys);
691 return -1;
692 }
693
694 return 0;
695}
696
697/*
698 * atkbd_deactivate() resets and disables the keyboard from sending
699 * keystrokes.
700 */
701
702static void atkbd_deactivate(struct atkbd *atkbd)
703{
704 struct ps2dev *ps2dev = &atkbd->ps2dev;
705
706 if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_DIS))
707 dev_err(&ps2dev->serio->dev,
708 "Failed to deactivate keyboard on %s\n",
709 ps2dev->serio->phys);
710}
711
679/* 712/*
680 * atkbd_probe() probes for an AT keyboard on a serio port. 713 * atkbd_probe() probes for an AT keyboard on a serio port.
681 */ 714 */
@@ -726,11 +759,17 @@ static int atkbd_probe(struct atkbd *atkbd)
726 759
727 if (atkbd->id == 0xaca1 && atkbd->translated) { 760 if (atkbd->id == 0xaca1 && atkbd->translated) {
728 dev_err(&ps2dev->serio->dev, 761 dev_err(&ps2dev->serio->dev,
729 "NCD terminal keyboards are only supported on non-translating controlelrs. " 762 "NCD terminal keyboards are only supported on non-translating controllers. "
730 "Use i8042.direct=1 to disable translation.\n"); 763 "Use i8042.direct=1 to disable translation.\n");
731 return -1; 764 return -1;
732 } 765 }
733 766
767/*
768 * Make sure nothing is coming from the keyboard and disturbs our
769 * internal state.
770 */
771 atkbd_deactivate(atkbd);
772
734 return 0; 773 return 0;
735} 774}
736 775
@@ -825,24 +864,6 @@ static int atkbd_reset_state(struct atkbd *atkbd)
825 return 0; 864 return 0;
826} 865}
827 866
828static int atkbd_activate(struct atkbd *atkbd)
829{
830 struct ps2dev *ps2dev = &atkbd->ps2dev;
831
832/*
833 * Enable the keyboard to receive keystrokes.
834 */
835
836 if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
837 dev_err(&ps2dev->serio->dev,
838 "Failed to enable keyboard on %s\n",
839 ps2dev->serio->phys);
840 return -1;
841 }
842
843 return 0;
844}
845
846/* 867/*
847 * atkbd_cleanup() restores the keyboard state so that BIOS is happy after a 868 * atkbd_cleanup() restores the keyboard state so that BIOS is happy after a
848 * reboot. 869 * reboot.
@@ -1150,7 +1171,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
1150 1171
1151 atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra); 1172 atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
1152 atkbd_reset_state(atkbd); 1173 atkbd_reset_state(atkbd);
1153 atkbd_activate(atkbd);
1154 1174
1155 } else { 1175 } else {
1156 atkbd->set = 2; 1176 atkbd->set = 2;
@@ -1165,6 +1185,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
1165 goto fail3; 1185 goto fail3;
1166 1186
1167 atkbd_enable(atkbd); 1187 atkbd_enable(atkbd);
1188 if (serio->write)
1189 atkbd_activate(atkbd);
1168 1190
1169 err = input_register_device(atkbd->dev); 1191 err = input_register_device(atkbd->dev);
1170 if (err) 1192 if (err)
@@ -1208,8 +1230,6 @@ static int atkbd_reconnect(struct serio *serio)
1208 if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra)) 1230 if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
1209 goto out; 1231 goto out;
1210 1232
1211 atkbd_activate(atkbd);
1212
1213 /* 1233 /*
1214 * Restore LED state and repeat rate. While input core 1234 * Restore LED state and repeat rate. While input core
1215 * will do this for us at resume time reconnect may happen 1235 * will do this for us at resume time reconnect may happen
@@ -1223,7 +1243,17 @@ static int atkbd_reconnect(struct serio *serio)
1223 1243
1224 } 1244 }
1225 1245
1246 /*
1247 * Reset our state machine in case reconnect happened in the middle
1248 * of multi-byte scancode.
1249 */
1250 atkbd->xl_bit = 0;
1251 atkbd->emul = 0;
1252
1226 atkbd_enable(atkbd); 1253 atkbd_enable(atkbd);
1254 if (atkbd->write)
1255 atkbd_activate(atkbd);
1256
1227 retval = 0; 1257 retval = 0;
1228 1258
1229 out: 1259 out:
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
new file mode 100644
index 000000000000..9f60a2ec88db
--- /dev/null
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (C) 2012 Intel, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/types.h>
20#include <linux/input.h>
21#include <linux/kernel.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26
27enum {
28 REG_READ = 0x00,
29 REG_SET_PAGE = 0x00,
30 REG_LEN = 0x04,
31 REG_DATA = 0x08,
32
33 PAGE_NAME = 0x00000,
34 PAGE_EVBITS = 0x10000,
35 PAGE_ABSDATA = 0x20000 | EV_ABS,
36};
37
38struct event_dev {
39 struct input_dev *input;
40 int irq;
41 void __iomem *addr;
42 char name[0];
43};
44
45static irqreturn_t events_interrupt(int irq, void *dev_id)
46{
47 struct event_dev *edev = dev_id;
48 unsigned type, code, value;
49
50 type = __raw_readl(edev->addr + REG_READ);
51 code = __raw_readl(edev->addr + REG_READ);
52 value = __raw_readl(edev->addr + REG_READ);
53
54 input_event(edev->input, type, code, value);
55 input_sync(edev->input);
56 return IRQ_HANDLED;
57}
58
59static void events_import_bits(struct event_dev *edev,
60 unsigned long bits[], unsigned type, size_t count)
61{
62 void __iomem *addr = edev->addr;
63 int i, j;
64 size_t size;
65 uint8_t val;
66
67 __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
68
69 size = __raw_readl(addr + REG_LEN) * 8;
70 if (size < count)
71 count = size;
72
73 addr += REG_DATA;
74 for (i = 0; i < count; i += 8) {
75 val = __raw_readb(addr++);
76 for (j = 0; j < 8; j++)
77 if (val & 1 << j)
78 set_bit(i + j, bits);
79 }
80}
81
82static void events_import_abs_params(struct event_dev *edev)
83{
84 struct input_dev *input_dev = edev->input;
85 void __iomem *addr = edev->addr;
86 u32 val[4];
87 int count;
88 int i, j;
89
90 __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
91
92 count = __raw_readl(addr + REG_LEN) / sizeof(val);
93 if (count > ABS_MAX)
94 count = ABS_MAX;
95
96 for (i = 0; i < count; i++) {
97 if (!test_bit(i, input_dev->absbit))
98 continue;
99
100 for (j = 0; j < ARRAY_SIZE(val); j++) {
101 int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
102 val[j] = __raw_readl(edev->addr + REG_DATA + offset);
103 }
104
105 input_set_abs_params(input_dev, i,
106 val[0], val[1], val[2], val[3]);
107 }
108}
109
110static int events_probe(struct platform_device *pdev)
111{
112 struct input_dev *input_dev;
113 struct event_dev *edev;
114 struct resource *res;
115 unsigned keymapnamelen;
116 void __iomem *addr;
117 int irq;
118 int i;
119 int error;
120
121 irq = platform_get_irq(pdev, 0);
122 if (irq < 0)
123 return -EINVAL;
124
125 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
126 if (!res)
127 return -EINVAL;
128
129 addr = devm_ioremap(&pdev->dev, res->start, 4096);
130 if (!addr)
131 return -ENOMEM;
132
133 __raw_writel(PAGE_NAME, addr + REG_SET_PAGE);
134 keymapnamelen = __raw_readl(addr + REG_LEN);
135
136 edev = devm_kzalloc(&pdev->dev,
137 sizeof(struct event_dev) + keymapnamelen + 1,
138 GFP_KERNEL);
139 if (!edev)
140 return -ENOMEM;
141
142 input_dev = devm_input_allocate_device(&pdev->dev);
143 if (!input_dev)
144 return -ENOMEM;
145
146 edev->input = input_dev;
147 edev->addr = addr;
148 edev->irq = irq;
149
150 for (i = 0; i < keymapnamelen; i++)
151 edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
152
153 pr_debug("events_probe() keymap=%s\n", edev->name);
154
155 input_dev->name = edev->name;
156 input_dev->id.bustype = BUS_HOST;
157
158 events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
159 events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX);
160 events_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
161 events_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
162 events_import_bits(edev, input_dev->mscbit, EV_MSC, MSC_MAX);
163 events_import_bits(edev, input_dev->ledbit, EV_LED, LED_MAX);
164 events_import_bits(edev, input_dev->sndbit, EV_SND, SND_MAX);
165 events_import_bits(edev, input_dev->ffbit, EV_FF, FF_MAX);
166 events_import_bits(edev, input_dev->swbit, EV_SW, SW_MAX);
167
168 events_import_abs_params(edev);
169
170 error = devm_request_irq(&pdev->dev, edev->irq, events_interrupt, 0,
171 "goldfish-events-keypad", edev);
172 if (error)
173 return error;
174
175 error = input_register_device(input_dev);
176 if (error)
177 return error;
178
179 return 0;
180}
181
182static struct platform_driver events_driver = {
183 .probe = events_probe,
184 .driver = {
185 .owner = THIS_MODULE,
186 .name = "goldfish_events",
187 },
188};
189
190module_platform_driver(events_driver);
191
192MODULE_AUTHOR("Brian Swetland");
193MODULE_DESCRIPTION("Goldfish Event Device");
194MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 6d150e3e1f55..98f9113251d2 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -20,6 +20,7 @@
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/of.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/timer.h> 26#include <linux/timer.h>
@@ -414,15 +415,23 @@ open_err:
414 return -EIO; 415 return -EIO;
415} 416}
416 417
418#ifdef CONFIG_OF
419static struct of_device_id imx_keypad_of_match[] = {
420 { .compatible = "fsl,imx21-kpp", },
421 { /* sentinel */ }
422};
423MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
424#endif
425
417static int imx_keypad_probe(struct platform_device *pdev) 426static int imx_keypad_probe(struct platform_device *pdev)
418{ 427{
419 const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data; 428 const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
420 struct imx_keypad *keypad; 429 struct imx_keypad *keypad;
421 struct input_dev *input_dev; 430 struct input_dev *input_dev;
422 struct resource *res; 431 struct resource *res;
423 int irq, error, i; 432 int irq, error, i, row, col;
424 433
425 if (keymap_data == NULL) { 434 if (!keymap_data && !pdev->dev.of_node) {
426 dev_err(&pdev->dev, "no keymap defined\n"); 435 dev_err(&pdev->dev, "no keymap defined\n");
427 return -EINVAL; 436 return -EINVAL;
428 } 437 }
@@ -480,22 +489,6 @@ static int imx_keypad_probe(struct platform_device *pdev)
480 goto failed_unmap; 489 goto failed_unmap;
481 } 490 }
482 491
483 /* Search for rows and cols enabled */
484 for (i = 0; i < keymap_data->keymap_size; i++) {
485 keypad->rows_en_mask |= 1 << KEY_ROW(keymap_data->keymap[i]);
486 keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]);
487 }
488
489 if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
490 keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
491 dev_err(&pdev->dev,
492 "invalid key data (too many rows or colums)\n");
493 error = -EINVAL;
494 goto failed_clock_put;
495 }
496 dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
497 dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
498
499 /* Init the Input device */ 492 /* Init the Input device */
500 input_dev->name = pdev->name; 493 input_dev->name = pdev->name;
501 input_dev->id.bustype = BUS_HOST; 494 input_dev->id.bustype = BUS_HOST;
@@ -512,6 +505,19 @@ static int imx_keypad_probe(struct platform_device *pdev)
512 goto failed_clock_put; 505 goto failed_clock_put;
513 } 506 }
514 507
508 /* Search for rows and cols enabled */
509 for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) {
510 for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) {
511 i = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
512 if (keypad->keycodes[i] != KEY_RESERVED) {
513 keypad->rows_en_mask |= 1 << row;
514 keypad->cols_en_mask |= 1 << col;
515 }
516 }
517 }
518 dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
519 dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
520
515 __set_bit(EV_REP, input_dev->evbit); 521 __set_bit(EV_REP, input_dev->evbit);
516 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 522 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
517 input_set_drvdata(input_dev, keypad); 523 input_set_drvdata(input_dev, keypad);
@@ -631,6 +637,7 @@ static struct platform_driver imx_keypad_driver = {
631 .name = "imx-keypad", 637 .name = "imx-keypad",
632 .owner = THIS_MODULE, 638 .owner = THIS_MODULE,
633 .pm = &imx_kbd_pm_ops, 639 .pm = &imx_kbd_pm_ops,
640 .of_match_table = of_match_ptr(imx_keypad_of_match),
634 }, 641 },
635 .probe = imx_keypad_probe, 642 .probe = imx_keypad_probe,
636 .remove = imx_keypad_remove, 643 .remove = imx_keypad_remove,
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 93c812662134..0de23f41b2d3 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -398,7 +398,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm)
398 lm8323_configure(lm); 398 lm8323_configure(lm);
399 } 399 }
400 for (i = 0; i < LM8323_NUM_PWMS; i++) { 400 for (i = 0; i < LM8323_NUM_PWMS; i++) {
401 if (ints & (1 << (INT_PWM1 + i))) { 401 if (ints & (INT_PWM1 << i)) {
402 dev_vdbg(&lm->client->dev, 402 dev_vdbg(&lm->client->dev,
403 "pwm%d engine completed\n", i); 403 "pwm%d engine completed\n", i);
404 pwm_done(&lm->pwm[i]); 404 pwm_done(&lm->pwm[i]);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index f4ff0dda7597..71d77192ac1e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -403,7 +403,7 @@ matrix_keypad_parse_dt(struct device *dev)
403 struct matrix_keypad_platform_data *pdata; 403 struct matrix_keypad_platform_data *pdata;
404 struct device_node *np = dev->of_node; 404 struct device_node *np = dev->of_node;
405 unsigned int *gpios; 405 unsigned int *gpios;
406 int i; 406 int i, nrow, ncol;
407 407
408 if (!np) { 408 if (!np) {
409 dev_err(dev, "device lacks DT data\n"); 409 dev_err(dev, "device lacks DT data\n");
@@ -416,9 +416,9 @@ matrix_keypad_parse_dt(struct device *dev)
416 return ERR_PTR(-ENOMEM); 416 return ERR_PTR(-ENOMEM);
417 } 417 }
418 418
419 pdata->num_row_gpios = of_gpio_named_count(np, "row-gpios"); 419 pdata->num_row_gpios = nrow = of_gpio_named_count(np, "row-gpios");
420 pdata->num_col_gpios = of_gpio_named_count(np, "col-gpios"); 420 pdata->num_col_gpios = ncol = of_gpio_named_count(np, "col-gpios");
421 if (!pdata->num_row_gpios || !pdata->num_col_gpios) { 421 if (nrow <= 0 || ncol <= 0) {
422 dev_err(dev, "number of keypad rows/columns not specified\n"); 422 dev_err(dev, "number of keypad rows/columns not specified\n");
423 return ERR_PTR(-EINVAL); 423 return ERR_PTR(-EINVAL);
424 } 424 }
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 3dc2b0f27b0c..1c0ddad0a1cc 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/leds.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/jiffies.h> 26#include <linux/jiffies.h>
@@ -39,6 +40,11 @@
39#define QT2160_CMD_GPIOS 6 40#define QT2160_CMD_GPIOS 6
40#define QT2160_CMD_SUBVER 7 41#define QT2160_CMD_SUBVER 7
41#define QT2160_CMD_CALIBRATE 10 42#define QT2160_CMD_CALIBRATE 10
43#define QT2160_CMD_DRIVE_X 70
44#define QT2160_CMD_PWMEN_X 74
45#define QT2160_CMD_PWM_DUTY 76
46
47#define QT2160_NUM_LEDS_X 8
42 48
43#define QT2160_CYCLE_INTERVAL (2*HZ) 49#define QT2160_CYCLE_INTERVAL (2*HZ)
44 50
@@ -49,6 +55,17 @@ static unsigned char qt2160_key2code[] = {
49 KEY_C, KEY_D, KEY_E, KEY_F, 55 KEY_C, KEY_D, KEY_E, KEY_F,
50}; 56};
51 57
58#ifdef CONFIG_LEDS_CLASS
59struct qt2160_led {
60 struct qt2160_data *qt2160;
61 struct led_classdev cdev;
62 struct work_struct work;
63 char name[32];
64 int id;
65 enum led_brightness new_brightness;
66};
67#endif
68
52struct qt2160_data { 69struct qt2160_data {
53 struct i2c_client *client; 70 struct i2c_client *client;
54 struct input_dev *input; 71 struct input_dev *input;
@@ -56,8 +73,61 @@ struct qt2160_data {
56 spinlock_t lock; /* Protects canceling/rescheduling of dwork */ 73 spinlock_t lock; /* Protects canceling/rescheduling of dwork */
57 unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)]; 74 unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
58 u16 key_matrix; 75 u16 key_matrix;
76#ifdef CONFIG_LEDS_CLASS
77 struct qt2160_led leds[QT2160_NUM_LEDS_X];
78 struct mutex led_lock;
79#endif
59}; 80};
60 81
82static int qt2160_read(struct i2c_client *client, u8 reg);
83static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
84
85#ifdef CONFIG_LEDS_CLASS
86
87static void qt2160_led_work(struct work_struct *work)
88{
89 struct qt2160_led *led = container_of(work, struct qt2160_led, work);
90 struct qt2160_data *qt2160 = led->qt2160;
91 struct i2c_client *client = qt2160->client;
92 int value = led->new_brightness;
93 u32 drive, pwmen;
94
95 mutex_lock(&qt2160->led_lock);
96
97 drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
98 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
99 if (value != LED_OFF) {
100 drive |= (1 << led->id);
101 pwmen |= (1 << led->id);
102
103 } else {
104 drive &= ~(1 << led->id);
105 pwmen &= ~(1 << led->id);
106 }
107 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
108 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
109
110 /*
111 * Changing this register will change the brightness
112 * of every LED in the qt2160. It's a HW limitation.
113 */
114 if (value != LED_OFF)
115 qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
116
117 mutex_unlock(&qt2160->led_lock);
118}
119
120static void qt2160_led_set(struct led_classdev *cdev,
121 enum led_brightness value)
122{
123 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
124
125 led->new_brightness = value;
126 schedule_work(&led->work);
127}
128
129#endif /* CONFIG_LEDS_CLASS */
130
61static int qt2160_read_block(struct i2c_client *client, 131static int qt2160_read_block(struct i2c_client *client,
62 u8 inireg, u8 *buffer, unsigned int count) 132 u8 inireg, u8 *buffer, unsigned int count)
63{ 133{
@@ -216,6 +286,63 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
216 return ret; 286 return ret;
217} 287}
218 288
289#ifdef CONFIG_LEDS_CLASS
290
291static int qt2160_register_leds(struct qt2160_data *qt2160)
292{
293 struct i2c_client *client = qt2160->client;
294 int ret;
295 int i;
296
297 mutex_init(&qt2160->led_lock);
298
299 for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
300 struct qt2160_led *led = &qt2160->leds[i];
301
302 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
303 led->cdev.name = led->name;
304 led->cdev.brightness_set = qt2160_led_set;
305 led->cdev.brightness = LED_OFF;
306 led->id = i;
307 led->qt2160 = qt2160;
308
309 INIT_WORK(&led->work, qt2160_led_work);
310
311 ret = led_classdev_register(&client->dev, &led->cdev);
312 if (ret < 0)
313 return ret;
314 }
315
316 /* Tur off LEDs */
317 qt2160_write(client, QT2160_CMD_DRIVE_X, 0);
318 qt2160_write(client, QT2160_CMD_PWMEN_X, 0);
319 qt2160_write(client, QT2160_CMD_PWM_DUTY, 0);
320
321 return 0;
322}
323
324static void qt2160_unregister_leds(struct qt2160_data *qt2160)
325{
326 int i;
327
328 for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
329 led_classdev_unregister(&qt2160->leds[i].cdev);
330 cancel_work_sync(&qt2160->leds[i].work);
331 }
332}
333
334#else
335
336static inline int qt2160_register_leds(struct qt2160_data *qt2160)
337{
338 return 0;
339}
340
341static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
342{
343}
344
345#endif
219 346
220static bool qt2160_identify(struct i2c_client *client) 347static bool qt2160_identify(struct i2c_client *client)
221{ 348{
@@ -249,7 +376,7 @@ static bool qt2160_identify(struct i2c_client *client)
249} 376}
250 377
251static int qt2160_probe(struct i2c_client *client, 378static int qt2160_probe(struct i2c_client *client,
252 const struct i2c_device_id *id) 379 const struct i2c_device_id *id)
253{ 380{
254 struct qt2160_data *qt2160; 381 struct qt2160_data *qt2160;
255 struct input_dev *input; 382 struct input_dev *input;
@@ -314,11 +441,17 @@ static int qt2160_probe(struct i2c_client *client,
314 } 441 }
315 } 442 }
316 443
444 error = qt2160_register_leds(qt2160);
445 if (error) {
446 dev_err(&client->dev, "Failed to register leds\n");
447 goto err_free_irq;
448 }
449
317 error = input_register_device(qt2160->input); 450 error = input_register_device(qt2160->input);
318 if (error) { 451 if (error) {
319 dev_err(&client->dev, 452 dev_err(&client->dev,
320 "Failed to register input device\n"); 453 "Failed to register input device\n");
321 goto err_free_irq; 454 goto err_unregister_leds;
322 } 455 }
323 456
324 i2c_set_clientdata(client, qt2160); 457 i2c_set_clientdata(client, qt2160);
@@ -326,6 +459,8 @@ static int qt2160_probe(struct i2c_client *client,
326 459
327 return 0; 460 return 0;
328 461
462err_unregister_leds:
463 qt2160_unregister_leds(qt2160);
329err_free_irq: 464err_free_irq:
330 if (client->irq) 465 if (client->irq)
331 free_irq(client->irq, qt2160); 466 free_irq(client->irq, qt2160);
@@ -339,6 +474,8 @@ static int qt2160_remove(struct i2c_client *client)
339{ 474{
340 struct qt2160_data *qt2160 = i2c_get_clientdata(client); 475 struct qt2160_data *qt2160 = i2c_get_clientdata(client);
341 476
477 qt2160_unregister_leds(qt2160);
478
342 /* Release IRQ so no queue will be scheduled */ 479 /* Release IRQ so no queue will be scheduled */
343 if (client->irq) 480 if (client->irq)
344 free_irq(client->irq, qt2160); 481 free_irq(client->irq, qt2160);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index c76f96872d31..d89e7d392d1e 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -29,9 +29,16 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/input/tegra_kbc.h> 32#include <linux/input/matrix_keypad.h>
33#include <mach/clk.h> 33#include <mach/clk.h>
34 34
35#define KBC_MAX_GPIO 24
36#define KBC_MAX_KPENT 8
37
38#define KBC_MAX_ROW 16
39#define KBC_MAX_COL 8
40#define KBC_MAX_KEY (KBC_MAX_ROW * KBC_MAX_COL)
41
35#define KBC_MAX_DEBOUNCE_CNT 0x3ffu 42#define KBC_MAX_DEBOUNCE_CNT 0x3ffu
36 43
37/* KBC row scan time and delay for beginning the row scan. */ 44/* KBC row scan time and delay for beginning the row scan. */
@@ -67,10 +74,27 @@
67 74
68#define KBC_ROW_SHIFT 3 75#define KBC_ROW_SHIFT 3
69 76
77enum tegra_pin_type {
78 PIN_CFG_IGNORE,
79 PIN_CFG_COL,
80 PIN_CFG_ROW,
81};
82
83struct tegra_kbc_pin_cfg {
84 enum tegra_pin_type type;
85 unsigned char num;
86};
87
70struct tegra_kbc { 88struct tegra_kbc {
89 struct device *dev;
90 unsigned int debounce_cnt;
91 unsigned int repeat_cnt;
92 struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
93 const struct matrix_keymap_data *keymap_data;
94 bool wakeup;
71 void __iomem *mmio; 95 void __iomem *mmio;
72 struct input_dev *idev; 96 struct input_dev *idev;
73 unsigned int irq; 97 int irq;
74 spinlock_t lock; 98 spinlock_t lock;
75 unsigned int repoll_dly; 99 unsigned int repoll_dly;
76 unsigned long cp_dly_jiffies; 100 unsigned long cp_dly_jiffies;
@@ -78,7 +102,6 @@ struct tegra_kbc {
78 bool use_fn_map; 102 bool use_fn_map;
79 bool use_ghost_filter; 103 bool use_ghost_filter;
80 bool keypress_caused_wake; 104 bool keypress_caused_wake;
81 const struct tegra_kbc_platform_data *pdata;
82 unsigned short keycode[KBC_MAX_KEY * 2]; 105 unsigned short keycode[KBC_MAX_KEY * 2];
83 unsigned short current_keys[KBC_MAX_KPENT]; 106 unsigned short current_keys[KBC_MAX_KPENT];
84 unsigned int num_pressed_keys; 107 unsigned int num_pressed_keys;
@@ -87,147 +110,6 @@ struct tegra_kbc {
87 struct clk *clk; 110 struct clk *clk;
88}; 111};
89 112
90static const u32 tegra_kbc_default_keymap[] = {
91 KEY(0, 2, KEY_W),
92 KEY(0, 3, KEY_S),
93 KEY(0, 4, KEY_A),
94 KEY(0, 5, KEY_Z),
95 KEY(0, 7, KEY_FN),
96
97 KEY(1, 7, KEY_LEFTMETA),
98
99 KEY(2, 6, KEY_RIGHTALT),
100 KEY(2, 7, KEY_LEFTALT),
101
102 KEY(3, 0, KEY_5),
103 KEY(3, 1, KEY_4),
104 KEY(3, 2, KEY_R),
105 KEY(3, 3, KEY_E),
106 KEY(3, 4, KEY_F),
107 KEY(3, 5, KEY_D),
108 KEY(3, 6, KEY_X),
109
110 KEY(4, 0, KEY_7),
111 KEY(4, 1, KEY_6),
112 KEY(4, 2, KEY_T),
113 KEY(4, 3, KEY_H),
114 KEY(4, 4, KEY_G),
115 KEY(4, 5, KEY_V),
116 KEY(4, 6, KEY_C),
117 KEY(4, 7, KEY_SPACE),
118
119 KEY(5, 0, KEY_9),
120 KEY(5, 1, KEY_8),
121 KEY(5, 2, KEY_U),
122 KEY(5, 3, KEY_Y),
123 KEY(5, 4, KEY_J),
124 KEY(5, 5, KEY_N),
125 KEY(5, 6, KEY_B),
126 KEY(5, 7, KEY_BACKSLASH),
127
128 KEY(6, 0, KEY_MINUS),
129 KEY(6, 1, KEY_0),
130 KEY(6, 2, KEY_O),
131 KEY(6, 3, KEY_I),
132 KEY(6, 4, KEY_L),
133 KEY(6, 5, KEY_K),
134 KEY(6, 6, KEY_COMMA),
135 KEY(6, 7, KEY_M),
136
137 KEY(7, 1, KEY_EQUAL),
138 KEY(7, 2, KEY_RIGHTBRACE),
139 KEY(7, 3, KEY_ENTER),
140 KEY(7, 7, KEY_MENU),
141
142 KEY(8, 4, KEY_RIGHTSHIFT),
143 KEY(8, 5, KEY_LEFTSHIFT),
144
145 KEY(9, 5, KEY_RIGHTCTRL),
146 KEY(9, 7, KEY_LEFTCTRL),
147
148 KEY(11, 0, KEY_LEFTBRACE),
149 KEY(11, 1, KEY_P),
150 KEY(11, 2, KEY_APOSTROPHE),
151 KEY(11, 3, KEY_SEMICOLON),
152 KEY(11, 4, KEY_SLASH),
153 KEY(11, 5, KEY_DOT),
154
155 KEY(12, 0, KEY_F10),
156 KEY(12, 1, KEY_F9),
157 KEY(12, 2, KEY_BACKSPACE),
158 KEY(12, 3, KEY_3),
159 KEY(12, 4, KEY_2),
160 KEY(12, 5, KEY_UP),
161 KEY(12, 6, KEY_PRINT),
162 KEY(12, 7, KEY_PAUSE),
163
164 KEY(13, 0, KEY_INSERT),
165 KEY(13, 1, KEY_DELETE),
166 KEY(13, 3, KEY_PAGEUP),
167 KEY(13, 4, KEY_PAGEDOWN),
168 KEY(13, 5, KEY_RIGHT),
169 KEY(13, 6, KEY_DOWN),
170 KEY(13, 7, KEY_LEFT),
171
172 KEY(14, 0, KEY_F11),
173 KEY(14, 1, KEY_F12),
174 KEY(14, 2, KEY_F8),
175 KEY(14, 3, KEY_Q),
176 KEY(14, 4, KEY_F4),
177 KEY(14, 5, KEY_F3),
178 KEY(14, 6, KEY_1),
179 KEY(14, 7, KEY_F7),
180
181 KEY(15, 0, KEY_ESC),
182 KEY(15, 1, KEY_GRAVE),
183 KEY(15, 2, KEY_F5),
184 KEY(15, 3, KEY_TAB),
185 KEY(15, 4, KEY_F1),
186 KEY(15, 5, KEY_F2),
187 KEY(15, 6, KEY_CAPSLOCK),
188 KEY(15, 7, KEY_F6),
189
190 /* Software Handled Function Keys */
191 KEY(20, 0, KEY_KP7),
192
193 KEY(21, 0, KEY_KP9),
194 KEY(21, 1, KEY_KP8),
195 KEY(21, 2, KEY_KP4),
196 KEY(21, 4, KEY_KP1),
197
198 KEY(22, 1, KEY_KPSLASH),
199 KEY(22, 2, KEY_KP6),
200 KEY(22, 3, KEY_KP5),
201 KEY(22, 4, KEY_KP3),
202 KEY(22, 5, KEY_KP2),
203 KEY(22, 7, KEY_KP0),
204
205 KEY(27, 1, KEY_KPASTERISK),
206 KEY(27, 3, KEY_KPMINUS),
207 KEY(27, 4, KEY_KPPLUS),
208 KEY(27, 5, KEY_KPDOT),
209
210 KEY(28, 5, KEY_VOLUMEUP),
211
212 KEY(29, 3, KEY_HOME),
213 KEY(29, 4, KEY_END),
214 KEY(29, 5, KEY_BRIGHTNESSDOWN),
215 KEY(29, 6, KEY_VOLUMEDOWN),
216 KEY(29, 7, KEY_BRIGHTNESSUP),
217
218 KEY(30, 0, KEY_NUMLOCK),
219 KEY(30, 1, KEY_SCROLLLOCK),
220 KEY(30, 2, KEY_MUTE),
221
222 KEY(31, 4, KEY_HELP),
223};
224
225static const
226struct matrix_keymap_data tegra_kbc_default_keymap_data = {
227 .keymap = tegra_kbc_default_keymap,
228 .keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
229};
230
231static void tegra_kbc_report_released_keys(struct input_dev *input, 113static void tegra_kbc_report_released_keys(struct input_dev *input,
232 unsigned short old_keycodes[], 114 unsigned short old_keycodes[],
233 unsigned int old_num_keys, 115 unsigned int old_num_keys,
@@ -357,18 +239,6 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
357 writel(val, kbc->mmio + KBC_CONTROL_0); 239 writel(val, kbc->mmio + KBC_CONTROL_0);
358} 240}
359 241
360static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
361{
362 u32 val;
363
364 val = readl(kbc->mmio + KBC_CONTROL_0);
365 if (enable)
366 val |= KBC_CONTROL_KEYPRESS_INT_EN;
367 else
368 val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
369 writel(val, kbc->mmio + KBC_CONTROL_0);
370}
371
372static void tegra_kbc_keypress_timer(unsigned long data) 242static void tegra_kbc_keypress_timer(unsigned long data)
373{ 243{
374 struct tegra_kbc *kbc = (struct tegra_kbc *)data; 244 struct tegra_kbc *kbc = (struct tegra_kbc *)data;
@@ -439,12 +309,11 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
439 309
440static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter) 310static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
441{ 311{
442 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
443 int i; 312 int i;
444 unsigned int rst_val; 313 unsigned int rst_val;
445 314
446 /* Either mask all keys or none. */ 315 /* Either mask all keys or none. */
447 rst_val = (filter && !pdata->wakeup) ? ~0 : 0; 316 rst_val = (filter && !kbc->wakeup) ? ~0 : 0;
448 317
449 for (i = 0; i < KBC_MAX_ROW; i++) 318 for (i = 0; i < KBC_MAX_ROW; i++)
450 writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4); 319 writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
@@ -452,7 +321,6 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
452 321
453static void tegra_kbc_config_pins(struct tegra_kbc *kbc) 322static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
454{ 323{
455 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
456 int i; 324 int i;
457 325
458 for (i = 0; i < KBC_MAX_GPIO; i++) { 326 for (i = 0; i < KBC_MAX_GPIO; i++) {
@@ -468,13 +336,13 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
468 row_cfg &= ~r_mask; 336 row_cfg &= ~r_mask;
469 col_cfg &= ~c_mask; 337 col_cfg &= ~c_mask;
470 338
471 switch (pdata->pin_cfg[i].type) { 339 switch (kbc->pin_cfg[i].type) {
472 case PIN_CFG_ROW: 340 case PIN_CFG_ROW:
473 row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft; 341 row_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << r_shft;
474 break; 342 break;
475 343
476 case PIN_CFG_COL: 344 case PIN_CFG_COL:
477 col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft; 345 col_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << c_shft;
478 break; 346 break;
479 347
480 case PIN_CFG_IGNORE: 348 case PIN_CFG_IGNORE:
@@ -488,7 +356,6 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
488 356
489static int tegra_kbc_start(struct tegra_kbc *kbc) 357static int tegra_kbc_start(struct tegra_kbc *kbc)
490{ 358{
491 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
492 unsigned int debounce_cnt; 359 unsigned int debounce_cnt;
493 u32 val = 0; 360 u32 val = 0;
494 361
@@ -503,10 +370,10 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
503 tegra_kbc_config_pins(kbc); 370 tegra_kbc_config_pins(kbc);
504 tegra_kbc_setup_wakekeys(kbc, false); 371 tegra_kbc_setup_wakekeys(kbc, false);
505 372
506 writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0); 373 writel(kbc->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
507 374
508 /* Keyboard debounce count is maximum of 12 bits. */ 375 /* Keyboard debounce count is maximum of 12 bits. */
509 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); 376 debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
510 val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt); 377 val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
511 val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */ 378 val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
512 val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */ 379 val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */
@@ -573,21 +440,20 @@ static void tegra_kbc_close(struct input_dev *dev)
573 return tegra_kbc_stop(kbc); 440 return tegra_kbc_stop(kbc);
574} 441}
575 442
576static bool 443static bool tegra_kbc_check_pin_cfg(const struct tegra_kbc *kbc,
577tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata, 444 unsigned int *num_rows)
578 struct device *dev, unsigned int *num_rows)
579{ 445{
580 int i; 446 int i;
581 447
582 *num_rows = 0; 448 *num_rows = 0;
583 449
584 for (i = 0; i < KBC_MAX_GPIO; i++) { 450 for (i = 0; i < KBC_MAX_GPIO; i++) {
585 const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i]; 451 const struct tegra_kbc_pin_cfg *pin_cfg = &kbc->pin_cfg[i];
586 452
587 switch (pin_cfg->type) { 453 switch (pin_cfg->type) {
588 case PIN_CFG_ROW: 454 case PIN_CFG_ROW:
589 if (pin_cfg->num >= KBC_MAX_ROW) { 455 if (pin_cfg->num >= KBC_MAX_ROW) {
590 dev_err(dev, 456 dev_err(kbc->dev,
591 "pin_cfg[%d]: invalid row number %d\n", 457 "pin_cfg[%d]: invalid row number %d\n",
592 i, pin_cfg->num); 458 i, pin_cfg->num);
593 return false; 459 return false;
@@ -597,7 +463,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
597 463
598 case PIN_CFG_COL: 464 case PIN_CFG_COL:
599 if (pin_cfg->num >= KBC_MAX_COL) { 465 if (pin_cfg->num >= KBC_MAX_COL) {
600 dev_err(dev, 466 dev_err(kbc->dev,
601 "pin_cfg[%d]: invalid column number %d\n", 467 "pin_cfg[%d]: invalid column number %d\n",
602 i, pin_cfg->num); 468 i, pin_cfg->num);
603 return false; 469 return false;
@@ -608,7 +474,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
608 break; 474 break;
609 475
610 default: 476 default:
611 dev_err(dev, 477 dev_err(kbc->dev,
612 "pin_cfg[%d]: invalid entry type %d\n", 478 "pin_cfg[%d]: invalid entry type %d\n",
613 pin_cfg->type, pin_cfg->num); 479 pin_cfg->type, pin_cfg->num);
614 return false; 480 return false;
@@ -618,154 +484,140 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
618 return true; 484 return true;
619} 485}
620 486
621#ifdef CONFIG_OF 487static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
622static struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
623 struct platform_device *pdev)
624{ 488{
625 struct tegra_kbc_platform_data *pdata; 489 struct device_node *np = kbc->dev->of_node;
626 struct device_node *np = pdev->dev.of_node;
627 u32 prop; 490 u32 prop;
628 int i; 491 int i;
629 492 u32 num_rows = 0;
630 if (!np) 493 u32 num_cols = 0;
631 return NULL; 494 u32 cols_cfg[KBC_MAX_GPIO];
632 495 u32 rows_cfg[KBC_MAX_GPIO];
633 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 496 int proplen;
634 if (!pdata) 497 int ret;
635 return NULL;
636 498
637 if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop)) 499 if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
638 pdata->debounce_cnt = prop; 500 kbc->debounce_cnt = prop;
639 501
640 if (!of_property_read_u32(np, "nvidia,repeat-delay-ms", &prop)) 502 if (!of_property_read_u32(np, "nvidia,repeat-delay-ms", &prop))
641 pdata->repeat_cnt = prop; 503 kbc->repeat_cnt = prop;
642 504
643 if (of_find_property(np, "nvidia,needs-ghost-filter", NULL)) 505 if (of_find_property(np, "nvidia,needs-ghost-filter", NULL))
644 pdata->use_ghost_filter = true; 506 kbc->use_ghost_filter = true;
645 507
646 if (of_find_property(np, "nvidia,wakeup-source", NULL)) 508 if (of_find_property(np, "nvidia,wakeup-source", NULL))
647 pdata->wakeup = true; 509 kbc->wakeup = true;
648 510
649 /* 511 if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
650 * All currently known keymaps with device tree support use the same 512 dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
651 * pin_cfg, so set it up here. 513 return -ENOENT;
652 */
653 for (i = 0; i < KBC_MAX_ROW; i++) {
654 pdata->pin_cfg[i].num = i;
655 pdata->pin_cfg[i].type = PIN_CFG_ROW;
656 } 514 }
515 num_rows = proplen / sizeof(u32);
657 516
658 for (i = 0; i < KBC_MAX_COL; i++) { 517 if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
659 pdata->pin_cfg[KBC_MAX_ROW + i].num = i; 518 dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
660 pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL; 519 return -ENOENT;
661 } 520 }
521 num_cols = proplen / sizeof(u32);
662 522
663 return pdata; 523 if (!of_get_property(np, "linux,keymap", &proplen)) {
664} 524 dev_err(kbc->dev, "property linux,keymap not found\n");
665#else 525 return -ENOENT;
666static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata( 526 }
667 struct platform_device *pdev)
668{
669 return NULL;
670}
671#endif
672 527
673static int tegra_kbd_setup_keymap(struct tegra_kbc *kbc) 528 if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
674{ 529 dev_err(kbc->dev,
675 const struct tegra_kbc_platform_data *pdata = kbc->pdata; 530 "keypad rows/columns not porperly specified\n");
676 const struct matrix_keymap_data *keymap_data = pdata->keymap_data; 531 return -EINVAL;
677 unsigned int keymap_rows = KBC_MAX_KEY; 532 }
678 int retval;
679 533
680 if (keymap_data && pdata->use_fn_map) 534 /* Set all pins as non-configured */
681 keymap_rows *= 2; 535 for (i = 0; i < KBC_MAX_GPIO; i++)
536 kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
682 537
683 retval = matrix_keypad_build_keymap(keymap_data, NULL, 538 ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
684 keymap_rows, KBC_MAX_COL, 539 rows_cfg, num_rows);
685 kbc->keycode, kbc->idev); 540 if (ret < 0) {
686 if (retval == -ENOSYS || retval == -ENOENT) { 541 dev_err(kbc->dev, "Rows configurations are not proper\n");
687 /* 542 return -EINVAL;
688 * If there is no OF support in kernel or keymap 543 }
689 * property is missing, use default keymap. 544
690 */ 545 ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
691 retval = matrix_keypad_build_keymap( 546 cols_cfg, num_cols);
692 &tegra_kbc_default_keymap_data, NULL, 547 if (ret < 0) {
693 keymap_rows, KBC_MAX_COL, 548 dev_err(kbc->dev, "Cols configurations are not proper\n");
694 kbc->keycode, kbc->idev); 549 return -EINVAL;
550 }
551
552 for (i = 0; i < num_rows; i++) {
553 kbc->pin_cfg[rows_cfg[i]].type = PIN_CFG_ROW;
554 kbc->pin_cfg[rows_cfg[i]].num = i;
695 } 555 }
696 556
697 return retval; 557 for (i = 0; i < num_cols; i++) {
558 kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
559 kbc->pin_cfg[cols_cfg[i]].num = i;
560 }
561
562 return 0;
698} 563}
699 564
700static int tegra_kbc_probe(struct platform_device *pdev) 565static int tegra_kbc_probe(struct platform_device *pdev)
701{ 566{
702 const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
703 struct tegra_kbc *kbc; 567 struct tegra_kbc *kbc;
704 struct input_dev *input_dev;
705 struct resource *res; 568 struct resource *res;
706 int irq;
707 int err; 569 int err;
708 int num_rows = 0; 570 int num_rows = 0;
709 unsigned int debounce_cnt; 571 unsigned int debounce_cnt;
710 unsigned int scan_time_rows; 572 unsigned int scan_time_rows;
573 unsigned int keymap_rows = KBC_MAX_KEY;
711 574
712 if (!pdata) 575 kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
713 pdata = tegra_kbc_dt_parse_pdata(pdev); 576 if (!kbc) {
577 dev_err(&pdev->dev, "failed to alloc memory for kbc\n");
578 return -ENOMEM;
579 }
714 580
715 if (!pdata) 581 kbc->dev = &pdev->dev;
716 return -EINVAL; 582 spin_lock_init(&kbc->lock);
717 583
718 if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) { 584 err = tegra_kbc_parse_dt(kbc);
719 err = -EINVAL; 585 if (err)
720 goto err_free_pdata; 586 return err;
721 } 587
588 if (!tegra_kbc_check_pin_cfg(kbc, &num_rows))
589 return -EINVAL;
722 590
723 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 591 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
724 if (!res) { 592 if (!res) {
725 dev_err(&pdev->dev, "failed to get I/O memory\n"); 593 dev_err(&pdev->dev, "failed to get I/O memory\n");
726 err = -ENXIO; 594 return -ENXIO;
727 goto err_free_pdata;
728 } 595 }
729 596
730 irq = platform_get_irq(pdev, 0); 597 kbc->irq = platform_get_irq(pdev, 0);
731 if (irq < 0) { 598 if (kbc->irq < 0) {
732 dev_err(&pdev->dev, "failed to get keyboard IRQ\n"); 599 dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
733 err = -ENXIO; 600 return -ENXIO;
734 goto err_free_pdata;
735 } 601 }
736 602
737 kbc = kzalloc(sizeof(*kbc), GFP_KERNEL); 603 kbc->idev = devm_input_allocate_device(&pdev->dev);
738 input_dev = input_allocate_device(); 604 if (!kbc->idev) {
739 if (!kbc || !input_dev) { 605 dev_err(&pdev->dev, "failed to allocate input device\n");
740 err = -ENOMEM; 606 return -ENOMEM;
741 goto err_free_mem;
742 } 607 }
743 608
744 kbc->pdata = pdata;
745 kbc->idev = input_dev;
746 kbc->irq = irq;
747 spin_lock_init(&kbc->lock);
748 setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc); 609 setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
749 610
750 res = request_mem_region(res->start, resource_size(res), pdev->name); 611 kbc->mmio = devm_request_and_ioremap(&pdev->dev, res);
751 if (!res) {
752 dev_err(&pdev->dev, "failed to request I/O memory\n");
753 err = -EBUSY;
754 goto err_free_mem;
755 }
756
757 kbc->mmio = ioremap(res->start, resource_size(res));
758 if (!kbc->mmio) { 612 if (!kbc->mmio) {
759 dev_err(&pdev->dev, "failed to remap I/O memory\n"); 613 dev_err(&pdev->dev, "Cannot request memregion/iomap address\n");
760 err = -ENXIO; 614 return -EBUSY;
761 goto err_free_mem_region;
762 } 615 }
763 616
764 kbc->clk = clk_get(&pdev->dev, NULL); 617 kbc->clk = devm_clk_get(&pdev->dev, NULL);
765 if (IS_ERR(kbc->clk)) { 618 if (IS_ERR(kbc->clk)) {
766 dev_err(&pdev->dev, "failed to get keyboard clock\n"); 619 dev_err(&pdev->dev, "failed to get keyboard clock\n");
767 err = PTR_ERR(kbc->clk); 620 return PTR_ERR(kbc->clk);
768 goto err_iounmap;
769 } 621 }
770 622
771 /* 623 /*
@@ -774,37 +626,38 @@ static int tegra_kbc_probe(struct platform_device *pdev)
774 * the rows. There is an additional delay before the row scanning 626 * the rows. There is an additional delay before the row scanning
775 * starts. The repoll delay is computed in milliseconds. 627 * starts. The repoll delay is computed in milliseconds.
776 */ 628 */
777 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); 629 debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
778 scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows; 630 scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
779 kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt; 631 kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + kbc->repeat_cnt;
780 kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS); 632 kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
781 633
782 kbc->wakeup_key = pdata->wakeup_key; 634 kbc->idev->name = pdev->name;
783 kbc->use_fn_map = pdata->use_fn_map; 635 kbc->idev->id.bustype = BUS_HOST;
784 kbc->use_ghost_filter = pdata->use_ghost_filter; 636 kbc->idev->dev.parent = &pdev->dev;
637 kbc->idev->open = tegra_kbc_open;
638 kbc->idev->close = tegra_kbc_close;
785 639
786 input_dev->name = pdev->name; 640 if (kbc->keymap_data && kbc->use_fn_map)
787 input_dev->id.bustype = BUS_HOST; 641 keymap_rows *= 2;
788 input_dev->dev.parent = &pdev->dev;
789 input_dev->open = tegra_kbc_open;
790 input_dev->close = tegra_kbc_close;
791 642
792 err = tegra_kbd_setup_keymap(kbc); 643 err = matrix_keypad_build_keymap(kbc->keymap_data, NULL,
644 keymap_rows, KBC_MAX_COL,
645 kbc->keycode, kbc->idev);
793 if (err) { 646 if (err) {
794 dev_err(&pdev->dev, "failed to setup keymap\n"); 647 dev_err(&pdev->dev, "failed to setup keymap\n");
795 goto err_put_clk; 648 return err;
796 } 649 }
797 650
798 __set_bit(EV_REP, input_dev->evbit); 651 __set_bit(EV_REP, kbc->idev->evbit);
799 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 652 input_set_capability(kbc->idev, EV_MSC, MSC_SCAN);
800 653
801 input_set_drvdata(input_dev, kbc); 654 input_set_drvdata(kbc->idev, kbc);
802 655
803 err = request_irq(kbc->irq, tegra_kbc_isr, 656 err = devm_request_irq(&pdev->dev, kbc->irq, tegra_kbc_isr,
804 IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc); 657 IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
805 if (err) { 658 if (err) {
806 dev_err(&pdev->dev, "failed to request keyboard IRQ\n"); 659 dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
807 goto err_put_clk; 660 return err;
808 } 661 }
809 662
810 disable_irq(kbc->irq); 663 disable_irq(kbc->irq);
@@ -812,60 +665,28 @@ static int tegra_kbc_probe(struct platform_device *pdev)
812 err = input_register_device(kbc->idev); 665 err = input_register_device(kbc->idev);
813 if (err) { 666 if (err) {
814 dev_err(&pdev->dev, "failed to register input device\n"); 667 dev_err(&pdev->dev, "failed to register input device\n");
815 goto err_free_irq; 668 return err;
816 } 669 }
817 670
818 platform_set_drvdata(pdev, kbc); 671 platform_set_drvdata(pdev, kbc);
819 device_init_wakeup(&pdev->dev, pdata->wakeup); 672 device_init_wakeup(&pdev->dev, kbc->wakeup);
820 673
821 return 0; 674 return 0;
822
823err_free_irq:
824 free_irq(kbc->irq, pdev);
825err_put_clk:
826 clk_put(kbc->clk);
827err_iounmap:
828 iounmap(kbc->mmio);
829err_free_mem_region:
830 release_mem_region(res->start, resource_size(res));
831err_free_mem:
832 input_free_device(input_dev);
833 kfree(kbc);
834err_free_pdata:
835 if (!pdev->dev.platform_data)
836 kfree(pdata);
837
838 return err;
839} 675}
840 676
841static int tegra_kbc_remove(struct platform_device *pdev) 677#ifdef CONFIG_PM_SLEEP
678static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
842{ 679{
843 struct tegra_kbc *kbc = platform_get_drvdata(pdev); 680 u32 val;
844 struct resource *res;
845
846 platform_set_drvdata(pdev, NULL);
847
848 free_irq(kbc->irq, pdev);
849 clk_put(kbc->clk);
850
851 input_unregister_device(kbc->idev);
852 iounmap(kbc->mmio);
853 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
854 release_mem_region(res->start, resource_size(res));
855
856 /*
857 * If we do not have platform data attached to the device we
858 * allocated it ourselves and thus need to free it.
859 */
860 if (!pdev->dev.platform_data)
861 kfree(kbc->pdata);
862
863 kfree(kbc);
864 681
865 return 0; 682 val = readl(kbc->mmio + KBC_CONTROL_0);
683 if (enable)
684 val |= KBC_CONTROL_KEYPRESS_INT_EN;
685 else
686 val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
687 writel(val, kbc->mmio + KBC_CONTROL_0);
866} 688}
867 689
868#ifdef CONFIG_PM_SLEEP
869static int tegra_kbc_suspend(struct device *dev) 690static int tegra_kbc_suspend(struct device *dev)
870{ 691{
871 struct platform_device *pdev = to_platform_device(dev); 692 struct platform_device *pdev = to_platform_device(dev);
@@ -954,7 +775,6 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
954 775
955static struct platform_driver tegra_kbc_driver = { 776static struct platform_driver tegra_kbc_driver = {
956 .probe = tegra_kbc_probe, 777 .probe = tegra_kbc_probe,
957 .remove = tegra_kbc_remove,
958 .driver = { 778 .driver = {
959 .name = "tegra-kbc", 779 .name = "tegra-kbc",
960 .owner = THIS_MODULE, 780 .owner = THIS_MODULE,
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 1cf72fe513e6..0735de3a6468 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -232,7 +232,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = {
232 232
233 .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */ 233 .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
234 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, 234 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
235 .fifo_mode = FIFO_STREAM, 235 .fifo_mode = ADXL_FIFO_STREAM,
236 .watermark = 0, 236 .watermark = 0,
237}; 237};
238 238
@@ -732,7 +732,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
732 mutex_init(&ac->mutex); 732 mutex_init(&ac->mutex);
733 733
734 input_dev->name = "ADXL34x accelerometer"; 734 input_dev->name = "ADXL34x accelerometer";
735 revid = ac->bops->read(dev, DEVID); 735 revid = AC_READ(ac, DEVID);
736 736
737 switch (revid) { 737 switch (revid) {
738 case ID_ADXL345: 738 case ID_ADXL345:
@@ -809,7 +809,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
809 if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS) 809 if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
810 ac->fifo_delay = false; 810 ac->fifo_delay = false;
811 811
812 ac->bops->write(dev, POWER_CTL, 0); 812 AC_WRITE(ac, POWER_CTL, 0);
813 813
814 err = request_threaded_irq(ac->irq, NULL, adxl34x_irq, 814 err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
815 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 815 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -827,7 +827,6 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
827 if (err) 827 if (err)
828 goto err_remove_attr; 828 goto err_remove_attr;
829 829
830 AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
831 AC_WRITE(ac, OFSX, pdata->x_axis_offset); 830 AC_WRITE(ac, OFSX, pdata->x_axis_offset);
832 ac->hwcal.x = pdata->x_axis_offset; 831 ac->hwcal.x = pdata->x_axis_offset;
833 AC_WRITE(ac, OFSY, pdata->y_axis_offset); 832 AC_WRITE(ac, OFSY, pdata->y_axis_offset);
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 26f13131639a..5d4402365a52 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -121,7 +121,7 @@ static int atlas_acpi_button_add(struct acpi_device *device)
121 return err; 121 return err;
122} 122}
123 123
124static int atlas_acpi_button_remove(struct acpi_device *device, int type) 124static int atlas_acpi_button_remove(struct acpi_device *device)
125{ 125{
126 acpi_status status; 126 acpi_status status;
127 127
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 08ffcabd7220..865c2f9d25b9 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -46,18 +46,6 @@
46#define BMA150_POLL_MAX 200 46#define BMA150_POLL_MAX 200
47#define BMA150_POLL_MIN 0 47#define BMA150_POLL_MIN 0
48 48
49#define BMA150_BW_25HZ 0
50#define BMA150_BW_50HZ 1
51#define BMA150_BW_100HZ 2
52#define BMA150_BW_190HZ 3
53#define BMA150_BW_375HZ 4
54#define BMA150_BW_750HZ 5
55#define BMA150_BW_1500HZ 6
56
57#define BMA150_RANGE_2G 0
58#define BMA150_RANGE_4G 1
59#define BMA150_RANGE_8G 2
60
61#define BMA150_MODE_NORMAL 0 49#define BMA150_MODE_NORMAL 0
62#define BMA150_MODE_SLEEP 2 50#define BMA150_MODE_SLEEP 2
63#define BMA150_MODE_WAKE_UP 3 51#define BMA150_MODE_WAKE_UP 3
@@ -372,7 +360,7 @@ static int bma150_open(struct bma150_data *bma150)
372 int error; 360 int error;
373 361
374 error = pm_runtime_get_sync(&bma150->client->dev); 362 error = pm_runtime_get_sync(&bma150->client->dev);
375 if (error && error != -ENOSYS) 363 if (error < 0 && error != -ENOSYS)
376 return error; 364 return error;
377 365
378 /* 366 /*
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 78eb6b30580a..68a5f33152a8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -43,7 +43,6 @@ struct vibra_info {
43 struct device *dev; 43 struct device *dev;
44 struct input_dev *input_dev; 44 struct input_dev *input_dev;
45 45
46 struct workqueue_struct *workqueue;
47 struct work_struct play_work; 46 struct work_struct play_work;
48 47
49 bool enabled; 48 bool enabled;
@@ -143,19 +142,7 @@ static int vibra_play(struct input_dev *input, void *data,
143 if (!info->speed) 142 if (!info->speed)
144 info->speed = effect->u.rumble.weak_magnitude >> 9; 143 info->speed = effect->u.rumble.weak_magnitude >> 9;
145 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1; 144 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
146 queue_work(info->workqueue, &info->play_work); 145 schedule_work(&info->play_work);
147 return 0;
148}
149
150static int twl4030_vibra_open(struct input_dev *input)
151{
152 struct vibra_info *info = input_get_drvdata(input);
153
154 info->workqueue = create_singlethread_workqueue("vibra");
155 if (info->workqueue == NULL) {
156 dev_err(&input->dev, "couldn't create workqueue\n");
157 return -ENOMEM;
158 }
159 return 0; 146 return 0;
160} 147}
161 148
@@ -164,9 +151,6 @@ static void twl4030_vibra_close(struct input_dev *input)
164 struct vibra_info *info = input_get_drvdata(input); 151 struct vibra_info *info = input_get_drvdata(input);
165 152
166 cancel_work_sync(&info->play_work); 153 cancel_work_sync(&info->play_work);
167 INIT_WORK(&info->play_work, vibra_play_work); /* cleanup */
168 destroy_workqueue(info->workqueue);
169 info->workqueue = NULL;
170 154
171 if (info->enabled) 155 if (info->enabled)
172 vibra_disable(info); 156 vibra_disable(info);
@@ -219,7 +203,7 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
219 return -EINVAL; 203 return -EINVAL;
220 } 204 }
221 205
222 info = kzalloc(sizeof(*info), GFP_KERNEL); 206 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
223 if (!info) 207 if (!info)
224 return -ENOMEM; 208 return -ENOMEM;
225 209
@@ -227,11 +211,10 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
227 info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node); 211 info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
228 INIT_WORK(&info->play_work, vibra_play_work); 212 INIT_WORK(&info->play_work, vibra_play_work);
229 213
230 info->input_dev = input_allocate_device(); 214 info->input_dev = devm_input_allocate_device(&pdev->dev);
231 if (info->input_dev == NULL) { 215 if (info->input_dev == NULL) {
232 dev_err(&pdev->dev, "couldn't allocate input device\n"); 216 dev_err(&pdev->dev, "couldn't allocate input device\n");
233 ret = -ENOMEM; 217 return -ENOMEM;
234 goto err_kzalloc;
235 } 218 }
236 219
237 input_set_drvdata(info->input_dev, info); 220 input_set_drvdata(info->input_dev, info);
@@ -239,14 +222,13 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
239 info->input_dev->name = "twl4030:vibrator"; 222 info->input_dev->name = "twl4030:vibrator";
240 info->input_dev->id.version = 1; 223 info->input_dev->id.version = 1;
241 info->input_dev->dev.parent = pdev->dev.parent; 224 info->input_dev->dev.parent = pdev->dev.parent;
242 info->input_dev->open = twl4030_vibra_open;
243 info->input_dev->close = twl4030_vibra_close; 225 info->input_dev->close = twl4030_vibra_close;
244 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
245 227
246 ret = input_ff_create_memless(info->input_dev, NULL, vibra_play); 228 ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
247 if (ret < 0) { 229 if (ret < 0) {
248 dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n"); 230 dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
249 goto err_ialloc; 231 return ret;
250 } 232 }
251 233
252 ret = input_register_device(info->input_dev); 234 ret = input_register_device(info->input_dev);
@@ -262,28 +244,11 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
262 244
263err_iff: 245err_iff:
264 input_ff_destroy(info->input_dev); 246 input_ff_destroy(info->input_dev);
265err_ialloc:
266 input_free_device(info->input_dev);
267err_kzalloc:
268 kfree(info);
269 return ret; 247 return ret;
270} 248}
271 249
272static int twl4030_vibra_remove(struct platform_device *pdev)
273{
274 struct vibra_info *info = platform_get_drvdata(pdev);
275
276 /* this also free ff-memless and calls close if needed */
277 input_unregister_device(info->input_dev);
278 kfree(info);
279 platform_set_drvdata(pdev, NULL);
280
281 return 0;
282}
283
284static struct platform_driver twl4030_vibra_driver = { 250static struct platform_driver twl4030_vibra_driver = {
285 .probe = twl4030_vibra_probe, 251 .probe = twl4030_vibra_probe,
286 .remove = twl4030_vibra_remove,
287 .driver = { 252 .driver = {
288 .name = "twl4030-vibra", 253 .name = "twl4030-vibra",
289 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 71a28ee699f3..0c2dfc8e9691 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -275,7 +275,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
275 return -EINVAL; 275 return -EINVAL;
276 } 276 }
277 277
278 info = kzalloc(sizeof(*info), GFP_KERNEL); 278 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
279 if (!info) { 279 if (!info) {
280 dev_err(&pdev->dev, "couldn't allocate memory\n"); 280 dev_err(&pdev->dev, "couldn't allocate memory\n");
281 return -ENOMEM; 281 return -ENOMEM;
@@ -309,53 +309,23 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
309 if ((!info->vibldrv_res && !info->viblmotor_res) || 309 if ((!info->vibldrv_res && !info->viblmotor_res) ||
310 (!info->vibrdrv_res && !info->vibrmotor_res)) { 310 (!info->vibrdrv_res && !info->vibrmotor_res)) {
311 dev_err(info->dev, "invalid vibra driver/motor resistance\n"); 311 dev_err(info->dev, "invalid vibra driver/motor resistance\n");
312 ret = -EINVAL; 312 return -EINVAL;
313 goto err_kzalloc;
314 } 313 }
315 314
316 info->irq = platform_get_irq(pdev, 0); 315 info->irq = platform_get_irq(pdev, 0);
317 if (info->irq < 0) { 316 if (info->irq < 0) {
318 dev_err(info->dev, "invalid irq\n"); 317 dev_err(info->dev, "invalid irq\n");
319 ret = -EINVAL; 318 return -EINVAL;
320 goto err_kzalloc;
321 } 319 }
322 320
323 mutex_init(&info->mutex); 321 mutex_init(&info->mutex);
324 322
325 info->input_dev = input_allocate_device(); 323 ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
326 if (info->input_dev == NULL) { 324 twl6040_vib_irq_handler, 0,
327 dev_err(info->dev, "couldn't allocate input device\n"); 325 "twl6040_irq_vib", info);
328 ret = -ENOMEM;
329 goto err_kzalloc;
330 }
331
332 input_set_drvdata(info->input_dev, info);
333
334 info->input_dev->name = "twl6040:vibrator";
335 info->input_dev->id.version = 1;
336 info->input_dev->dev.parent = pdev->dev.parent;
337 info->input_dev->close = twl6040_vibra_close;
338 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
339
340 ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
341 if (ret < 0) {
342 dev_err(info->dev, "couldn't register vibrator to FF\n");
343 goto err_ialloc;
344 }
345
346 ret = input_register_device(info->input_dev);
347 if (ret < 0) {
348 dev_err(info->dev, "couldn't register input device\n");
349 goto err_iff;
350 }
351
352 platform_set_drvdata(pdev, info);
353
354 ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
355 "twl6040_irq_vib", info);
356 if (ret) { 326 if (ret) {
357 dev_err(info->dev, "VIB IRQ request failed: %d\n", ret); 327 dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
358 goto err_irq; 328 return ret;
359 } 329 }
360 330
361 info->supplies[0].supply = "vddvibl"; 331 info->supplies[0].supply = "vddvibl";
@@ -368,7 +338,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
368 ARRAY_SIZE(info->supplies), info->supplies); 338 ARRAY_SIZE(info->supplies), info->supplies);
369 if (ret) { 339 if (ret) {
370 dev_err(info->dev, "couldn't get regulators %d\n", ret); 340 dev_err(info->dev, "couldn't get regulators %d\n", ret);
371 goto err_regulator; 341 return ret;
372 } 342 }
373 343
374 if (vddvibl_uV) { 344 if (vddvibl_uV) {
@@ -377,7 +347,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
377 if (ret) { 347 if (ret) {
378 dev_err(info->dev, "failed to set VDDVIBL volt %d\n", 348 dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
379 ret); 349 ret);
380 goto err_voltage; 350 goto err_regulator;
381 } 351 }
382 } 352 }
383 353
@@ -387,34 +357,49 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
387 if (ret) { 357 if (ret) {
388 dev_err(info->dev, "failed to set VDDVIBR volt %d\n", 358 dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
389 ret); 359 ret);
390 goto err_voltage; 360 goto err_regulator;
391 } 361 }
392 } 362 }
393 363
394 info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0); 364 INIT_WORK(&info->play_work, vibra_play_work);
395 if (info->workqueue == NULL) { 365
396 dev_err(info->dev, "couldn't create workqueue\n"); 366 info->input_dev = input_allocate_device();
367 if (info->input_dev == NULL) {
368 dev_err(info->dev, "couldn't allocate input device\n");
397 ret = -ENOMEM; 369 ret = -ENOMEM;
398 goto err_voltage; 370 goto err_regulator;
399 } 371 }
400 INIT_WORK(&info->play_work, vibra_play_work); 372
373 input_set_drvdata(info->input_dev, info);
374
375 info->input_dev->name = "twl6040:vibrator";
376 info->input_dev->id.version = 1;
377 info->input_dev->dev.parent = pdev->dev.parent;
378 info->input_dev->close = twl6040_vibra_close;
379 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
380
381 ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
382 if (ret < 0) {
383 dev_err(info->dev, "couldn't register vibrator to FF\n");
384 goto err_ialloc;
385 }
386
387 ret = input_register_device(info->input_dev);
388 if (ret < 0) {
389 dev_err(info->dev, "couldn't register input device\n");
390 goto err_iff;
391 }
392
393 platform_set_drvdata(pdev, info);
401 394
402 return 0; 395 return 0;
403 396
404err_voltage:
405 regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
406err_regulator:
407 free_irq(info->irq, info);
408err_irq:
409 input_unregister_device(info->input_dev);
410 info->input_dev = NULL;
411err_iff: 397err_iff:
412 if (info->input_dev) 398 input_ff_destroy(info->input_dev);
413 input_ff_destroy(info->input_dev);
414err_ialloc: 399err_ialloc:
415 input_free_device(info->input_dev); 400 input_free_device(info->input_dev);
416err_kzalloc: 401err_regulator:
417 kfree(info); 402 regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
418 return ret; 403 return ret;
419} 404}
420 405
@@ -423,10 +408,7 @@ static int twl6040_vibra_remove(struct platform_device *pdev)
423 struct vibra_info *info = platform_get_drvdata(pdev); 408 struct vibra_info *info = platform_get_drvdata(pdev);
424 409
425 input_unregister_device(info->input_dev); 410 input_unregister_device(info->input_dev);
426 free_irq(info->irq, info);
427 regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies); 411 regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
428 destroy_workqueue(info->workqueue);
429 kfree(info);
430 412
431 return 0; 413 return 0;
432} 414}
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 558767d8ebf4..caa2c4068f09 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -86,7 +86,7 @@ static int wm831x_on_probe(struct platform_device *pdev)
86 wm831x_on->wm831x = wm831x; 86 wm831x_on->wm831x = wm831x;
87 INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on); 87 INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
88 88
89 wm831x_on->dev = input_allocate_device(); 89 wm831x_on->dev = devm_input_allocate_device(&pdev->dev);
90 if (!wm831x_on->dev) { 90 if (!wm831x_on->dev) {
91 dev_err(&pdev->dev, "Can't allocate input dev\n"); 91 dev_err(&pdev->dev, "Can't allocate input dev\n");
92 ret = -ENOMEM; 92 ret = -ENOMEM;
@@ -119,7 +119,6 @@ static int wm831x_on_probe(struct platform_device *pdev)
119err_irq: 119err_irq:
120 free_irq(irq, wm831x_on); 120 free_irq(irq, wm831x_on);
121err_input_dev: 121err_input_dev:
122 input_free_device(wm831x_on->dev);
123err: 122err:
124 return ret; 123 return ret;
125} 124}
@@ -131,7 +130,6 @@ static int wm831x_on_remove(struct platform_device *pdev)
131 130
132 free_irq(irq, wm831x_on); 131 free_irq(irq, wm831x_on);
133 cancel_delayed_work_sync(&wm831x_on->work); 132 cancel_delayed_work_sync(&wm831x_on->work);
134 input_unregister_device(wm831x_on->dev);
135 133
136 return 0; 134 return 0;
137} 135}
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index cd6268cf7cd5..802bd6a72d73 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -68,6 +68,16 @@ config MOUSE_PS2_SYNAPTICS
68 68
69 If unsure, say Y. 69 If unsure, say Y.
70 70
71config MOUSE_PS2_CYPRESS
72 bool "Cypress PS/2 mouse protocol extension" if EXPERT
73 default y
74 depends on MOUSE_PS2
75 help
76 Say Y here if you have a Cypress PS/2 Trackpad connected to
77 your system.
78
79 If unsure, say Y.
80
71config MOUSE_PS2_LIFEBOOK 81config MOUSE_PS2_LIFEBOOK
72 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT 82 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
73 default y 83 default y
@@ -193,6 +203,18 @@ config MOUSE_BCM5974
193 To compile this driver as a module, choose M here: the 203 To compile this driver as a module, choose M here: the
194 module will be called bcm5974. 204 module will be called bcm5974.
195 205
206config MOUSE_CYAPA
207 tristate "Cypress APA I2C Trackpad support"
208 depends on I2C
209 help
210 This driver adds support for Cypress All Points Addressable (APA)
211 I2C Trackpads, including the ones used in 2012 Samsung Chromebooks.
212
213 Say Y here if you have a Cypress APA I2C Trackpad.
214
215 To compile this driver as a module, choose M here: the module will be
216 called cyapa.
217
196config MOUSE_INPORT 218config MOUSE_INPORT
197 tristate "InPort/MS/ATIXL busmouse" 219 tristate "InPort/MS/ATIXL busmouse"
198 depends on ISA 220 depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 46ba7556fd4f..c25efdb3f288 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o 8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
9obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o 9obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
10obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o 10obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
11obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
11obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o 12obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
12obj-$(CONFIG_MOUSE_INPORT) += inport.o 13obj-$(CONFIG_MOUSE_INPORT) += inport.o
13obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o 14obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -32,3 +33,4 @@ psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o
32psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o 33psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
33psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o 34psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
34psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o 35psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
36psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index e229fa3cad96..7b99fc7c9438 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -27,14 +27,11 @@
27/* 27/*
28 * Definitions for ALPS version 3 and 4 command mode protocol 28 * Definitions for ALPS version 3 and 4 command mode protocol
29 */ 29 */
30#define ALPS_V3_X_MAX 2000
31#define ALPS_V3_Y_MAX 1400
32
33#define ALPS_BITMAP_X_BITS 15
34#define ALPS_BITMAP_Y_BITS 11
35
36#define ALPS_CMD_NIBBLE_10 0x01f2 30#define ALPS_CMD_NIBBLE_10 0x01f2
37 31
32#define ALPS_REG_BASE_RUSHMORE 0xc2c0
33#define ALPS_REG_BASE_PINNACLE 0x0000
34
38static const struct alps_nibble_commands alps_v3_nibble_commands[] = { 35static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
39 { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */ 36 { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
40 { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */ 37 { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
@@ -109,11 +106,14 @@ static const struct alps_model_info alps_model_data[] = {
109 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
110 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
111 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
112 { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
113 { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
114 { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 }, 109 { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
115}; 110};
116 111
112static void alps_set_abs_params_st(struct alps_data *priv,
113 struct input_dev *dev1);
114static void alps_set_abs_params_mt(struct alps_data *priv,
115 struct input_dev *dev1);
116
117/* 117/*
118 * XXX - this entry is suspicious. First byte has zero lower nibble, 118 * XXX - this entry is suspicious. First byte has zero lower nibble,
119 * which is what a normal mouse would report. Also, the value 0x0e 119 * which is what a normal mouse would report. Also, the value 0x0e
@@ -122,10 +122,10 @@ static const struct alps_model_info alps_model_data[] = {
122 122
123/* Packet formats are described in Documentation/input/alps.txt */ 123/* Packet formats are described in Documentation/input/alps.txt */
124 124
125static bool alps_is_valid_first_byte(const struct alps_model_info *model, 125static bool alps_is_valid_first_byte(struct alps_data *priv,
126 unsigned char data) 126 unsigned char data)
127{ 127{
128 return (data & model->mask0) == model->byte0; 128 return (data & priv->mask0) == priv->byte0;
129} 129}
130 130
131static void alps_report_buttons(struct psmouse *psmouse, 131static void alps_report_buttons(struct psmouse *psmouse,
@@ -158,14 +158,13 @@ static void alps_report_buttons(struct psmouse *psmouse,
158static void alps_process_packet_v1_v2(struct psmouse *psmouse) 158static void alps_process_packet_v1_v2(struct psmouse *psmouse)
159{ 159{
160 struct alps_data *priv = psmouse->private; 160 struct alps_data *priv = psmouse->private;
161 const struct alps_model_info *model = priv->i;
162 unsigned char *packet = psmouse->packet; 161 unsigned char *packet = psmouse->packet;
163 struct input_dev *dev = psmouse->dev; 162 struct input_dev *dev = psmouse->dev;
164 struct input_dev *dev2 = priv->dev2; 163 struct input_dev *dev2 = priv->dev2;
165 int x, y, z, ges, fin, left, right, middle; 164 int x, y, z, ges, fin, left, right, middle;
166 int back = 0, forward = 0; 165 int back = 0, forward = 0;
167 166
168 if (model->proto_version == ALPS_PROTO_V1) { 167 if (priv->proto_version == ALPS_PROTO_V1) {
169 left = packet[2] & 0x10; 168 left = packet[2] & 0x10;
170 right = packet[2] & 0x08; 169 right = packet[2] & 0x08;
171 middle = 0; 170 middle = 0;
@@ -181,12 +180,12 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
181 z = packet[5]; 180 z = packet[5];
182 } 181 }
183 182
184 if (model->flags & ALPS_FW_BK_1) { 183 if (priv->flags & ALPS_FW_BK_1) {
185 back = packet[0] & 0x10; 184 back = packet[0] & 0x10;
186 forward = packet[2] & 4; 185 forward = packet[2] & 4;
187 } 186 }
188 187
189 if (model->flags & ALPS_FW_BK_2) { 188 if (priv->flags & ALPS_FW_BK_2) {
190 back = packet[3] & 4; 189 back = packet[3] & 4;
191 forward = packet[2] & 4; 190 forward = packet[2] & 4;
192 if ((middle = forward && back)) 191 if ((middle = forward && back))
@@ -196,7 +195,7 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
196 ges = packet[2] & 1; 195 ges = packet[2] & 1;
197 fin = packet[2] & 2; 196 fin = packet[2] & 2;
198 197
199 if ((model->flags & ALPS_DUALPOINT) && z == 127) { 198 if ((priv->flags & ALPS_DUALPOINT) && z == 127) {
200 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); 199 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
201 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); 200 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
202 201
@@ -239,15 +238,15 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
239 input_report_abs(dev, ABS_PRESSURE, z); 238 input_report_abs(dev, ABS_PRESSURE, z);
240 input_report_key(dev, BTN_TOOL_FINGER, z > 0); 239 input_report_key(dev, BTN_TOOL_FINGER, z > 0);
241 240
242 if (model->flags & ALPS_WHEEL) 241 if (priv->flags & ALPS_WHEEL)
243 input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07)); 242 input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));
244 243
245 if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { 244 if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
246 input_report_key(dev, BTN_FORWARD, forward); 245 input_report_key(dev, BTN_FORWARD, forward);
247 input_report_key(dev, BTN_BACK, back); 246 input_report_key(dev, BTN_BACK, back);
248 } 247 }
249 248
250 if (model->flags & ALPS_FOUR_BUTTONS) { 249 if (priv->flags & ALPS_FOUR_BUTTONS) {
251 input_report_key(dev, BTN_0, packet[2] & 4); 250 input_report_key(dev, BTN_0, packet[2] & 4);
252 input_report_key(dev, BTN_1, packet[0] & 0x10); 251 input_report_key(dev, BTN_1, packet[0] & 0x10);
253 input_report_key(dev, BTN_2, packet[3] & 4); 252 input_report_key(dev, BTN_2, packet[3] & 4);
@@ -267,7 +266,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
267 * These points are returned in x1, y1, x2, and y2 when the return value 266 * These points are returned in x1, y1, x2, and y2 when the return value
268 * is greater than 0. 267 * is greater than 0.
269 */ 268 */
270static int alps_process_bitmap(unsigned int x_map, unsigned int y_map, 269static int alps_process_bitmap(struct alps_data *priv,
270 unsigned int x_map, unsigned int y_map,
271 int *x1, int *y1, int *x2, int *y2) 271 int *x1, int *y1, int *x2, int *y2)
272{ 272{
273 struct alps_bitmap_point { 273 struct alps_bitmap_point {
@@ -309,7 +309,7 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
309 * y bitmap is reversed for what we need (lower positions are in 309 * y bitmap is reversed for what we need (lower positions are in
310 * higher bits), so we process from the top end. 310 * higher bits), so we process from the top end.
311 */ 311 */
312 y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS); 312 y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits);
313 prev_bit = 0; 313 prev_bit = 0;
314 point = &y_low; 314 point = &y_low;
315 for (i = 0; y_map != 0; i++, y_map <<= 1) { 315 for (i = 0; y_map != 0; i++, y_map <<= 1) {
@@ -355,16 +355,18 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
355 } 355 }
356 } 356 }
357 357
358 *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) / 358 *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
359 (2 * (ALPS_BITMAP_X_BITS - 1)); 359 (2 * (priv->x_bits - 1));
360 *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) / 360 *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
361 (2 * (ALPS_BITMAP_Y_BITS - 1)); 361 (2 * (priv->y_bits - 1));
362 362
363 if (fingers > 1) { 363 if (fingers > 1) {
364 *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) / 364 *x2 = (priv->x_max *
365 (2 * (ALPS_BITMAP_X_BITS - 1)); 365 (2 * x_high.start_bit + x_high.num_bits - 1)) /
366 *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) / 366 (2 * (priv->x_bits - 1));
367 (2 * (ALPS_BITMAP_Y_BITS - 1)); 367 *y2 = (priv->y_max *
368 (2 * y_high.start_bit + y_high.num_bits - 1)) /
369 (2 * (priv->y_bits - 1));
368 } 370 }
369 371
370 return fingers; 372 return fingers;
@@ -448,17 +450,57 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
448 return; 450 return;
449} 451}
450 452
453static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
454{
455 f->left = !!(p[3] & 0x01);
456 f->right = !!(p[3] & 0x02);
457 f->middle = !!(p[3] & 0x04);
458
459 f->ts_left = !!(p[3] & 0x10);
460 f->ts_right = !!(p[3] & 0x20);
461 f->ts_middle = !!(p[3] & 0x40);
462}
463
464static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
465{
466 f->first_mp = !!(p[4] & 0x40);
467 f->is_mp = !!(p[0] & 0x40);
468
469 f->fingers = (p[5] & 0x3) + 1;
470 f->x_map = ((p[4] & 0x7e) << 8) |
471 ((p[1] & 0x7f) << 2) |
472 ((p[0] & 0x30) >> 4);
473 f->y_map = ((p[3] & 0x70) << 4) |
474 ((p[2] & 0x7f) << 1) |
475 (p[4] & 0x01);
476
477 f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
478 ((p[0] & 0x30) >> 4);
479 f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
480 f->z = p[5] & 0x7f;
481
482 alps_decode_buttons_v3(f, p);
483}
484
485static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
486{
487 alps_decode_pinnacle(f, p);
488
489 f->x_map |= (p[5] & 0x10) << 11;
490 f->y_map |= (p[5] & 0x20) << 6;
491}
492
451static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) 493static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
452{ 494{
453 struct alps_data *priv = psmouse->private; 495 struct alps_data *priv = psmouse->private;
454 unsigned char *packet = psmouse->packet; 496 unsigned char *packet = psmouse->packet;
455 struct input_dev *dev = psmouse->dev; 497 struct input_dev *dev = psmouse->dev;
456 struct input_dev *dev2 = priv->dev2; 498 struct input_dev *dev2 = priv->dev2;
457 int x, y, z;
458 int left, right, middle;
459 int x1 = 0, y1 = 0, x2 = 0, y2 = 0; 499 int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
460 int fingers = 0, bmap_fingers; 500 int fingers = 0, bmap_fingers;
461 unsigned int x_bitmap, y_bitmap; 501 struct alps_fields f;
502
503 priv->decode_fields(&f, packet);
462 504
463 /* 505 /*
464 * There's no single feature of touchpad position and bitmap packets 506 * There's no single feature of touchpad position and bitmap packets
@@ -473,16 +515,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
473 * packet. Check for this, and when it happens process the 515 * packet. Check for this, and when it happens process the
474 * position packet as usual. 516 * position packet as usual.
475 */ 517 */
476 if (packet[0] & 0x40) { 518 if (f.is_mp) {
477 fingers = (packet[5] & 0x3) + 1; 519 fingers = f.fingers;
478 x_bitmap = ((packet[4] & 0x7e) << 8) | 520 bmap_fingers = alps_process_bitmap(priv,
479 ((packet[1] & 0x7f) << 2) | 521 f.x_map, f.y_map,
480 ((packet[0] & 0x30) >> 4);
481 y_bitmap = ((packet[3] & 0x70) << 4) |
482 ((packet[2] & 0x7f) << 1) |
483 (packet[4] & 0x01);
484
485 bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
486 &x1, &y1, &x2, &y2); 522 &x1, &y1, &x2, &y2);
487 523
488 /* 524 /*
@@ -493,7 +529,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
493 fingers = bmap_fingers; 529 fingers = bmap_fingers;
494 530
495 /* Now process position packet */ 531 /* Now process position packet */
496 packet = priv->multi_data; 532 priv->decode_fields(&f, priv->multi_data);
497 } else { 533 } else {
498 priv->multi_packet = 0; 534 priv->multi_packet = 0;
499 } 535 }
@@ -507,10 +543,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
507 * out misidentified bitmap packets, we reject anything with this 543 * out misidentified bitmap packets, we reject anything with this
508 * bit set. 544 * bit set.
509 */ 545 */
510 if (packet[0] & 0x40) 546 if (f.is_mp)
511 return; 547 return;
512 548
513 if (!priv->multi_packet && (packet[4] & 0x40)) { 549 if (!priv->multi_packet && f.first_mp) {
514 priv->multi_packet = 1; 550 priv->multi_packet = 1;
515 memcpy(priv->multi_data, packet, sizeof(priv->multi_data)); 551 memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
516 return; 552 return;
@@ -518,22 +554,13 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
518 554
519 priv->multi_packet = 0; 555 priv->multi_packet = 0;
520 556
521 left = packet[3] & 0x01;
522 right = packet[3] & 0x02;
523 middle = packet[3] & 0x04;
524
525 x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
526 ((packet[0] & 0x30) >> 4);
527 y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
528 z = packet[5] & 0x7f;
529
530 /* 557 /*
531 * Sometimes the hardware sends a single packet with z = 0 558 * Sometimes the hardware sends a single packet with z = 0
532 * in the middle of a stream. Real releases generate packets 559 * in the middle of a stream. Real releases generate packets
533 * with x, y, and z all zero, so these seem to be flukes. 560 * with x, y, and z all zero, so these seem to be flukes.
534 * Ignore them. 561 * Ignore them.
535 */ 562 */
536 if (x && y && !z) 563 if (f.x && f.y && !f.z)
537 return; 564 return;
538 565
539 /* 566 /*
@@ -541,12 +568,12 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
541 * to rely on ST data. 568 * to rely on ST data.
542 */ 569 */
543 if (!fingers) { 570 if (!fingers) {
544 x1 = x; 571 x1 = f.x;
545 y1 = y; 572 y1 = f.y;
546 fingers = z > 0 ? 1 : 0; 573 fingers = f.z > 0 ? 1 : 0;
547 } 574 }
548 575
549 if (z >= 64) 576 if (f.z >= 64)
550 input_report_key(dev, BTN_TOUCH, 1); 577 input_report_key(dev, BTN_TOUCH, 1);
551 else 578 else
552 input_report_key(dev, BTN_TOUCH, 0); 579 input_report_key(dev, BTN_TOUCH, 0);
@@ -555,26 +582,22 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
555 582
556 input_mt_report_finger_count(dev, fingers); 583 input_mt_report_finger_count(dev, fingers);
557 584
558 input_report_key(dev, BTN_LEFT, left); 585 input_report_key(dev, BTN_LEFT, f.left);
559 input_report_key(dev, BTN_RIGHT, right); 586 input_report_key(dev, BTN_RIGHT, f.right);
560 input_report_key(dev, BTN_MIDDLE, middle); 587 input_report_key(dev, BTN_MIDDLE, f.middle);
561 588
562 if (z > 0) { 589 if (f.z > 0) {
563 input_report_abs(dev, ABS_X, x); 590 input_report_abs(dev, ABS_X, f.x);
564 input_report_abs(dev, ABS_Y, y); 591 input_report_abs(dev, ABS_Y, f.y);
565 } 592 }
566 input_report_abs(dev, ABS_PRESSURE, z); 593 input_report_abs(dev, ABS_PRESSURE, f.z);
567 594
568 input_sync(dev); 595 input_sync(dev);
569 596
570 if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) { 597 if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
571 left = packet[3] & 0x10; 598 input_report_key(dev2, BTN_LEFT, f.ts_left);
572 right = packet[3] & 0x20; 599 input_report_key(dev2, BTN_RIGHT, f.ts_right);
573 middle = packet[3] & 0x40; 600 input_report_key(dev2, BTN_MIDDLE, f.ts_middle);
574
575 input_report_key(dev2, BTN_LEFT, left);
576 input_report_key(dev2, BTN_RIGHT, right);
577 input_report_key(dev2, BTN_MIDDLE, middle);
578 input_sync(dev2); 601 input_sync(dev2);
579 } 602 }
580} 603}
@@ -639,7 +662,7 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
639 ((priv->multi_data[3] & 0x1f) << 5) | 662 ((priv->multi_data[3] & 0x1f) << 5) |
640 (priv->multi_data[1] & 0x1f); 663 (priv->multi_data[1] & 0x1f);
641 664
642 fingers = alps_process_bitmap(x_bitmap, y_bitmap, 665 fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap,
643 &x1, &y1, &x2, &y2); 666 &x1, &y1, &x2, &y2);
644 667
645 /* Store MT data.*/ 668 /* Store MT data.*/
@@ -696,25 +719,6 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
696 input_sync(dev); 719 input_sync(dev);
697} 720}
698 721
699static void alps_process_packet(struct psmouse *psmouse)
700{
701 struct alps_data *priv = psmouse->private;
702 const struct alps_model_info *model = priv->i;
703
704 switch (model->proto_version) {
705 case ALPS_PROTO_V1:
706 case ALPS_PROTO_V2:
707 alps_process_packet_v1_v2(psmouse);
708 break;
709 case ALPS_PROTO_V3:
710 alps_process_packet_v3(psmouse);
711 break;
712 case ALPS_PROTO_V4:
713 alps_process_packet_v4(psmouse);
714 break;
715 }
716}
717
718static void alps_report_bare_ps2_packet(struct psmouse *psmouse, 722static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
719 unsigned char packet[], 723 unsigned char packet[],
720 bool report_buttons) 724 bool report_buttons)
@@ -765,14 +769,14 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
765 if (((psmouse->packet[3] | 769 if (((psmouse->packet[3] |
766 psmouse->packet[4] | 770 psmouse->packet[4] |
767 psmouse->packet[5]) & 0x80) || 771 psmouse->packet[5]) & 0x80) ||
768 (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) { 772 (!alps_is_valid_first_byte(priv, psmouse->packet[6]))) {
769 psmouse_dbg(psmouse, 773 psmouse_dbg(psmouse,
770 "refusing packet %4ph (suspected interleaved ps/2)\n", 774 "refusing packet %4ph (suspected interleaved ps/2)\n",
771 psmouse->packet + 3); 775 psmouse->packet + 3);
772 return PSMOUSE_BAD_DATA; 776 return PSMOUSE_BAD_DATA;
773 } 777 }
774 778
775 alps_process_packet(psmouse); 779 priv->process_packet(psmouse);
776 780
777 /* Continue with the next packet */ 781 /* Continue with the next packet */
778 psmouse->packet[0] = psmouse->packet[6]; 782 psmouse->packet[0] = psmouse->packet[6];
@@ -816,6 +820,7 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
816static void alps_flush_packet(unsigned long data) 820static void alps_flush_packet(unsigned long data)
817{ 821{
818 struct psmouse *psmouse = (struct psmouse *)data; 822 struct psmouse *psmouse = (struct psmouse *)data;
823 struct alps_data *priv = psmouse->private;
819 824
820 serio_pause_rx(psmouse->ps2dev.serio); 825 serio_pause_rx(psmouse->ps2dev.serio);
821 826
@@ -833,7 +838,7 @@ static void alps_flush_packet(unsigned long data)
833 "refusing packet %3ph (suspected interleaved ps/2)\n", 838 "refusing packet %3ph (suspected interleaved ps/2)\n",
834 psmouse->packet + 3); 839 psmouse->packet + 3);
835 } else { 840 } else {
836 alps_process_packet(psmouse); 841 priv->process_packet(psmouse);
837 } 842 }
838 psmouse->pktcnt = 0; 843 psmouse->pktcnt = 0;
839 } 844 }
@@ -844,7 +849,6 @@ static void alps_flush_packet(unsigned long data)
844static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) 849static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
845{ 850{
846 struct alps_data *priv = psmouse->private; 851 struct alps_data *priv = psmouse->private;
847 const struct alps_model_info *model = priv->i;
848 852
849 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ 853 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
850 if (psmouse->pktcnt == 3) { 854 if (psmouse->pktcnt == 3) {
@@ -857,15 +861,15 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
857 861
858 /* Check for PS/2 packet stuffed in the middle of ALPS packet. */ 862 /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
859 863
860 if ((model->flags & ALPS_PS2_INTERLEAVED) && 864 if ((priv->flags & ALPS_PS2_INTERLEAVED) &&
861 psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) { 865 psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
862 return alps_handle_interleaved_ps2(psmouse); 866 return alps_handle_interleaved_ps2(psmouse);
863 } 867 }
864 868
865 if (!alps_is_valid_first_byte(model, psmouse->packet[0])) { 869 if (!alps_is_valid_first_byte(priv, psmouse->packet[0])) {
866 psmouse_dbg(psmouse, 870 psmouse_dbg(psmouse,
867 "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n", 871 "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
868 psmouse->packet[0], model->mask0, model->byte0); 872 psmouse->packet[0], priv->mask0, priv->byte0);
869 return PSMOUSE_BAD_DATA; 873 return PSMOUSE_BAD_DATA;
870 } 874 }
871 875
@@ -879,7 +883,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
879 } 883 }
880 884
881 if (psmouse->pktcnt == psmouse->pktsize) { 885 if (psmouse->pktcnt == psmouse->pktsize) {
882 alps_process_packet(psmouse); 886 priv->process_packet(psmouse);
883 return PSMOUSE_FULL_PACKET; 887 return PSMOUSE_FULL_PACKET;
884 } 888 }
885 889
@@ -967,24 +971,42 @@ static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
967 return __alps_command_mode_write_reg(psmouse, value); 971 return __alps_command_mode_write_reg(psmouse, value);
968} 972}
969 973
974static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
975 int repeated_command, unsigned char *param)
976{
977 struct ps2dev *ps2dev = &psmouse->ps2dev;
978
979 param[0] = 0;
980 if (init_command && ps2_command(ps2dev, param, init_command))
981 return -EIO;
982
983 if (ps2_command(ps2dev, NULL, repeated_command) ||
984 ps2_command(ps2dev, NULL, repeated_command) ||
985 ps2_command(ps2dev, NULL, repeated_command))
986 return -EIO;
987
988 param[0] = param[1] = param[2] = 0xff;
989 if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
990 return -EIO;
991
992 psmouse_dbg(psmouse, "%2.2X report: %2.2x %2.2x %2.2x\n",
993 repeated_command, param[0], param[1], param[2]);
994 return 0;
995}
996
970static int alps_enter_command_mode(struct psmouse *psmouse, 997static int alps_enter_command_mode(struct psmouse *psmouse,
971 unsigned char *resp) 998 unsigned char *resp)
972{ 999{
973 unsigned char param[4]; 1000 unsigned char param[4];
974 struct ps2dev *ps2dev = &psmouse->ps2dev;
975 1001
976 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || 1002 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_RESET_WRAP, param)) {
977 ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
978 ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
979 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
980 psmouse_err(psmouse, "failed to enter command mode\n"); 1003 psmouse_err(psmouse, "failed to enter command mode\n");
981 return -1; 1004 return -1;
982 } 1005 }
983 1006
984 if (param[0] != 0x88 && param[1] != 0x07) { 1007 if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
985 psmouse_dbg(psmouse, 1008 psmouse_dbg(psmouse,
986 "unknown response while entering command mode: %2.2x %2.2x %2.2x\n", 1009 "unknown response while entering command mode\n");
987 param[0], param[1], param[2]);
988 return -1; 1010 return -1;
989 } 1011 }
990 1012
@@ -1001,99 +1023,6 @@ static inline int alps_exit_command_mode(struct psmouse *psmouse)
1001 return 0; 1023 return 0;
1002} 1024}
1003 1025
1004static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
1005{
1006 struct ps2dev *ps2dev = &psmouse->ps2dev;
1007 static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
1008 unsigned char param[4];
1009 const struct alps_model_info *model = NULL;
1010 int i;
1011
1012 /*
1013 * First try "E6 report".
1014 * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
1015 * The bits 0-2 of the first byte will be 1s if some buttons are
1016 * pressed.
1017 */
1018 param[0] = 0;
1019 if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
1020 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1021 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1022 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
1023 return NULL;
1024
1025 param[0] = param[1] = param[2] = 0xff;
1026 if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
1027 return NULL;
1028
1029 psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
1030 param[0], param[1], param[2]);
1031
1032 if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
1033 (param[2] != 10 && param[2] != 100))
1034 return NULL;
1035
1036 /*
1037 * Now try "E7 report". Allowed responses are in
1038 * alps_model_data[].signature
1039 */
1040 param[0] = 0;
1041 if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
1042 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1043 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
1044 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21))
1045 return NULL;
1046
1047 param[0] = param[1] = param[2] = 0xff;
1048 if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
1049 return NULL;
1050
1051 psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x",
1052 param[0], param[1], param[2]);
1053
1054 if (version) {
1055 for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
1056 /* empty */;
1057 *version = (param[0] << 8) | (param[1] << 4) | i;
1058 }
1059
1060 for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
1061 if (!memcmp(param, alps_model_data[i].signature,
1062 sizeof(alps_model_data[i].signature))) {
1063 model = alps_model_data + i;
1064 break;
1065 }
1066 }
1067
1068 if (model && model->proto_version > ALPS_PROTO_V2) {
1069 /*
1070 * Need to check command mode response to identify
1071 * model
1072 */
1073 model = NULL;
1074 if (alps_enter_command_mode(psmouse, param)) {
1075 psmouse_warn(psmouse,
1076 "touchpad failed to enter command mode\n");
1077 } else {
1078 for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
1079 if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
1080 alps_model_data[i].command_mode_resp == param[0]) {
1081 model = alps_model_data + i;
1082 break;
1083 }
1084 }
1085 alps_exit_command_mode(psmouse);
1086
1087 if (!model)
1088 psmouse_dbg(psmouse,
1089 "Unknown command mode response %2.2x\n",
1090 param[0]);
1091 }
1092 }
1093
1094 return model;
1095}
1096
1097/* 1026/*
1098 * For DualPoint devices select the device that should respond to 1027 * For DualPoint devices select the device that should respond to
1099 * subsequent commands. It looks like glidepad is behind stickpointer, 1028 * subsequent commands. It looks like glidepad is behind stickpointer,
@@ -1137,18 +1066,10 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
1137 1066
1138static int alps_get_status(struct psmouse *psmouse, char *param) 1067static int alps_get_status(struct psmouse *psmouse, char *param)
1139{ 1068{
1140 struct ps2dev *ps2dev = &psmouse->ps2dev;
1141
1142 /* Get status: 0xF5 0xF5 0xF5 0xE9 */ 1069 /* Get status: 0xF5 0xF5 0xF5 0xE9 */
1143 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || 1070 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_DISABLE, param))
1144 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1145 ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
1146 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
1147 return -1; 1071 return -1;
1148 1072
1149 psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x",
1150 param[0], param[1], param[2]);
1151
1152 return 0; 1073 return 0;
1153} 1074}
1154 1075
@@ -1190,16 +1111,16 @@ static int alps_poll(struct psmouse *psmouse)
1190 unsigned char buf[sizeof(psmouse->packet)]; 1111 unsigned char buf[sizeof(psmouse->packet)];
1191 bool poll_failed; 1112 bool poll_failed;
1192 1113
1193 if (priv->i->flags & ALPS_PASS) 1114 if (priv->flags & ALPS_PASS)
1194 alps_passthrough_mode_v2(psmouse, true); 1115 alps_passthrough_mode_v2(psmouse, true);
1195 1116
1196 poll_failed = ps2_command(&psmouse->ps2dev, buf, 1117 poll_failed = ps2_command(&psmouse->ps2dev, buf,
1197 PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0; 1118 PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
1198 1119
1199 if (priv->i->flags & ALPS_PASS) 1120 if (priv->flags & ALPS_PASS)
1200 alps_passthrough_mode_v2(psmouse, false); 1121 alps_passthrough_mode_v2(psmouse, false);
1201 1122
1202 if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0) 1123 if (poll_failed || (buf[0] & priv->mask0) != priv->byte0)
1203 return -1; 1124 return -1;
1204 1125
1205 if ((psmouse->badbyte & 0xc8) == 0x08) { 1126 if ((psmouse->badbyte & 0xc8) == 0x08) {
@@ -1217,9 +1138,8 @@ static int alps_poll(struct psmouse *psmouse)
1217static int alps_hw_init_v1_v2(struct psmouse *psmouse) 1138static int alps_hw_init_v1_v2(struct psmouse *psmouse)
1218{ 1139{
1219 struct alps_data *priv = psmouse->private; 1140 struct alps_data *priv = psmouse->private;
1220 const struct alps_model_info *model = priv->i;
1221 1141
1222 if ((model->flags & ALPS_PASS) && 1142 if ((priv->flags & ALPS_PASS) &&
1223 alps_passthrough_mode_v2(psmouse, true)) { 1143 alps_passthrough_mode_v2(psmouse, true)) {
1224 return -1; 1144 return -1;
1225 } 1145 }
@@ -1234,7 +1154,7 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
1234 return -1; 1154 return -1;
1235 } 1155 }
1236 1156
1237 if ((model->flags & ALPS_PASS) && 1157 if ((priv->flags & ALPS_PASS) &&
1238 alps_passthrough_mode_v2(psmouse, false)) { 1158 alps_passthrough_mode_v2(psmouse, false)) {
1239 return -1; 1159 return -1;
1240 } 1160 }
@@ -1249,26 +1169,31 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
1249} 1169}
1250 1170
1251/* 1171/*
1252 * Enable or disable passthrough mode to the trackstick. Must be in 1172 * Enable or disable passthrough mode to the trackstick.
1253 * command mode when calling this function.
1254 */ 1173 */
1255static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable) 1174static int alps_passthrough_mode_v3(struct psmouse *psmouse,
1175 int reg_base, bool enable)
1256{ 1176{
1257 int reg_val; 1177 int reg_val, ret = -1;
1258 1178
1259 reg_val = alps_command_mode_read_reg(psmouse, 0x0008); 1179 if (alps_enter_command_mode(psmouse, NULL))
1260 if (reg_val == -1)
1261 return -1; 1180 return -1;
1262 1181
1182 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
1183 if (reg_val == -1)
1184 goto error;
1185
1263 if (enable) 1186 if (enable)
1264 reg_val |= 0x01; 1187 reg_val |= 0x01;
1265 else 1188 else
1266 reg_val &= ~0x01; 1189 reg_val &= ~0x01;
1267 1190
1268 if (__alps_command_mode_write_reg(psmouse, reg_val)) 1191 ret = __alps_command_mode_write_reg(psmouse, reg_val);
1269 return -1;
1270 1192
1271 return 0; 1193error:
1194 if (alps_exit_command_mode(psmouse))
1195 ret = -1;
1196 return ret;
1272} 1197}
1273 1198
1274/* Must be in command mode when calling this function */ 1199/* Must be in command mode when calling this function */
@@ -1287,73 +1212,102 @@ static int alps_absolute_mode_v3(struct psmouse *psmouse)
1287 return 0; 1212 return 0;
1288} 1213}
1289 1214
1290static int alps_hw_init_v3(struct psmouse *psmouse) 1215static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
1291{ 1216{
1292 struct alps_data *priv = psmouse->private; 1217 int ret = -EIO, reg_val;
1293 struct ps2dev *ps2dev = &psmouse->ps2dev;
1294 int reg_val;
1295 unsigned char param[4];
1296
1297 priv->nibble_commands = alps_v3_nibble_commands;
1298 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
1299 1218
1300 if (alps_enter_command_mode(psmouse, NULL)) 1219 if (alps_enter_command_mode(psmouse, NULL))
1301 goto error; 1220 goto error;
1302 1221
1303 /* Check for trackstick */ 1222 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
1304 reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
1305 if (reg_val == -1) 1223 if (reg_val == -1)
1306 goto error; 1224 goto error;
1307 if (reg_val & 0x80) { 1225
1308 if (alps_passthrough_mode_v3(psmouse, true)) 1226 /* bit 7: trackstick is present */
1309 goto error; 1227 ret = reg_val & 0x80 ? 0 : -ENODEV;
1310 if (alps_exit_command_mode(psmouse)) 1228
1311 goto error; 1229error:
1230 alps_exit_command_mode(psmouse);
1231 return ret;
1232}
1233
1234static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
1235{
1236 struct ps2dev *ps2dev = &psmouse->ps2dev;
1237 int ret = 0;
1238 unsigned char param[4];
1239
1240 if (alps_passthrough_mode_v3(psmouse, reg_base, true))
1241 return -EIO;
1242
1243 /*
1244 * E7 report for the trackstick
1245 *
1246 * There have been reports of failures to seem to trace back
1247 * to the above trackstick check failing. When these occur
1248 * this E7 report fails, so when that happens we continue
1249 * with the assumption that there isn't a trackstick after
1250 * all.
1251 */
1252 if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
1253 psmouse_warn(psmouse, "trackstick E7 report failed\n");
1254 ret = -ENODEV;
1255 } else {
1256 psmouse_dbg(psmouse,
1257 "trackstick E7 report: %2.2x %2.2x %2.2x\n",
1258 param[0], param[1], param[2]);
1312 1259
1313 /* 1260 /*
1314 * E7 report for the trackstick 1261 * Not sure what this does, but it is absolutely
1315 * 1262 * essential. Without it, the touchpad does not
1316 * There have been reports of failures to seem to trace back 1263 * work at all and the trackstick just emits normal
1317 * to the above trackstick check failing. When these occur 1264 * PS/2 packets.
1318 * this E7 report fails, so when that happens we continue
1319 * with the assumption that there isn't a trackstick after
1320 * all.
1321 */ 1265 */
1322 param[0] = 0x64; 1266 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1323 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || 1267 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1324 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || 1268 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1325 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || 1269 alps_command_mode_send_nibble(psmouse, 0x9) ||
1326 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) { 1270 alps_command_mode_send_nibble(psmouse, 0x4)) {
1327 psmouse_warn(psmouse, "trackstick E7 report failed\n"); 1271 psmouse_err(psmouse,
1328 } else { 1272 "Error sending magic E6 sequence\n");
1329 psmouse_dbg(psmouse, 1273 ret = -EIO;
1330 "trackstick E7 report: %2.2x %2.2x %2.2x\n", 1274 goto error;
1331 param[0], param[1], param[2]);
1332
1333 /*
1334 * Not sure what this does, but it is absolutely
1335 * essential. Without it, the touchpad does not
1336 * work at all and the trackstick just emits normal
1337 * PS/2 packets.
1338 */
1339 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1340 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1341 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
1342 alps_command_mode_send_nibble(psmouse, 0x9) ||
1343 alps_command_mode_send_nibble(psmouse, 0x4)) {
1344 psmouse_err(psmouse,
1345 "Error sending magic E6 sequence\n");
1346 goto error_passthrough;
1347 }
1348 } 1275 }
1349 1276
1350 if (alps_enter_command_mode(psmouse, NULL)) 1277 /*
1351 goto error_passthrough; 1278 * This ensures the trackstick packets are in the format
1352 if (alps_passthrough_mode_v3(psmouse, false)) 1279 * supported by this driver. If bit 1 isn't set the packet
1353 goto error; 1280 * format is different.
1281 */
1282 if (alps_enter_command_mode(psmouse, NULL) ||
1283 alps_command_mode_write_reg(psmouse,
1284 reg_base + 0x08, 0x82) ||
1285 alps_exit_command_mode(psmouse))
1286 ret = -EIO;
1354 } 1287 }
1355 1288
1356 if (alps_absolute_mode_v3(psmouse)) { 1289error:
1290 if (alps_passthrough_mode_v3(psmouse, reg_base, false))
1291 ret = -EIO;
1292
1293 return ret;
1294}
1295
1296static int alps_hw_init_v3(struct psmouse *psmouse)
1297{
1298 struct ps2dev *ps2dev = &psmouse->ps2dev;
1299 int reg_val;
1300 unsigned char param[4];
1301
1302 reg_val = alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE);
1303 if (reg_val == -EIO)
1304 goto error;
1305 if (reg_val == 0 &&
1306 alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
1307 goto error;
1308
1309 if (alps_enter_command_mode(psmouse, NULL) ||
1310 alps_absolute_mode_v3(psmouse)) {
1357 psmouse_err(psmouse, "Failed to enter absolute mode\n"); 1311 psmouse_err(psmouse, "Failed to enter absolute mode\n");
1358 goto error; 1312 goto error;
1359 } 1313 }
@@ -1390,14 +1344,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
1390 if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04)) 1344 if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
1391 goto error; 1345 goto error;
1392 1346
1393 /*
1394 * This ensures the trackstick packets are in the format
1395 * supported by this driver. If bit 1 isn't set the packet
1396 * format is different.
1397 */
1398 if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
1399 goto error;
1400
1401 alps_exit_command_mode(psmouse); 1347 alps_exit_command_mode(psmouse);
1402 1348
1403 /* Set rate and enable data reporting */ 1349 /* Set rate and enable data reporting */
@@ -1410,10 +1356,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
1410 1356
1411 return 0; 1357 return 0;
1412 1358
1413error_passthrough:
1414 /* Something failed while in passthrough mode, so try to get out */
1415 if (!alps_enter_command_mode(psmouse, NULL))
1416 alps_passthrough_mode_v3(psmouse, false);
1417error: 1359error:
1418 /* 1360 /*
1419 * Leaving the touchpad in command mode will essentially render 1361 * Leaving the touchpad in command mode will essentially render
@@ -1424,6 +1366,50 @@ error:
1424 return -1; 1366 return -1;
1425} 1367}
1426 1368
1369static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
1370{
1371 struct alps_data *priv = psmouse->private;
1372 struct ps2dev *ps2dev = &psmouse->ps2dev;
1373 int reg_val, ret = -1;
1374
1375 if (priv->flags & ALPS_DUALPOINT) {
1376 reg_val = alps_setup_trackstick_v3(psmouse,
1377 ALPS_REG_BASE_RUSHMORE);
1378 if (reg_val == -EIO)
1379 goto error;
1380 if (reg_val == -ENODEV)
1381 priv->flags &= ~ALPS_DUALPOINT;
1382 }
1383
1384 if (alps_enter_command_mode(psmouse, NULL) ||
1385 alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
1386 alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
1387 goto error;
1388
1389 reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6);
1390 if (reg_val == -1)
1391 goto error;
1392 if (__alps_command_mode_write_reg(psmouse, reg_val & 0xfd))
1393 goto error;
1394
1395 if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64))
1396 goto error;
1397
1398 /* enter absolute mode */
1399 reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4);
1400 if (reg_val == -1)
1401 goto error;
1402 if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02))
1403 goto error;
1404
1405 alps_exit_command_mode(psmouse);
1406 return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
1407
1408error:
1409 alps_exit_command_mode(psmouse);
1410 return ret;
1411}
1412
1427/* Must be in command mode when calling this function */ 1413/* Must be in command mode when calling this function */
1428static int alps_absolute_mode_v4(struct psmouse *psmouse) 1414static int alps_absolute_mode_v4(struct psmouse *psmouse)
1429{ 1415{
@@ -1442,13 +1428,9 @@ static int alps_absolute_mode_v4(struct psmouse *psmouse)
1442 1428
1443static int alps_hw_init_v4(struct psmouse *psmouse) 1429static int alps_hw_init_v4(struct psmouse *psmouse)
1444{ 1430{
1445 struct alps_data *priv = psmouse->private;
1446 struct ps2dev *ps2dev = &psmouse->ps2dev; 1431 struct ps2dev *ps2dev = &psmouse->ps2dev;
1447 unsigned char param[4]; 1432 unsigned char param[4];
1448 1433
1449 priv->nibble_commands = alps_v4_nibble_commands;
1450 priv->addr_command = PSMOUSE_CMD_DISABLE;
1451
1452 if (alps_enter_command_mode(psmouse, NULL)) 1434 if (alps_enter_command_mode(psmouse, NULL))
1453 goto error; 1435 goto error;
1454 1436
@@ -1517,39 +1499,140 @@ error:
1517 return -1; 1499 return -1;
1518} 1500}
1519 1501
1520static int alps_hw_init(struct psmouse *psmouse) 1502static void alps_set_defaults(struct alps_data *priv)
1521{ 1503{
1522 struct alps_data *priv = psmouse->private; 1504 priv->byte0 = 0x8f;
1523 const struct alps_model_info *model = priv->i; 1505 priv->mask0 = 0x8f;
1524 int ret = -1; 1506 priv->flags = ALPS_DUALPOINT;
1507
1508 priv->x_max = 2000;
1509 priv->y_max = 1400;
1510 priv->x_bits = 15;
1511 priv->y_bits = 11;
1525 1512
1526 switch (model->proto_version) { 1513 switch (priv->proto_version) {
1527 case ALPS_PROTO_V1: 1514 case ALPS_PROTO_V1:
1528 case ALPS_PROTO_V2: 1515 case ALPS_PROTO_V2:
1529 ret = alps_hw_init_v1_v2(psmouse); 1516 priv->hw_init = alps_hw_init_v1_v2;
1517 priv->process_packet = alps_process_packet_v1_v2;
1518 priv->set_abs_params = alps_set_abs_params_st;
1530 break; 1519 break;
1531 case ALPS_PROTO_V3: 1520 case ALPS_PROTO_V3:
1532 ret = alps_hw_init_v3(psmouse); 1521 priv->hw_init = alps_hw_init_v3;
1522 priv->process_packet = alps_process_packet_v3;
1523 priv->set_abs_params = alps_set_abs_params_mt;
1524 priv->decode_fields = alps_decode_pinnacle;
1525 priv->nibble_commands = alps_v3_nibble_commands;
1526 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
1533 break; 1527 break;
1534 case ALPS_PROTO_V4: 1528 case ALPS_PROTO_V4:
1535 ret = alps_hw_init_v4(psmouse); 1529 priv->hw_init = alps_hw_init_v4;
1530 priv->process_packet = alps_process_packet_v4;
1531 priv->set_abs_params = alps_set_abs_params_mt;
1532 priv->nibble_commands = alps_v4_nibble_commands;
1533 priv->addr_command = PSMOUSE_CMD_DISABLE;
1536 break; 1534 break;
1537 } 1535 }
1536}
1538 1537
1539 return ret; 1538static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
1539 unsigned char *e7, unsigned char *ec)
1540{
1541 const struct alps_model_info *model;
1542 int i;
1543
1544 for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
1545 model = &alps_model_data[i];
1546
1547 if (!memcmp(e7, model->signature, sizeof(model->signature)) &&
1548 (!model->command_mode_resp ||
1549 model->command_mode_resp == ec[2])) {
1550
1551 priv->proto_version = model->proto_version;
1552 alps_set_defaults(priv);
1553
1554 priv->flags = model->flags;
1555 priv->byte0 = model->byte0;
1556 priv->mask0 = model->mask0;
1557
1558 return 0;
1559 }
1560 }
1561
1562 return -EINVAL;
1563}
1564
1565static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
1566{
1567 unsigned char e6[4], e7[4], ec[4];
1568
1569 /*
1570 * First try "E6 report".
1571 * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
1572 * The bits 0-2 of the first byte will be 1s if some buttons are
1573 * pressed.
1574 */
1575 if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
1576 PSMOUSE_CMD_SETSCALE11, e6))
1577 return -EIO;
1578
1579 if ((e6[0] & 0xf8) != 0 || e6[1] != 0 || (e6[2] != 10 && e6[2] != 100))
1580 return -EINVAL;
1581
1582 /*
1583 * Now get the "E7" and "EC" reports. These will uniquely identify
1584 * most ALPS touchpads.
1585 */
1586 if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
1587 PSMOUSE_CMD_SETSCALE21, e7) ||
1588 alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
1589 PSMOUSE_CMD_RESET_WRAP, ec) ||
1590 alps_exit_command_mode(psmouse))
1591 return -EIO;
1592
1593 if (alps_match_table(psmouse, priv, e7, ec) == 0) {
1594 return 0;
1595 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
1596 priv->proto_version = ALPS_PROTO_V3;
1597 alps_set_defaults(priv);
1598
1599 priv->hw_init = alps_hw_init_rushmore_v3;
1600 priv->decode_fields = alps_decode_rushmore;
1601 priv->x_bits = 16;
1602 priv->y_bits = 12;
1603
1604 /* hack to make addr_command, nibble_command available */
1605 psmouse->private = priv;
1606
1607 if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
1608 priv->flags &= ~ALPS_DUALPOINT;
1609
1610 return 0;
1611 } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
1612 ec[2] >= 0x90 && ec[2] <= 0x9d) {
1613 priv->proto_version = ALPS_PROTO_V3;
1614 alps_set_defaults(priv);
1615
1616 return 0;
1617 }
1618
1619 psmouse_info(psmouse,
1620 "Unknown ALPS touchpad: E7=%2.2x %2.2x %2.2x, EC=%2.2x %2.2x %2.2x\n",
1621 e7[0], e7[1], e7[2], ec[0], ec[1], ec[2]);
1622
1623 return -EINVAL;
1540} 1624}
1541 1625
1542static int alps_reconnect(struct psmouse *psmouse) 1626static int alps_reconnect(struct psmouse *psmouse)
1543{ 1627{
1544 const struct alps_model_info *model; 1628 struct alps_data *priv = psmouse->private;
1545 1629
1546 psmouse_reset(psmouse); 1630 psmouse_reset(psmouse);
1547 1631
1548 model = alps_get_model(psmouse, NULL); 1632 if (alps_identify(psmouse, priv) < 0)
1549 if (!model)
1550 return -1; 1633 return -1;
1551 1634
1552 return alps_hw_init(psmouse); 1635 return priv->hw_init(psmouse);
1553} 1636}
1554 1637
1555static void alps_disconnect(struct psmouse *psmouse) 1638static void alps_disconnect(struct psmouse *psmouse)
@@ -1562,12 +1645,33 @@ static void alps_disconnect(struct psmouse *psmouse)
1562 kfree(priv); 1645 kfree(priv);
1563} 1646}
1564 1647
1648static void alps_set_abs_params_st(struct alps_data *priv,
1649 struct input_dev *dev1)
1650{
1651 input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
1652 input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
1653}
1654
1655static void alps_set_abs_params_mt(struct alps_data *priv,
1656 struct input_dev *dev1)
1657{
1658 set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
1659 input_mt_init_slots(dev1, 2, 0);
1660 input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
1661 input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
1662
1663 set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
1664 set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
1665 set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
1666
1667 input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
1668 input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
1669}
1670
1565int alps_init(struct psmouse *psmouse) 1671int alps_init(struct psmouse *psmouse)
1566{ 1672{
1567 struct alps_data *priv; 1673 struct alps_data *priv;
1568 const struct alps_model_info *model;
1569 struct input_dev *dev1 = psmouse->dev, *dev2; 1674 struct input_dev *dev1 = psmouse->dev, *dev2;
1570 int version;
1571 1675
1572 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL); 1676 priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
1573 dev2 = input_allocate_device(); 1677 dev2 = input_allocate_device();
@@ -1581,13 +1685,10 @@ int alps_init(struct psmouse *psmouse)
1581 1685
1582 psmouse_reset(psmouse); 1686 psmouse_reset(psmouse);
1583 1687
1584 model = alps_get_model(psmouse, &version); 1688 if (alps_identify(psmouse, priv) < 0)
1585 if (!model)
1586 goto init_fail; 1689 goto init_fail;
1587 1690
1588 priv->i = model; 1691 if (priv->hw_init(psmouse))
1589
1590 if (alps_hw_init(psmouse))
1591 goto init_fail; 1692 goto init_fail;
1592 1693
1593 /* 1694 /*
@@ -1609,41 +1710,20 @@ int alps_init(struct psmouse *psmouse)
1609 1710
1610 dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS); 1711 dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
1611 1712
1612 switch (model->proto_version) { 1713 priv->set_abs_params(priv, dev1);
1613 case ALPS_PROTO_V1:
1614 case ALPS_PROTO_V2:
1615 input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
1616 input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
1617 break;
1618 case ALPS_PROTO_V3:
1619 case ALPS_PROTO_V4:
1620 set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
1621 input_mt_init_slots(dev1, 2, 0);
1622 input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
1623 input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
1624
1625 set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
1626 set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
1627 set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
1628
1629 input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
1630 input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
1631 break;
1632 }
1633
1634 input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); 1714 input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
1635 1715
1636 if (model->flags & ALPS_WHEEL) { 1716 if (priv->flags & ALPS_WHEEL) {
1637 dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL); 1717 dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
1638 dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL); 1718 dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL);
1639 } 1719 }
1640 1720
1641 if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { 1721 if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
1642 dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD); 1722 dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD);
1643 dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK); 1723 dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK);
1644 } 1724 }
1645 1725
1646 if (model->flags & ALPS_FOUR_BUTTONS) { 1726 if (priv->flags & ALPS_FOUR_BUTTONS) {
1647 dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0); 1727 dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0);
1648 dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1); 1728 dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
1649 dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2); 1729 dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
@@ -1654,7 +1734,8 @@ int alps_init(struct psmouse *psmouse)
1654 1734
1655 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); 1735 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
1656 dev2->phys = priv->phys; 1736 dev2->phys = priv->phys;
1657 dev2->name = (model->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse"; 1737 dev2->name = (priv->flags & ALPS_DUALPOINT) ?
1738 "DualPoint Stick" : "PS/2 Mouse";
1658 dev2->id.bustype = BUS_I8042; 1739 dev2->id.bustype = BUS_I8042;
1659 dev2->id.vendor = 0x0002; 1740 dev2->id.vendor = 0x0002;
1660 dev2->id.product = PSMOUSE_ALPS; 1741 dev2->id.product = PSMOUSE_ALPS;
@@ -1673,7 +1754,7 @@ int alps_init(struct psmouse *psmouse)
1673 psmouse->poll = alps_poll; 1754 psmouse->poll = alps_poll;
1674 psmouse->disconnect = alps_disconnect; 1755 psmouse->disconnect = alps_disconnect;
1675 psmouse->reconnect = alps_reconnect; 1756 psmouse->reconnect = alps_reconnect;
1676 psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6; 1757 psmouse->pktsize = priv->proto_version == ALPS_PROTO_V4 ? 8 : 6;
1677 1758
1678 /* We are having trouble resyncing ALPS touchpads so disable it for now */ 1759 /* We are having trouble resyncing ALPS touchpads so disable it for now */
1679 psmouse->resync_time = 0; 1760 psmouse->resync_time = 0;
@@ -1690,18 +1771,16 @@ init_fail:
1690 1771
1691int alps_detect(struct psmouse *psmouse, bool set_properties) 1772int alps_detect(struct psmouse *psmouse, bool set_properties)
1692{ 1773{
1693 int version; 1774 struct alps_data dummy;
1694 const struct alps_model_info *model;
1695 1775
1696 model = alps_get_model(psmouse, &version); 1776 if (alps_identify(psmouse, &dummy) < 0)
1697 if (!model)
1698 return -1; 1777 return -1;
1699 1778
1700 if (set_properties) { 1779 if (set_properties) {
1701 psmouse->vendor = "ALPS"; 1780 psmouse->vendor = "ALPS";
1702 psmouse->name = model->flags & ALPS_DUALPOINT ? 1781 psmouse->name = dummy.flags & ALPS_DUALPOINT ?
1703 "DualPoint TouchPad" : "GlidePoint"; 1782 "DualPoint TouchPad" : "GlidePoint";
1704 psmouse->model = version; 1783 psmouse->model = dummy.proto_version << 8;
1705 } 1784 }
1706 return 0; 1785 return 0;
1707} 1786}
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ae1ac354c778..970480551b6e 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,35 +12,146 @@
12#ifndef _ALPS_H 12#ifndef _ALPS_H
13#define _ALPS_H 13#define _ALPS_H
14 14
15#define ALPS_PROTO_V1 0 15#define ALPS_PROTO_V1 1
16#define ALPS_PROTO_V2 1 16#define ALPS_PROTO_V2 2
17#define ALPS_PROTO_V3 2 17#define ALPS_PROTO_V3 3
18#define ALPS_PROTO_V4 3 18#define ALPS_PROTO_V4 4
19 19
20/**
21 * struct alps_model_info - touchpad ID table
22 * @signature: E7 response string to match.
23 * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
24 * (aka command mode response) identifies the firmware minor version. This
25 * can be used to distinguish different hardware models which are not
26 * uniquely identifiable through their E7 responses.
27 * @proto_version: Indicates V1/V2/V3/...
28 * @byte0: Helps figure out whether a position report packet matches the
29 * known format for this model. The first byte of the report, ANDed with
30 * mask0, should match byte0.
31 * @mask0: The mask used to check the first byte of the report.
32 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
33 *
34 * Many (but not all) ALPS touchpads can be identified by looking at the
35 * values returned in the "E7 report" and/or the "EC report." This table
36 * lists a number of such touchpads.
37 */
20struct alps_model_info { 38struct alps_model_info {
21 unsigned char signature[3]; 39 unsigned char signature[3];
22 unsigned char command_mode_resp; /* v3/v4 only */ 40 unsigned char command_mode_resp;
23 unsigned char proto_version; 41 unsigned char proto_version;
24 unsigned char byte0, mask0; 42 unsigned char byte0, mask0;
25 unsigned char flags; 43 unsigned char flags;
26}; 44};
27 45
46/**
47 * struct alps_nibble_commands - encodings for register accesses
48 * @command: PS/2 command used for the nibble
49 * @data: Data supplied as an argument to the PS/2 command, if applicable
50 *
51 * The ALPS protocol uses magic sequences to transmit binary data to the
52 * touchpad, as it is generally not OK to send arbitrary bytes out the
53 * PS/2 port. Each of the sequences in this table sends one nibble of the
54 * register address or (write) data. Different versions of the ALPS protocol
55 * use slightly different encodings.
56 */
28struct alps_nibble_commands { 57struct alps_nibble_commands {
29 int command; 58 int command;
30 unsigned char data; 59 unsigned char data;
31}; 60};
32 61
62/**
63 * struct alps_fields - decoded version of the report packet
64 * @x_map: Bitmap of active X positions for MT.
65 * @y_map: Bitmap of active Y positions for MT.
66 * @fingers: Number of fingers for MT.
67 * @x: X position for ST.
68 * @y: Y position for ST.
69 * @z: Z position for ST.
70 * @first_mp: Packet is the first of a multi-packet report.
71 * @is_mp: Packet is part of a multi-packet report.
72 * @left: Left touchpad button is active.
73 * @right: Right touchpad button is active.
74 * @middle: Middle touchpad button is active.
75 * @ts_left: Left trackstick button is active.
76 * @ts_right: Right trackstick button is active.
77 * @ts_middle: Middle trackstick button is active.
78 */
79struct alps_fields {
80 unsigned int x_map;
81 unsigned int y_map;
82 unsigned int fingers;
83 unsigned int x;
84 unsigned int y;
85 unsigned int z;
86 unsigned int first_mp:1;
87 unsigned int is_mp:1;
88
89 unsigned int left:1;
90 unsigned int right:1;
91 unsigned int middle:1;
92
93 unsigned int ts_left:1;
94 unsigned int ts_right:1;
95 unsigned int ts_middle:1;
96};
97
98/**
99 * struct alps_data - private data structure for the ALPS driver
100 * @dev2: "Relative" device used to report trackstick or mouse activity.
101 * @phys: Physical path for the relative device.
102 * @nibble_commands: Command mapping used for touchpad register accesses.
103 * @addr_command: Command used to tell the touchpad that a register address
104 * follows.
105 * @proto_version: Indicates V1/V2/V3/...
106 * @byte0: Helps figure out whether a position report packet matches the
107 * known format for this model. The first byte of the report, ANDed with
108 * mask0, should match byte0.
109 * @mask0: The mask used to check the first byte of the report.
110 * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
111 * @x_max: Largest possible X position value.
112 * @y_max: Largest possible Y position value.
113 * @x_bits: Number of X bits in the MT bitmap.
114 * @y_bits: Number of Y bits in the MT bitmap.
115 * @hw_init: Protocol-specific hardware init function.
116 * @process_packet: Protocol-specific function to process a report packet.
117 * @decode_fields: Protocol-specific function to read packet bitfields.
118 * @set_abs_params: Protocol-specific function to configure the input_dev.
119 * @prev_fin: Finger bit from previous packet.
120 * @multi_packet: Multi-packet data in progress.
121 * @multi_data: Saved multi-packet data.
122 * @x1: First X coordinate from last MT report.
123 * @x2: Second X coordinate from last MT report.
124 * @y1: First Y coordinate from last MT report.
125 * @y2: Second Y coordinate from last MT report.
126 * @fingers: Number of fingers from last MT report.
127 * @quirks: Bitmap of ALPS_QUIRK_*.
128 * @timer: Timer for flushing out the final report packet in the stream.
129 */
33struct alps_data { 130struct alps_data {
34 struct input_dev *dev2; /* Relative device */ 131 struct input_dev *dev2;
35 char phys[32]; /* Phys */ 132 char phys[32];
36 const struct alps_model_info *i;/* Info */ 133
134 /* these are autodetected when the device is identified */
37 const struct alps_nibble_commands *nibble_commands; 135 const struct alps_nibble_commands *nibble_commands;
38 int addr_command; /* Command to set register address */ 136 int addr_command;
39 int prev_fin; /* Finger bit from previous packet */ 137 unsigned char proto_version;
40 int multi_packet; /* Multi-packet data in progress */ 138 unsigned char byte0, mask0;
41 unsigned char multi_data[6]; /* Saved multi-packet data */ 139 unsigned char flags;
42 int x1, x2, y1, y2; /* Coordinates from last MT report */ 140 int x_max;
43 int fingers; /* Number of fingers from MT report */ 141 int y_max;
142 int x_bits;
143 int y_bits;
144
145 int (*hw_init)(struct psmouse *psmouse);
146 void (*process_packet)(struct psmouse *psmouse);
147 void (*decode_fields)(struct alps_fields *f, unsigned char *p);
148 void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
149
150 int prev_fin;
151 int multi_packet;
152 unsigned char multi_data[6];
153 int x1, x2, y1, y2;
154 int fingers;
44 u8 quirks; 155 u8 quirks;
45 struct timer_list timer; 156 struct timer_list timer;
46}; 157};
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
new file mode 100644
index 000000000000..b409c3d7d4fb
--- /dev/null
+++ b/drivers/input/mouse/cyapa.c
@@ -0,0 +1,973 @@
1/*
2 * Cypress APA trackpad with I2C interface
3 *
4 * Author: Dudley Du <dudl@cypress.com>
5 * Further cleanup and restructuring by:
6 * Daniel Kurtz <djkurtz@chromium.org>
7 * Benson Leung <bleung@chromium.org>
8 *
9 * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
10 * Copyright (C) 2011-2012 Google, Inc.
11 *
12 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file COPYING in the main directory of this archive for
14 * more details.
15 */
16
17#include <linux/delay.h>
18#include <linux/i2c.h>
19#include <linux/input.h>
20#include <linux/input/mt.h>
21#include <linux/interrupt.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24
25/* APA trackpad firmware generation */
26#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
27
28#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
29
30/* commands for read/write registers of Cypress trackpad */
31#define CYAPA_CMD_SOFT_RESET 0x00
32#define CYAPA_CMD_POWER_MODE 0x01
33#define CYAPA_CMD_DEV_STATUS 0x02
34#define CYAPA_CMD_GROUP_DATA 0x03
35#define CYAPA_CMD_GROUP_CMD 0x04
36#define CYAPA_CMD_GROUP_QUERY 0x05
37#define CYAPA_CMD_BL_STATUS 0x06
38#define CYAPA_CMD_BL_HEAD 0x07
39#define CYAPA_CMD_BL_CMD 0x08
40#define CYAPA_CMD_BL_DATA 0x09
41#define CYAPA_CMD_BL_ALL 0x0a
42#define CYAPA_CMD_BLK_PRODUCT_ID 0x0b
43#define CYAPA_CMD_BLK_HEAD 0x0c
44
45/* report data start reg offset address. */
46#define DATA_REG_START_OFFSET 0x0000
47
48#define BL_HEAD_OFFSET 0x00
49#define BL_DATA_OFFSET 0x10
50
51/*
52 * Operational Device Status Register
53 *
54 * bit 7: Valid interrupt source
55 * bit 6 - 4: Reserved
56 * bit 3 - 2: Power status
57 * bit 1 - 0: Device status
58 */
59#define REG_OP_STATUS 0x00
60#define OP_STATUS_SRC 0x80
61#define OP_STATUS_POWER 0x0c
62#define OP_STATUS_DEV 0x03
63#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
64
65/*
66 * Operational Finger Count/Button Flags Register
67 *
68 * bit 7 - 4: Number of touched finger
69 * bit 3: Valid data
70 * bit 2: Middle Physical Button
71 * bit 1: Right Physical Button
72 * bit 0: Left physical Button
73 */
74#define REG_OP_DATA1 0x01
75#define OP_DATA_VALID 0x08
76#define OP_DATA_MIDDLE_BTN 0x04
77#define OP_DATA_RIGHT_BTN 0x02
78#define OP_DATA_LEFT_BTN 0x01
79#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
80 OP_DATA_LEFT_BTN)
81
82/*
83 * Bootloader Status Register
84 *
85 * bit 7: Busy
86 * bit 6 - 5: Reserved
87 * bit 4: Bootloader running
88 * bit 3 - 1: Reserved
89 * bit 0: Checksum valid
90 */
91#define REG_BL_STATUS 0x01
92#define BL_STATUS_BUSY 0x80
93#define BL_STATUS_RUNNING 0x10
94#define BL_STATUS_DATA_VALID 0x08
95#define BL_STATUS_CSUM_VALID 0x01
96
97/*
98 * Bootloader Error Register
99 *
100 * bit 7: Invalid
101 * bit 6: Invalid security key
102 * bit 5: Bootloading
103 * bit 4: Command checksum
104 * bit 3: Flash protection error
105 * bit 2: Flash checksum error
106 * bit 1 - 0: Reserved
107 */
108#define REG_BL_ERROR 0x02
109#define BL_ERROR_INVALID 0x80
110#define BL_ERROR_INVALID_KEY 0x40
111#define BL_ERROR_BOOTLOADING 0x20
112#define BL_ERROR_CMD_CSUM 0x10
113#define BL_ERROR_FLASH_PROT 0x08
114#define BL_ERROR_FLASH_CSUM 0x04
115
116#define BL_STATUS_SIZE 3 /* length of bootloader status registers */
117#define BLK_HEAD_BYTES 32
118
119#define PRODUCT_ID_SIZE 16
120#define QUERY_DATA_SIZE 27
121#define REG_PROTOCOL_GEN_QUERY_OFFSET 20
122
123#define REG_OFFSET_DATA_BASE 0x0000
124#define REG_OFFSET_COMMAND_BASE 0x0028
125#define REG_OFFSET_QUERY_BASE 0x002a
126
127#define CAPABILITY_LEFT_BTN_MASK (0x01 << 3)
128#define CAPABILITY_RIGHT_BTN_MASK (0x01 << 4)
129#define CAPABILITY_MIDDLE_BTN_MASK (0x01 << 5)
130#define CAPABILITY_BTN_MASK (CAPABILITY_LEFT_BTN_MASK | \
131 CAPABILITY_RIGHT_BTN_MASK | \
132 CAPABILITY_MIDDLE_BTN_MASK)
133
134#define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE
135
136#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
137
138#define PWR_MODE_MASK 0xfc
139#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
140#define PWR_MODE_IDLE (0x05 << 2) /* default sleep time is 50 ms. */
141#define PWR_MODE_OFF (0x00 << 2)
142
143#define PWR_STATUS_MASK 0x0c
144#define PWR_STATUS_ACTIVE (0x03 << 2)
145#define PWR_STATUS_IDLE (0x02 << 2)
146#define PWR_STATUS_OFF (0x00 << 2)
147
148/*
149 * CYAPA trackpad device states.
150 * Used in register 0x00, bit1-0, DeviceStatus field.
151 * Other values indicate device is in an abnormal state and must be reset.
152 */
153#define CYAPA_DEV_NORMAL 0x03
154#define CYAPA_DEV_BUSY 0x01
155
156enum cyapa_state {
157 CYAPA_STATE_OP,
158 CYAPA_STATE_BL_IDLE,
159 CYAPA_STATE_BL_ACTIVE,
160 CYAPA_STATE_BL_BUSY,
161 CYAPA_STATE_NO_DEVICE,
162};
163
164
165struct cyapa_touch {
166 /*
167 * high bits or x/y position value
168 * bit 7 - 4: high 4 bits of x position value
169 * bit 3 - 0: high 4 bits of y position value
170 */
171 u8 xy_hi;
172 u8 x_lo; /* low 8 bits of x position value. */
173 u8 y_lo; /* low 8 bits of y position value. */
174 u8 pressure;
175 /* id range is 1 - 15. It is incremented with every new touch. */
176 u8 id;
177} __packed;
178
179/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
180#define CYAPA_MAX_MT_SLOTS 15
181
182struct cyapa_reg_data {
183 /*
184 * bit 0 - 1: device status
185 * bit 3 - 2: power mode
186 * bit 6 - 4: reserved
187 * bit 7: interrupt valid bit
188 */
189 u8 device_status;
190 /*
191 * bit 7 - 4: number of fingers currently touching pad
192 * bit 3: valid data check bit
193 * bit 2: middle mechanism button state if exists
194 * bit 1: right mechanism button state if exists
195 * bit 0: left mechanism button state if exists
196 */
197 u8 finger_btn;
198 /* CYAPA reports up to 5 touches per packet. */
199 struct cyapa_touch touches[5];
200} __packed;
201
202/* The main device structure */
203struct cyapa {
204 enum cyapa_state state;
205
206 struct i2c_client *client;
207 struct input_dev *input;
208 char phys[32]; /* device physical location */
209 int irq;
210 bool irq_wake; /* irq wake is enabled */
211 bool smbus;
212
213 /* read from query data region. */
214 char product_id[16];
215 u8 btn_capability;
216 u8 gen;
217 int max_abs_x;
218 int max_abs_y;
219 int physical_size_x;
220 int physical_size_y;
221};
222
223static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
224 0x04, 0x05, 0x06, 0x07 };
225static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
226 0x05, 0x06, 0x07 };
227
228struct cyapa_cmd_len {
229 u8 cmd;
230 u8 len;
231};
232
233#define CYAPA_ADAPTER_FUNC_NONE 0
234#define CYAPA_ADAPTER_FUNC_I2C 1
235#define CYAPA_ADAPTER_FUNC_SMBUS 2
236#define CYAPA_ADAPTER_FUNC_BOTH 3
237
238/*
239 * macros for SMBus communication
240 */
241#define SMBUS_READ 0x01
242#define SMBUS_WRITE 0x00
243#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
244#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
245#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
246#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
247
248 /* for byte read/write command */
249#define CMD_RESET 0
250#define CMD_POWER_MODE 1
251#define CMD_DEV_STATUS 2
252#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
253#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
254#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
255#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
256
257 /* for group registers read/write command */
258#define REG_GROUP_DATA 0
259#define REG_GROUP_CMD 2
260#define REG_GROUP_QUERY 3
261#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
262#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
263#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
264#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
265
266 /* for register block read/write command */
267#define CMD_BL_STATUS 0
268#define CMD_BL_HEAD 1
269#define CMD_BL_CMD 2
270#define CMD_BL_DATA 3
271#define CMD_BL_ALL 4
272#define CMD_BLK_PRODUCT_ID 5
273#define CMD_BLK_HEAD 6
274#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
275
276/* register block read/write command in bootloader mode */
277#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
278#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
279#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
280#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
281#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
282
283/* register block read/write command in operational mode */
284#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
285#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
286
287static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
288 { CYAPA_OFFSET_SOFT_RESET, 1 },
289 { REG_OFFSET_COMMAND_BASE + 1, 1 },
290 { REG_OFFSET_DATA_BASE, 1 },
291 { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
292 { REG_OFFSET_COMMAND_BASE, 0 },
293 { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE },
294 { BL_HEAD_OFFSET, 3 },
295 { BL_HEAD_OFFSET, 16 },
296 { BL_HEAD_OFFSET, 16 },
297 { BL_DATA_OFFSET, 16 },
298 { BL_HEAD_OFFSET, 32 },
299 { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
300 { REG_OFFSET_DATA_BASE, 32 }
301};
302
303static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
304 { CYAPA_SMBUS_RESET, 1 },
305 { CYAPA_SMBUS_POWER_MODE, 1 },
306 { CYAPA_SMBUS_DEV_STATUS, 1 },
307 { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
308 { CYAPA_SMBUS_GROUP_CMD, 2 },
309 { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
310 { CYAPA_SMBUS_BL_STATUS, 3 },
311 { CYAPA_SMBUS_BL_HEAD, 16 },
312 { CYAPA_SMBUS_BL_CMD, 16 },
313 { CYAPA_SMBUS_BL_DATA, 16 },
314 { CYAPA_SMBUS_BL_ALL, 32 },
315 { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
316 { CYAPA_SMBUS_BLK_HEAD, 16 },
317};
318
319static ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
320 u8 *values)
321{
322 return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
323}
324
325static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
326 size_t len, const u8 *values)
327{
328 return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
329}
330
331/*
332 * cyapa_smbus_read_block - perform smbus block read command
333 * @cyapa - private data structure of the driver
334 * @cmd - the properly encoded smbus command
335 * @len - expected length of smbus command result
336 * @values - buffer to store smbus command result
337 *
338 * Returns negative errno, else the number of bytes written.
339 *
340 * Note:
341 * In trackpad device, the memory block allocated for I2C register map
342 * is 256 bytes, so the max read block for I2C bus is 256 bytes.
343 */
344static ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
345 u8 *values)
346{
347 ssize_t ret;
348 u8 index;
349 u8 smbus_cmd;
350 u8 *buf;
351 struct i2c_client *client = cyapa->client;
352
353 if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
354 return -EINVAL;
355
356 if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
357 /* read specific block registers command. */
358 smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
359 ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
360 goto out;
361 }
362
363 ret = 0;
364 for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
365 smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
366 smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
367 buf = values + I2C_SMBUS_BLOCK_MAX * index;
368 ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
369 if (ret < 0)
370 goto out;
371 }
372
373out:
374 return ret > 0 ? len : ret;
375}
376
377static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
378{
379 u8 cmd;
380
381 if (cyapa->smbus) {
382 cmd = cyapa_smbus_cmds[cmd_idx].cmd;
383 cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
384 } else {
385 cmd = cyapa_i2c_cmds[cmd_idx].cmd;
386 }
387 return i2c_smbus_read_byte_data(cyapa->client, cmd);
388}
389
390static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
391{
392 u8 cmd;
393
394 if (cyapa->smbus) {
395 cmd = cyapa_smbus_cmds[cmd_idx].cmd;
396 cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
397 } else {
398 cmd = cyapa_i2c_cmds[cmd_idx].cmd;
399 }
400 return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
401}
402
403static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
404{
405 u8 cmd;
406 size_t len;
407
408 if (cyapa->smbus) {
409 cmd = cyapa_smbus_cmds[cmd_idx].cmd;
410 len = cyapa_smbus_cmds[cmd_idx].len;
411 return cyapa_smbus_read_block(cyapa, cmd, len, values);
412 } else {
413 cmd = cyapa_i2c_cmds[cmd_idx].cmd;
414 len = cyapa_i2c_cmds[cmd_idx].len;
415 return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
416 }
417}
418
419/*
420 * Query device for its current operating state.
421 *
422 */
423static int cyapa_get_state(struct cyapa *cyapa)
424{
425 int ret;
426 u8 status[BL_STATUS_SIZE];
427
428 cyapa->state = CYAPA_STATE_NO_DEVICE;
429
430 /*
431 * Get trackpad status by reading 3 registers starting from 0.
432 * If the device is in the bootloader, this will be BL_HEAD.
433 * If the device is in operation mode, this will be the DATA regs.
434 *
435 */
436 ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
437 status);
438
439 /*
440 * On smbus systems in OP mode, the i2c_reg_read will fail with
441 * -ETIMEDOUT. In this case, try again using the smbus equivalent
442 * command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
443 */
444 if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
445 ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
446
447 if (ret != BL_STATUS_SIZE)
448 goto error;
449
450 if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
451 switch (status[REG_OP_STATUS] & OP_STATUS_DEV) {
452 case CYAPA_DEV_NORMAL:
453 case CYAPA_DEV_BUSY:
454 cyapa->state = CYAPA_STATE_OP;
455 break;
456 default:
457 ret = -EAGAIN;
458 goto error;
459 }
460 } else {
461 if (status[REG_BL_STATUS] & BL_STATUS_BUSY)
462 cyapa->state = CYAPA_STATE_BL_BUSY;
463 else if (status[REG_BL_ERROR] & BL_ERROR_BOOTLOADING)
464 cyapa->state = CYAPA_STATE_BL_ACTIVE;
465 else
466 cyapa->state = CYAPA_STATE_BL_IDLE;
467 }
468
469 return 0;
470error:
471 return (ret < 0) ? ret : -EAGAIN;
472}
473
474/*
475 * Poll device for its status in a loop, waiting up to timeout for a response.
476 *
477 * When the device switches state, it usually takes ~300 ms.
478 * However, when running a new firmware image, the device must calibrate its
479 * sensors, which can take as long as 2 seconds.
480 *
481 * Note: The timeout has granularity of the polling rate, which is 100 ms.
482 *
483 * Returns:
484 * 0 when the device eventually responds with a valid non-busy state.
485 * -ETIMEDOUT if device never responds (too many -EAGAIN)
486 * < 0 other errors
487 */
488static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
489{
490 int ret;
491 int tries = timeout / 100;
492
493 ret = cyapa_get_state(cyapa);
494 while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
495 msleep(100);
496 ret = cyapa_get_state(cyapa);
497 }
498 return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
499}
500
501static int cyapa_bl_deactivate(struct cyapa *cyapa)
502{
503 int ret;
504
505 ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
506 bl_deactivate);
507 if (ret < 0)
508 return ret;
509
510 /* wait for bootloader to switch to idle state; should take < 100ms */
511 msleep(100);
512 ret = cyapa_poll_state(cyapa, 500);
513 if (ret < 0)
514 return ret;
515 if (cyapa->state != CYAPA_STATE_BL_IDLE)
516 return -EAGAIN;
517 return 0;
518}
519
520/*
521 * Exit bootloader
522 *
523 * Send bl_exit command, then wait 50 - 100 ms to let device transition to
524 * operational mode. If this is the first time the device's firmware is
525 * running, it can take up to 2 seconds to calibrate its sensors. So, poll
526 * the device's new state for up to 2 seconds.
527 *
528 * Returns:
529 * -EIO failure while reading from device
530 * -EAGAIN device is stuck in bootloader, b/c it has invalid firmware
531 * 0 device is supported and in operational mode
532 */
533static int cyapa_bl_exit(struct cyapa *cyapa)
534{
535 int ret;
536
537 ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
538 if (ret < 0)
539 return ret;
540
541 /*
542 * Wait for bootloader to exit, and operation mode to start.
543 * Normally, this takes at least 50 ms.
544 */
545 usleep_range(50000, 100000);
546 /*
547 * In addition, when a device boots for the first time after being
548 * updated to new firmware, it must first calibrate its sensors, which
549 * can take up to an additional 2 seconds.
550 */
551 ret = cyapa_poll_state(cyapa, 2000);
552 if (ret < 0)
553 return ret;
554 if (cyapa->state != CYAPA_STATE_OP)
555 return -EAGAIN;
556
557 return 0;
558}
559
560/*
561 * Set device power mode
562 *
563 */
564static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
565{
566 struct device *dev = &cyapa->client->dev;
567 int ret;
568 u8 power;
569
570 if (cyapa->state != CYAPA_STATE_OP)
571 return 0;
572
573 ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
574 if (ret < 0)
575 return ret;
576
577 power = ret & ~PWR_MODE_MASK;
578 power |= power_mode & PWR_MODE_MASK;
579 ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
580 if (ret < 0)
581 dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
582 power_mode, ret);
583 return ret;
584}
585
586static int cyapa_get_query_data(struct cyapa *cyapa)
587{
588 u8 query_data[QUERY_DATA_SIZE];
589 int ret;
590
591 if (cyapa->state != CYAPA_STATE_OP)
592 return -EBUSY;
593
594 ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
595 if (ret < 0)
596 return ret;
597 if (ret != QUERY_DATA_SIZE)
598 return -EIO;
599
600 memcpy(&cyapa->product_id[0], &query_data[0], 5);
601 cyapa->product_id[5] = '-';
602 memcpy(&cyapa->product_id[6], &query_data[5], 6);
603 cyapa->product_id[12] = '-';
604 memcpy(&cyapa->product_id[13], &query_data[11], 2);
605 cyapa->product_id[15] = '\0';
606
607 cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
608
609 cyapa->gen = query_data[20] & 0x0f;
610
611 cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
612 cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
613
614 cyapa->physical_size_x =
615 ((query_data[24] & 0xf0) << 4) | query_data[25];
616 cyapa->physical_size_y =
617 ((query_data[24] & 0x0f) << 8) | query_data[26];
618
619 return 0;
620}
621
/*
 * Check if device is operational.
 *
 * An operational device is responding, has exited bootloader, and has
 * firmware supported by this driver.
 *
 * Returns:
 *   -EBUSY  no device or in bootloader
 *   -EIO    failure while reading from device
 *   -EAGAIN device is still in bootloader
 *           if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
 *   -EINVAL device is in operational mode, but not supported by this driver
 *   0       device is supported
 */
static int cyapa_check_is_operational(struct cyapa *cyapa)
{
	struct device *dev = &cyapa->client->dev;
	static const char unique_str[] = "CYTRA";
	int ret;

	/* Wait up to 2 seconds for the device to report a known state. */
	ret = cyapa_poll_state(cyapa, 2000);
	if (ret < 0)
		return ret;
	switch (cyapa->state) {
	case CYAPA_STATE_BL_ACTIVE:
		/* Deactivate the bootloader, then continue as if idle. */
		ret = cyapa_bl_deactivate(cyapa);
		if (ret)
			return ret;

		/* Fallthrough state */
	case CYAPA_STATE_BL_IDLE:
		/* Leave the bootloader, then validate operational mode. */
		ret = cyapa_bl_exit(cyapa);
		if (ret)
			return ret;

		/* Fallthrough state */
	case CYAPA_STATE_OP:
		ret = cyapa_get_query_data(cyapa);
		if (ret < 0)
			return ret;

		/* only support firmware protocol gen3 */
		if (cyapa->gen != CYAPA_GEN3) {
			dev_err(dev, "unsupported protocol version (%d)",
				cyapa->gen);
			return -EINVAL;
		}

		/* only support product ID starting with CYTRA */
		if (memcmp(cyapa->product_id, unique_str,
			   sizeof(unique_str) - 1) != 0) {
			dev_err(dev, "unsupported product ID (%s)\n",
				cyapa->product_id);
			return -EINVAL;
		}
		return 0;

	default:
		return -EIO;
	}
	return 0;	/* not reached: every case above returns */
}
684
/*
 * Threaded interrupt handler: read one data report from the trackpad and
 * forward it to the input core as MT protocol B slot events plus button
 * state.  Malformed or invalid reports are silently dropped; the handler
 * always returns IRQ_HANDLED.
 */
static irqreturn_t cyapa_irq(int irq, void *dev_id)
{
	struct cyapa *cyapa = dev_id;
	struct device *dev = &cyapa->client->dev;
	struct input_dev *input = cyapa->input;
	struct cyapa_reg_data data;
	int i;
	int ret;
	int num_fingers;

	/* Tell the PM core this IRQ is wakeup activity. */
	if (device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* Fetch the whole report in one block transfer; drop short reads. */
	ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
	if (ret != sizeof(data))
		goto out;

	/* Drop reports whose status/valid marker bits are not all set. */
	if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
	    (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
	    (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
		goto out;
	}

	/* High nibble of the finger/button byte is the contact count. */
	num_fingers = (data.finger_btn >> 4) & 0x0f;
	for (i = 0; i < num_fingers; i++) {
		const struct cyapa_touch *touch = &data.touches[i];
		/* Note: touch->id range is 1 to 15; slots are 0 to 14. */
		int slot = touch->id - 1;

		input_mt_slot(input, slot);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X,
				 ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
		input_report_abs(input, ABS_MT_POSITION_Y,
				 ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
		input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
	}

	/* Let the MT core drop untouched slots and emit pointer emulation. */
	input_mt_sync_frame(input);

	if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
		input_report_key(input, BTN_LEFT,
				 data.finger_btn & OP_DATA_LEFT_BTN);

	if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
		input_report_key(input, BTN_MIDDLE,
				 data.finger_btn & OP_DATA_MIDDLE_BTN);

	if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
		input_report_key(input, BTN_RIGHT,
				 data.finger_btn & OP_DATA_RIGHT_BTN);

	input_sync(input);

out:
	return IRQ_HANDLED;
}
742
743static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
744{
745 u8 ret = CYAPA_ADAPTER_FUNC_NONE;
746
747 if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
748 ret |= CYAPA_ADAPTER_FUNC_I2C;
749 if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
750 I2C_FUNC_SMBUS_BLOCK_DATA |
751 I2C_FUNC_SMBUS_I2C_BLOCK))
752 ret |= CYAPA_ADAPTER_FUNC_SMBUS;
753 return ret;
754}
755
/*
 * Allocate, configure and register the input device for the trackpad.
 *
 * Requires the query data (axis maxima and physical size) to have been
 * read already; fails with -EINVAL when the physical size is unknown
 * because it is needed to compute the axis resolution.
 *
 * Returns 0 on success or a negative errno; on failure cyapa->input is
 * left NULL and nothing remains allocated.
 */
static int cyapa_create_input_dev(struct cyapa *cyapa)
{
	struct device *dev = &cyapa->client->dev;
	int ret;
	struct input_dev *input;

	if (!cyapa->physical_size_x || !cyapa->physical_size_y)
		return -EINVAL;

	input = cyapa->input = input_allocate_device();
	if (!input) {
		dev_err(dev, "allocate memory for input device failed\n");
		return -ENOMEM;
	}

	input->name = CYAPA_NAME;
	input->phys = cyapa->phys;
	input->id.bustype = BUS_I2C;
	input->id.version = 1;
	input->id.product = 0;  /* means any product in eventcomm. */
	input->dev.parent = &cyapa->client->dev;

	input_set_drvdata(input, cyapa);

	__set_bit(EV_ABS, input->evbit);

	/* finger position */
	input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
			     0);
	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
			     0);
	input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);

	/* Resolution = logical units per physical unit. */
	input_abs_set_res(input, ABS_MT_POSITION_X,
			  cyapa->max_abs_x / cyapa->physical_size_x);
	input_abs_set_res(input, ABS_MT_POSITION_Y,
			  cyapa->max_abs_y / cyapa->physical_size_y);

	if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
		__set_bit(BTN_LEFT, input->keybit);
	if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
		__set_bit(BTN_MIDDLE, input->keybit);
	if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
		__set_bit(BTN_RIGHT, input->keybit);

	/* Only a left button means the whole surface is one button. */
	if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
		__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);

	/* handle pointer emulation and unused slots in core */
	ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
				  INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
	if (ret) {
		dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
		goto err_free_device;
	}

	/* Register the device in input subsystem */
	ret = input_register_device(input);
	if (ret) {
		dev_err(dev, "input device register failed, %d\n", ret);
		goto err_free_device;
	}
	return 0;

err_free_device:
	input_free_device(input);
	cyapa->input = NULL;
	return ret;
}
825
826static int cyapa_probe(struct i2c_client *client,
827 const struct i2c_device_id *dev_id)
828{
829 int ret;
830 u8 adapter_func;
831 struct cyapa *cyapa;
832 struct device *dev = &client->dev;
833
834 adapter_func = cyapa_check_adapter_functionality(client);
835 if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
836 dev_err(dev, "not a supported I2C/SMBus adapter\n");
837 return -EIO;
838 }
839
840 cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
841 if (!cyapa) {
842 dev_err(dev, "allocate memory for cyapa failed\n");
843 return -ENOMEM;
844 }
845
846 cyapa->gen = CYAPA_GEN3;
847 cyapa->client = client;
848 i2c_set_clientdata(client, cyapa);
849 sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
850 client->addr);
851
852 /* i2c isn't supported, use smbus */
853 if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
854 cyapa->smbus = true;
855 cyapa->state = CYAPA_STATE_NO_DEVICE;
856 ret = cyapa_check_is_operational(cyapa);
857 if (ret) {
858 dev_err(dev, "device not operational, %d\n", ret);
859 goto err_mem_free;
860 }
861
862 ret = cyapa_create_input_dev(cyapa);
863 if (ret) {
864 dev_err(dev, "create input_dev instance failed, %d\n", ret);
865 goto err_mem_free;
866 }
867
868 ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
869 if (ret) {
870 dev_err(dev, "set active power failed, %d\n", ret);
871 goto err_unregister_device;
872 }
873
874 cyapa->irq = client->irq;
875 ret = request_threaded_irq(cyapa->irq,
876 NULL,
877 cyapa_irq,
878 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
879 "cyapa",
880 cyapa);
881 if (ret) {
882 dev_err(dev, "IRQ request failed: %d\n, ", ret);
883 goto err_unregister_device;
884 }
885
886 return 0;
887
888err_unregister_device:
889 input_unregister_device(cyapa->input);
890err_mem_free:
891 kfree(cyapa);
892
893 return ret;
894}
895
/*
 * Driver unbind: release the IRQ, unregister the input device, power the
 * trackpad off and free the per-device state.  Order matters: the IRQ is
 * freed first so no handler can run against a dying input device.
 */
static int cyapa_remove(struct i2c_client *client)
{
	struct cyapa *cyapa = i2c_get_clientdata(client);

	free_irq(cyapa->irq, cyapa);
	input_unregister_device(cyapa->input);
	cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
	kfree(cyapa);

	return 0;
}
907
908#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: mask the IRQ and drop the pad into a low-power
 * state.  Always returns 0 so suspend proceeds even if the power-mode
 * write fails (the failure is only logged).
 */
static int cyapa_suspend(struct device *dev)
{
	int ret;
	u8 power_mode;
	struct cyapa *cyapa = dev_get_drvdata(dev);

	disable_irq(cyapa->irq);

	/*
	 * Set trackpad device to idle mode if wakeup is allowed,
	 * otherwise turn off.
	 */
	power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
					    : PWR_MODE_OFF;
	ret = cyapa_set_power_mode(cyapa, power_mode);
	if (ret < 0)
		dev_err(dev, "set power mode failed, %d\n", ret);

	/* Arm the IRQ as a wakeup source; remember whether it worked. */
	if (device_may_wakeup(dev))
		cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
	return 0;
}
931
/*
 * System-sleep resume: undo suspend in reverse — disarm the wake IRQ (if
 * it was armed), restore full active power and unmask the IRQ.  Always
 * returns 0; a power-mode failure is only logged.
 */
static int cyapa_resume(struct device *dev)
{
	int ret;
	struct cyapa *cyapa = dev_get_drvdata(dev);

	if (device_may_wakeup(dev) && cyapa->irq_wake)
		disable_irq_wake(cyapa->irq);

	ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
	if (ret)
		dev_warn(dev, "resume active power failed, %d\n", ret);

	enable_irq(cyapa->irq);
	return 0;
}
947#endif /* CONFIG_PM_SLEEP */
948
/* Hook the suspend/resume callbacks into the standard dev PM ops. */
static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);

/* I2C device IDs this driver binds to. */
static const struct i2c_device_id cyapa_id_table[] = {
	{ "cyapa", 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, cyapa_id_table);

static struct i2c_driver cyapa_driver = {
	.driver = {
		.name = "cyapa",
		.owner = THIS_MODULE,
		.pm = &cyapa_pm_ops,
	},

	.probe = cyapa_probe,
	.remove = cyapa_remove,
	.id_table = cyapa_id_table,
};

module_i2c_driver(cyapa_driver);

MODULE_DESCRIPTION("Cypress APA I2C Trackpad Driver");
MODULE_AUTHOR("Dudley Du <dudl@cypress.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
new file mode 100644
index 000000000000..1673dc6c8092
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -0,0 +1,725 @@
1/*
2 * Cypress Trackpad PS/2 mouse driver
3 *
4 * Copyright (c) 2012 Cypress Semiconductor Corporation.
5 *
6 * Author:
7 * Dudley Du <dudl@cypress.com>
8 *
9 * Additional contributors include:
10 * Kamal Mostafa <kamal@canonical.com>
11 * Kyle Fazzari <git@status.e4ward.com>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License version 2 as published by
15 * the Free Software Foundation.
16 */
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/slab.h>
22#include <linux/serio.h>
23#include <linux/libps2.h>
24#include <linux/input.h>
25#include <linux/input/mt.h>
26#include <linux/sched.h>
27#include <linux/wait.h>
28
29#include "cypress_ps2.h"
30
31#undef CYTP_DEBUG_VERBOSE /* define this and DEBUG for more verbose dump */
32
/*
 * Record the expected size (in bytes) of the next report packet so the
 * protocol handler knows when a complete packet has arrived.
 */
static void cypress_set_packet_size(struct psmouse *psmouse, unsigned int n)
{
	struct cytp_data *cytp = psmouse->private;
	cytp->pkt_size = n;
}
38
/* Report rates (Hz) and resolution codes the device may legally echo
 * back; used to validate command responses. */
static const unsigned char cytp_rate[] = {10, 20, 40, 60, 100, 200};
static const unsigned char cytp_resolution[] = {0x00, 0x01, 0x02, 0x03};
41
/*
 * Send a single byte to the trackpad and wait for its acknowledge.
 *
 * Returns 0 on success, CYTP_PS2_RETRY when the device NAKed with the
 * retry code (the caller may resend), or CYTP_PS2_ERROR for any other
 * failure.
 */
static int cypress_ps2_sendbyte(struct psmouse *psmouse, int value)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;

	if (ps2_sendbyte(ps2dev, value & 0xff, CYTP_CMD_TIMEOUT) < 0) {
		psmouse_dbg(psmouse,
				"sending command 0x%02x failed, resp 0x%02x\n",
				value & 0xff, ps2dev->nak);
		/* Map the device's NAK code onto our two error classes. */
		if (ps2dev->nak == CYTP_PS2_RETRY)
			return CYTP_PS2_RETRY;
		else
			return CYTP_PS2_ERROR;
	}

#ifdef CYTP_DEBUG_VERBOSE
	psmouse_dbg(psmouse, "sending command 0x%02x succeeded, resp 0xfa\n",
			value & 0xff);
#endif

	return 0;
}
63
/*
 * Send one extension command byte plus its data byte, retrying the whole
 * exchange up to CYTP_PS2_CMD_TRIES times.
 *
 * Returns the status of the last byte sent: 0, CYTP_PS2_RETRY or
 * CYTP_PS2_ERROR.
 */
static int cypress_ps2_ext_cmd(struct psmouse *psmouse, unsigned short cmd,
			       unsigned char data)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	int tries = CYTP_PS2_CMD_TRIES;
	int rc;

	ps2_begin_command(ps2dev);

	do {
		/*
		 * Send extension command byte (0xE8 or 0xF3).
		 * If sending the command fails, send recovery command
		 * to make the device return to the ready state.
		 */
		rc = cypress_ps2_sendbyte(psmouse, cmd & 0xff);
		if (rc == CYTP_PS2_RETRY) {
			rc = cypress_ps2_sendbyte(psmouse, 0x00);
			if (rc == CYTP_PS2_RETRY)
				rc = cypress_ps2_sendbyte(psmouse, 0x0a);
		}
		/* Hard error on the command byte: restart the exchange. */
		if (rc == CYTP_PS2_ERROR)
			continue;

		/* Data byte gets exactly one resend on a retry NAK. */
		rc = cypress_ps2_sendbyte(psmouse, data);
		if (rc == CYTP_PS2_RETRY)
			rc = cypress_ps2_sendbyte(psmouse, data);
		if (rc == CYTP_PS2_ERROR)
			continue;
		else
			break;
	} while (--tries > 0);

	ps2_end_command(ps2dev);

	return rc;
}
101
/*
 * Issue a status request (0xE9) and collect the device's multi-byte
 * response into @param: 8 bytes for CYTP_CMD_READ_TP_METRICS, 3 bytes
 * for everything else.
 *
 * The psmouse state is temporarily switched to PSMOUSE_CMD_MODE so the
 * response bytes accumulate in psmouse->packet instead of being parsed
 * as motion reports; the previous state is restored before returning.
 *
 * Returns 0 on success or the error from sending the status request.
 * Note: a response timeout is not itself an error — @param is simply
 * left with whatever (zero-initialized) bytes arrived.
 */
static int cypress_ps2_read_cmd_status(struct psmouse *psmouse,
				       unsigned char cmd,
				       unsigned char *param)
{
	int rc;
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	enum psmouse_state old_state;
	int pktsize;

	ps2_begin_command(&psmouse->ps2dev);

	old_state = psmouse->state;
	psmouse->state = PSMOUSE_CMD_MODE;
	psmouse->pktcnt = 0;

	pktsize = (cmd == CYTP_CMD_READ_TP_METRICS) ? 8 : 3;
	memset(param, 0, pktsize);

	rc = cypress_ps2_sendbyte(psmouse, 0xe9);
	if (rc < 0)
		goto out;

	/* Wait (bounded) for the interrupt path to fill the packet. */
	wait_event_timeout(ps2dev->wait,
			(psmouse->pktcnt >= pktsize),
			msecs_to_jiffies(CYTP_CMD_TIMEOUT));

	memcpy(param, psmouse->packet, pktsize);

	psmouse_dbg(psmouse, "Command 0x%02x response data (0x): %*ph\n",
			cmd, pktsize, param);

out:
	psmouse->state = old_state;
	psmouse->pktcnt = 0;

	ps2_end_command(&psmouse->ps2dev);

	return rc;
}
141
/*
 * Sanity-check a command response: for mode-setting commands the device
 * must report stream mode with a recognized resolution and rate.
 *
 * Returns true when the response looks valid (or when the command's
 * response is validated by the caller instead).
 */
static bool cypress_verify_cmd_state(struct psmouse *psmouse,
				     unsigned char cmd, unsigned char *param)
{
	bool rate_match = false;
	bool resolution_match = false;
	int i;

	/* callers will do further checking. */
	if (cmd == CYTP_CMD_READ_CYPRESS_ID ||
	    cmd == CYTP_CMD_STANDARD_MODE ||
	    cmd == CYTP_CMD_READ_TP_METRICS)
		return true;

	/*
	 * NOTE(review): because of the bitwise NOT, this requires every
	 * DFLT_RESP_BITS_VALID bit of param[0] to be *clear* — confirm
	 * against the device response spec that the inversion is intended.
	 */
	if ((~param[0] & DFLT_RESP_BITS_VALID) == DFLT_RESP_BITS_VALID &&
	    (param[0] & DFLT_RESP_BIT_MODE) == DFLT_RESP_STREAM_MODE) {
		/* Resolution and rate must both be values we could have set. */
		for (i = 0; i < sizeof(cytp_resolution); i++)
			if (cytp_resolution[i] == param[1])
				resolution_match = true;

		for (i = 0; i < sizeof(cytp_rate); i++)
			if (cytp_rate[i] == param[2])
				rate_match = true;

		if (resolution_match && rate_match)
			return true;
	}

	psmouse_dbg(psmouse, "verify cmd state failed.\n");
	return false;
}
172
/*
 * Send a full encoded extension command and read back its response.
 *
 * The 8-bit command is split into four 2-bit fields, each delivered as
 * the argument of a SETRES command (dd first, aa last), then the
 * response is fetched and validated.  The whole sequence is retried up
 * to CYTP_PS2_CMD_TRIES times.
 *
 * Returns 0 with the response in @param, or -EIO when no valid response
 * could be obtained.
 */
static int cypress_send_ext_cmd(struct psmouse *psmouse, unsigned char cmd,
				unsigned char *param)
{
	int tries = CYTP_PS2_CMD_TRIES;
	int rc;

	psmouse_dbg(psmouse, "send extension cmd 0x%02x, [%d %d %d %d]\n",
		 cmd, DECODE_CMD_AA(cmd), DECODE_CMD_BB(cmd),
		 DECODE_CMD_CC(cmd), DECODE_CMD_DD(cmd));

	do {
		cypress_ps2_ext_cmd(psmouse,
				    PSMOUSE_CMD_SETRES, DECODE_CMD_DD(cmd));
		cypress_ps2_ext_cmd(psmouse,
				    PSMOUSE_CMD_SETRES, DECODE_CMD_CC(cmd));
		cypress_ps2_ext_cmd(psmouse,
				    PSMOUSE_CMD_SETRES, DECODE_CMD_BB(cmd));
		cypress_ps2_ext_cmd(psmouse,
				    PSMOUSE_CMD_SETRES, DECODE_CMD_AA(cmd));

		rc = cypress_ps2_read_cmd_status(psmouse, cmd, param);
		if (rc)
			continue;

		if (cypress_verify_cmd_state(psmouse, cmd, param))
			return 0;

	} while (--tries > 0);

	return -EIO;
}
204
205int cypress_detect(struct psmouse *psmouse, bool set_properties)
206{
207 unsigned char param[3];
208
209 if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
210 return -ENODEV;
211
212 /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
213 if (param[0] != 0x33 || param[1] != 0xCC)
214 return -ENODEV;
215
216 if (set_properties) {
217 psmouse->vendor = "Cypress";
218 psmouse->name = "Trackpad";
219 }
220
221 return 0;
222}
223
/*
 * Read the firmware version and the metrics-capability flag from the ID
 * response and cache them in the per-device state.
 *
 * Returns 0 on success, -ENODEV when the device does not answer with the
 * Cypress signature.
 */
static int cypress_read_fw_version(struct psmouse *psmouse)
{
	struct cytp_data *cytp = psmouse->private;
	unsigned char param[3];

	if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
		return -ENODEV;

	/* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
	if (param[0] != 0x33 || param[1] != 0xCC)
		return -ENODEV;

	/* FW_VERSION_MASX (sic — macro name as declared in cypress_ps2.h). */
	cytp->fw_version = param[2] & FW_VERSION_MASX;
	cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;

	psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
	psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
		 cytp->tp_metrics_supported);

	return 0;
}
245
/*
 * Query the trackpad geometry/pressure metrics, falling back to built-in
 * defaults when the query fails, and derive the per-axis resolution.
 *
 * Returns 0 on success, or -EINVAL when the (possibly device-supplied)
 * metrics are inconsistent (zero or smaller than the physical size).
 */
static int cypress_read_tp_metrics(struct psmouse *psmouse)
{
	struct cytp_data *cytp = psmouse->private;
	unsigned char param[8];

	/* set default values for tp metrics. */
	cytp->tp_width = CYTP_DEFAULT_WIDTH;
	cytp->tp_high = CYTP_DEFAULT_HIGH;
	cytp->tp_max_abs_x = CYTP_ABS_MAX_X;
	cytp->tp_max_abs_y = CYTP_ABS_MAX_Y;
	cytp->tp_min_pressure = CYTP_MIN_PRESSURE;
	cytp->tp_max_pressure = CYTP_MAX_PRESSURE;
	cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
	cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;

	memset(param, 0, sizeof(param));
	if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
		/* Update trackpad parameters (little-endian 16-bit maxima). */
		cytp->tp_max_abs_x = (param[1] << 8) | param[0];
		cytp->tp_max_abs_y = (param[3] << 8) | param[2];
		cytp->tp_min_pressure = param[4];
		cytp->tp_max_pressure = param[5];
	}

	/* Reject obviously bogus metrics before using them as divisors. */
	if (!cytp->tp_max_pressure ||
	    cytp->tp_max_pressure < cytp->tp_min_pressure ||
	    !cytp->tp_width || !cytp->tp_high ||
	    !cytp->tp_max_abs_x ||
	    cytp->tp_max_abs_x < cytp->tp_width ||
	    !cytp->tp_max_abs_y ||
	    cytp->tp_max_abs_y < cytp->tp_high)
		return -EINVAL;

	cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
	cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;

#ifdef CYTP_DEBUG_VERBOSE
	psmouse_dbg(psmouse, "Dump trackpad hardware configuration as below:\n");
	psmouse_dbg(psmouse, "cytp->tp_width = %d\n", cytp->tp_width);
	psmouse_dbg(psmouse, "cytp->tp_high = %d\n", cytp->tp_high);
	psmouse_dbg(psmouse, "cytp->tp_max_abs_x = %d\n", cytp->tp_max_abs_x);
	psmouse_dbg(psmouse, "cytp->tp_max_abs_y = %d\n", cytp->tp_max_abs_y);
	psmouse_dbg(psmouse, "cytp->tp_min_pressure = %d\n", cytp->tp_min_pressure);
	psmouse_dbg(psmouse, "cytp->tp_max_pressure = %d\n", cytp->tp_max_pressure);
	psmouse_dbg(psmouse, "cytp->tp_res_x = %d\n", cytp->tp_res_x);
	psmouse_dbg(psmouse, "cytp->tp_res_y = %d\n", cytp->tp_res_y);

	psmouse_dbg(psmouse, "tp_type_APA = %d\n",
		 (param[6] & TP_METRICS_BIT_APA) ? 1 : 0);
	psmouse_dbg(psmouse, "tp_type_MTG = %d\n",
		 (param[6] & TP_METRICS_BIT_MTG) ? 1 : 0);
	psmouse_dbg(psmouse, "tp_palm = %d\n",
		 (param[6] & TP_METRICS_BIT_PALM) ? 1 : 0);
	psmouse_dbg(psmouse, "tp_stubborn = %d\n",
		 (param[6] & TP_METRICS_BIT_STUBBORN) ? 1 : 0);
	psmouse_dbg(psmouse, "tp_1f_jitter = %d\n",
		 (param[6] & TP_METRICS_BIT_1F_JITTER) >> 2);
	psmouse_dbg(psmouse, "tp_2f_jitter = %d\n",
		 (param[6] & TP_METRICS_BIT_2F_JITTER) >> 4);
	psmouse_dbg(psmouse, "tp_1f_spike = %d\n",
		 param[7] & TP_METRICS_BIT_1F_SPIKE);
	psmouse_dbg(psmouse, "tp_2f_spike = %d\n",
		 (param[7] & TP_METRICS_BIT_2F_SPIKE) >> 2);
	psmouse_dbg(psmouse, "tp_abs_packet_format_set = %d\n",
		 (param[7] & TP_METRICS_BIT_ABS_PKT_FORMAT_SET) >> 4);
#endif

	return 0;
}
315
316static int cypress_query_hardware(struct psmouse *psmouse)
317{
318 struct cytp_data *cytp = psmouse->private;
319 int ret;
320
321 ret = cypress_read_fw_version(psmouse);
322 if (ret)
323 return ret;
324
325 if (cytp->tp_metrics_supported) {
326 ret = cypress_read_tp_metrics(psmouse);
327 if (ret)
328 return ret;
329 }
330
331 return 0;
332}
333
/*
 * Switch the trackpad into absolute reporting mode with pressure and set
 * the base packet size (5 bytes for a single-finger report).
 *
 * Returns 0 on success or -1 on failure (callers only test non-zero).
 */
static int cypress_set_absolute_mode(struct psmouse *psmouse)
{
	struct cytp_data *cytp = psmouse->private;
	unsigned char param[3];

	if (cypress_send_ext_cmd(psmouse, CYTP_CMD_ABS_WITH_PRESSURE_MODE, param) < 0)
		return -1;

	/* Replace any previous abs/rel mode bits with abs-with-pressure. */
	cytp->mode = (cytp->mode & ~CYTP_BIT_ABS_REL_MASK)
			| CYTP_BIT_ABS_PRESSURE;
	cypress_set_packet_size(psmouse, 5);

	return 0;
}
348
/*
 * Reset trackpad device.
 * This is also the default mode when trackpad powered on: the cached
 * mode bits are cleared to match the hardware's post-reset state.
 */
static void cypress_reset(struct psmouse *psmouse)
{
	struct cytp_data *cytp = psmouse->private;

	cytp->mode = 0;

	psmouse_reset(psmouse);
}
361
/*
 * Configure the input device capabilities for the trackpad: single-touch
 * and MT axes, semi-MT slots, tool/button keys and buttonpad property.
 *
 * Returns 0 on success, -EINVAL when the resolution is unknown, or a
 * negative errno from slot initialization.
 */
static int cypress_set_input_params(struct input_dev *input,
				    struct cytp_data *cytp)
{
	int ret;

	if (!cytp->tp_res_x || !cytp->tp_res_y)
		return -EINVAL;

	__set_bit(EV_ABS, input->evbit);
	input_set_abs_params(input, ABS_X, 0, cytp->tp_max_abs_x, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, cytp->tp_max_abs_y, 0, 0);
	input_set_abs_params(input, ABS_PRESSURE,
			     cytp->tp_min_pressure, cytp->tp_max_pressure, 0, 0);
	input_set_abs_params(input, ABS_TOOL_WIDTH, 0, 255, 0, 0);

	/* finger position */
	input_set_abs_params(input, ABS_MT_POSITION_X, 0, cytp->tp_max_abs_x, 0, 0);
	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cytp->tp_max_abs_y, 0, 0);
	input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);

	/* Let the MT core track contacts and drop unused slots. */
	ret = input_mt_init_slots(input, CYTP_MAX_MT_SLOTS,
			INPUT_MT_DROP_UNUSED|INPUT_MT_TRACK);
	if (ret < 0)
		return ret;

	/* Only two positions are reported, even for >2 fingers. */
	__set_bit(INPUT_PROP_SEMI_MT, input->propbit);

	input_abs_set_res(input, ABS_X, cytp->tp_res_x);
	input_abs_set_res(input, ABS_Y, cytp->tp_res_y);

	input_abs_set_res(input, ABS_MT_POSITION_X, cytp->tp_res_x);
	input_abs_set_res(input, ABS_MT_POSITION_Y, cytp->tp_res_y);

	__set_bit(BTN_TOUCH, input->keybit);
	__set_bit(BTN_TOOL_FINGER, input->keybit);
	__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
	__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
	__set_bit(BTN_TOOL_QUADTAP, input->keybit);
	__set_bit(BTN_TOOL_QUINTTAP, input->keybit);

	/* Absolute-only device: make sure no relative axes are advertised. */
	__clear_bit(EV_REL, input->evbit);
	__clear_bit(REL_X, input->relbit);
	__clear_bit(REL_Y, input->relbit);

	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
	__set_bit(EV_KEY, input->evbit);
	__set_bit(BTN_LEFT, input->keybit);
	__set_bit(BTN_RIGHT, input->keybit);
	__set_bit(BTN_MIDDLE, input->keybit);

	input_set_drvdata(input, cytp);

	return 0;
}
416
417static int cypress_get_finger_count(unsigned char header_byte)
418{
419 unsigned char bits6_7;
420 int finger_count;
421
422 bits6_7 = header_byte >> 6;
423 finger_count = bits6_7 & 0x03;
424
425 if (finger_count == 1)
426 return 1;
427
428 if (header_byte & ABS_HSCROLL_BIT) {
429 /* HSCROLL gets added on to 0 finger count. */
430 switch (finger_count) {
431 case 0: return 4;
432 case 2: return 5;
433 default:
434 /* Invalid contact (e.g. palm). Ignore it. */
435 return -1;
436 }
437 }
438
439 return finger_count;
440}
441
442
/*
 * Decode a raw report packet into @report_data: contact count, up to two
 * contact coordinates (with pressure when the mode provides it), tap
 * flag and button states.
 *
 * Returns 0 on success, -EINVAL for an invalid contact (e.g. palm).
 */
static int cypress_parse_packet(struct psmouse *psmouse,
				struct cytp_data *cytp, struct cytp_report_data *report_data)
{
	unsigned char *packet = psmouse->packet;
	unsigned char header_byte = packet[0];
	int contact_cnt;

	memset(report_data, 0, sizeof(struct cytp_report_data));

	contact_cnt = cypress_get_finger_count(header_byte);

	if (contact_cnt < 0) /* e.g. palm detect */
		return -EINVAL;

	report_data->contact_cnt = contact_cnt;

	report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;

	if (report_data->contact_cnt == 1) {
		/* Single contact: 11-bit x/y split across bytes 1-3. */
		report_data->contacts[0].x =
			((packet[1] & 0x70) << 4) | packet[2];
		report_data->contacts[0].y =
			((packet[1] & 0x07) << 8) | packet[3];
		if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
			report_data->contacts[0].z = packet[4];

	} else if (report_data->contact_cnt >= 2) {
		report_data->contacts[0].x =
			((packet[1] & 0x70) << 4) | packet[2];
		report_data->contacts[0].y =
			((packet[1] & 0x07) << 8) | packet[3];
		if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
			report_data->contacts[0].z = packet[4];

		/* Second contact uses a wider 12-bit x in bytes 5-7. */
		report_data->contacts[1].x =
			((packet[5] & 0xf0) << 4) | packet[6];
		report_data->contacts[1].y =
			((packet[5] & 0x0f) << 8) | packet[7];
		if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
			/* Only one pressure is reported; reuse it. */
			report_data->contacts[1].z = report_data->contacts[0].z;
	}

	report_data->left = (header_byte & BTN_LEFT_BIT) ? 1 : 0;
	report_data->right = (header_byte & BTN_RIGHT_BIT) ? 1 : 0;

	/*
	 * This is only true if one of the mouse buttons were tapped.  Make
	 * sure it doesn't turn into a click. The regular tap-to-click
	 * functionality will handle that on its own. If we don't do this,
	 * disabling tap-to-click won't affect the mouse button zones.
	 */
	if (report_data->tap)
		report_data->left = 0;

#ifdef CYTP_DEBUG_VERBOSE
	{
		int i;
		int n = report_data->contact_cnt;
		psmouse_dbg(psmouse, "Dump parsed report data as below:\n");
		psmouse_dbg(psmouse, "contact_cnt = %d\n",
			report_data->contact_cnt);
		if (n > CYTP_MAX_MT_SLOTS)
			n = CYTP_MAX_MT_SLOTS;
		for (i = 0; i < n; i++)
			psmouse_dbg(psmouse, "contacts[%d] = {%d, %d, %d}\n", i,
					report_data->contacts[i].x,
					report_data->contacts[i].y,
					report_data->contacts[i].z);
		psmouse_dbg(psmouse, "left = %d\n", report_data->left);
		psmouse_dbg(psmouse, "right = %d\n", report_data->right);
		psmouse_dbg(psmouse, "middle = %d\n", report_data->middle);
	}
#endif

	return 0;
}
519
/*
 * Parse one complete report packet and emit it to the input core as
 * semi-MT slot events (slots chosen by input_mt_assign_slots), finger
 * count emulation and button state.
 *
 * NOTE(review): @zero_pkt is not used in this function even though the
 * validator passes 1 for the special "all fingers left" packet — confirm
 * whether it was meant to gate any of the reporting below.
 */
static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
{
	int i;
	struct input_dev *input = psmouse->dev;
	struct cytp_data *cytp = psmouse->private;
	struct cytp_report_data report_data;
	struct cytp_contact *contact;
	struct input_mt_pos pos[CYTP_MAX_MT_SLOTS];
	int slots[CYTP_MAX_MT_SLOTS];
	int n;

	/* Invalid packets (e.g. palm) are dropped without reporting. */
	if (cypress_parse_packet(psmouse, cytp, &report_data))
		return;

	n = report_data.contact_cnt;

	/* At most two positions are available (semi-MT). */
	if (n > CYTP_MAX_MT_SLOTS)
		n = CYTP_MAX_MT_SLOTS;

	for (i = 0; i < n; i++) {
		contact = &report_data.contacts[i];
		pos[i].x = contact->x;
		pos[i].y = contact->y;
	}

	input_mt_assign_slots(input, slots, pos, n);

	for (i = 0; i < n; i++) {
		contact = &report_data.contacts[i];
		input_mt_slot(input, slots[i]);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, contact->x);
		input_report_abs(input, ABS_MT_POSITION_Y, contact->y);
		input_report_abs(input, ABS_MT_PRESSURE, contact->z);
	}

	input_mt_sync_frame(input);

	/* Report the full count (may exceed the slot count) for BTN_TOOL_*. */
	input_mt_report_finger_count(input, report_data.contact_cnt);

	input_report_key(input, BTN_LEFT, report_data.left);
	input_report_key(input, BTN_RIGHT, report_data.right);
	input_report_key(input, BTN_MIDDLE, report_data.middle);

	input_sync(input);
}
566
/*
 * Validate the byte just received and adjust the expected packet size
 * based on the header (first) byte; later bytes pass through unchecked.
 *
 * A header of all-zero count/button bits is the "all fingers left"
 * packet and is reported immediately as a full packet.
 */
static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
{
	int contact_cnt;
	int index = psmouse->pktcnt - 1;
	unsigned char *packet = psmouse->packet;
	struct cytp_data *cytp = psmouse->private;

	/*
	 * NOTE(review): valid indices are 0 .. pkt_size-1, so this bound
	 * accepts index == pkt_size — confirm whether ">=" was intended.
	 */
	if (index < 0 || index > cytp->pkt_size)
		return PSMOUSE_BAD_DATA;

	if (index == 0 && (packet[0] & 0xfc) == 0) {
		/* call packet process for reporting finger leave. */
		cypress_process_packet(psmouse, 1);
		return PSMOUSE_FULL_PACKET;
	}

	/*
	 * Perform validation (and adjust packet size) based only on the
	 * first byte; allow all further bytes through.
	 */
	if (index != 0)
		return PSMOUSE_GOOD_DATA;

	/*
	 * If absolute/relative mode bit has not been set yet, just pass
	 * the byte through.
	 */
	if ((cytp->mode & CYTP_BIT_ABS_REL_MASK) == 0)
		return PSMOUSE_GOOD_DATA;

	if ((packet[0] & 0x08) == 0x08)
		return PSMOUSE_BAD_DATA;

	contact_cnt = cypress_get_finger_count(packet[0]);

	if (contact_cnt < 0)
		return PSMOUSE_BAD_DATA;

	/* Two-finger packets carry an extra 3 bytes of coordinates. */
	if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
		cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
	else
		cypress_set_packet_size(psmouse, contact_cnt == 2 ? 8 : 5);

	return PSMOUSE_GOOD_DATA;
}
612
613static psmouse_ret_t cypress_protocol_handler(struct psmouse *psmouse)
614{
615 struct cytp_data *cytp = psmouse->private;
616
617 if (psmouse->pktcnt >= cytp->pkt_size) {
618 cypress_process_packet(psmouse, 0);
619 return PSMOUSE_FULL_PACKET;
620 }
621
622 return cypress_validate_byte(psmouse);
623}
624
625static void cypress_set_rate(struct psmouse *psmouse, unsigned int rate)
626{
627 struct cytp_data *cytp = psmouse->private;
628
629 if (rate >= 80) {
630 psmouse->rate = 80;
631 cytp->mode |= CYTP_BIT_HIGH_RATE;
632 } else {
633 psmouse->rate = 40;
634 cytp->mode &= ~CYTP_BIT_HIGH_RATE;
635 }
636
637 ps2_command(&psmouse->ps2dev, (unsigned char *)&psmouse->rate,
638 PSMOUSE_CMD_SETRATE);
639}
640
/*
 * psmouse disconnect callback: reset the pad back to its power-on mode
 * and release the per-device state.
 */
static void cypress_disconnect(struct psmouse *psmouse)
{
	cypress_reset(psmouse);
	kfree(psmouse->private);
	psmouse->private = NULL;
}
647
/*
 * psmouse reconnect callback (e.g. after resume): reset and re-detect
 * the pad, retrying a few times, then restore absolute mode.
 *
 * Returns 0 on success or -1 when the pad cannot be brought back.
 */
static int cypress_reconnect(struct psmouse *psmouse)
{
	int tries = CYTP_PS2_CMD_TRIES;
	int rc;

	do {
		cypress_reset(psmouse);
		rc = cypress_detect(psmouse, false);
	} while (rc && (--tries > 0));

	if (rc) {
		psmouse_err(psmouse, "Reconnect: unable to detect trackpad.\n");
		return -1;
	}

	if (cypress_set_absolute_mode(psmouse)) {
		psmouse_err(psmouse, "Reconnect: Unable to initialize Cypress absolute mode.\n");
		return -1;
	}

	return 0;
}
670
671int cypress_init(struct psmouse *psmouse)
672{
673 struct cytp_data *cytp;
674
675 cytp = (struct cytp_data *)kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
676 psmouse->private = (void *)cytp;
677 if (cytp == NULL)
678 return -ENOMEM;
679
680 cypress_reset(psmouse);
681
682 psmouse->pktsize = 8;
683
684 if (cypress_query_hardware(psmouse)) {
685 psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
686 goto err_exit;
687 }
688
689 if (cypress_set_absolute_mode(psmouse)) {
690 psmouse_err(psmouse, "init: Unable to initialize Cypress absolute mode.\n");
691 goto err_exit;
692 }
693
694 if (cypress_set_input_params(psmouse->dev, cytp) < 0) {
695 psmouse_err(psmouse, "init: Unable to set input params.\n");
696 goto err_exit;
697 }
698
699 psmouse->model = 1;
700 psmouse->protocol_handler = cypress_protocol_handler;
701 psmouse->set_rate = cypress_set_rate;
702 psmouse->disconnect = cypress_disconnect;
703 psmouse->reconnect = cypress_reconnect;
704 psmouse->cleanup = cypress_reset;
705 psmouse->resync_time = 0;
706
707 return 0;
708
709err_exit:
710 /*
711 * Reset Cypress Trackpad as a standard mouse. Then
712 * let psmouse driver commmunicating with it as default PS2 mouse.
713 */
714 cypress_reset(psmouse);
715
716 psmouse->private = NULL;
717 kfree(cytp);
718
719 return -1;
720}
721
/* Protocol availability hook for psmouse-base: always built in here. */
bool cypress_supported(void)
{
	return true;
}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
new file mode 100644
index 000000000000..4720f21d2d70
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -0,0 +1,191 @@
1#ifndef _CYPRESS_PS2_H
2#define _CYPRESS_PS2_H
3
4#include "psmouse.h"
5
6#define CMD_BITS_MASK 0x03
7#define COMPOSIT(x, s) (((x) & CMD_BITS_MASK) << (s))
8
9#define ENCODE_CMD(aa, bb, cc, dd) \
10 (COMPOSIT((aa), 6) | COMPOSIT((bb), 4) | COMPOSIT((cc), 2) | COMPOSIT((dd), 0))
11#define CYTP_CMD_ABS_NO_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 0)
12#define CYTP_CMD_ABS_WITH_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 1)
13#define CYTP_CMD_SMBUS_MODE ENCODE_CMD(0, 1, 1, 0)
14#define CYTP_CMD_STANDARD_MODE ENCODE_CMD(0, 2, 0, 0) /* not implemented yet. */
15#define CYTP_CMD_CYPRESS_REL_MODE ENCODE_CMD(1, 1, 1, 1) /* not implemented yet. */
16#define CYTP_CMD_READ_CYPRESS_ID ENCODE_CMD(0, 0, 0, 0)
17#define CYTP_CMD_READ_TP_METRICS ENCODE_CMD(0, 0, 0, 1)
18#define CYTP_CMD_SET_HSCROLL_WIDTH(w) ENCODE_CMD(1, 1, 0, (w))
19#define CYTP_CMD_SET_HSCROLL_MASK ENCODE_CMD(1, 1, 0, 0)
20#define CYTP_CMD_SET_VSCROLL_WIDTH(w) ENCODE_CMD(1, 2, 0, (w))
21#define CYTP_CMD_SET_VSCROLL_MASK ENCODE_CMD(1, 2, 0, 0)
22#define CYTP_CMD_SET_PALM_GEOMETRY(e) ENCODE_CMD(1, 2, 1, (e))
23#define CYTP_CMD_PALM_GEMMETRY_MASK ENCODE_CMD(1, 2, 1, 0)
24#define CYTP_CMD_SET_PALM_SENSITIVITY(s) ENCODE_CMD(1, 2, 2, (s))
25#define CYTP_CMD_PALM_SENSITIVITY_MASK ENCODE_CMD(1, 2, 2, 0)
26#define CYTP_CMD_SET_MOUSE_SENSITIVITY(s) ENCODE_CMD(1, 3, ((s) >> 2), (s))
27#define CYTP_CMD_MOUSE_SENSITIVITY_MASK ENCODE_CMD(1, 3, 0, 0)
28#define CYTP_CMD_REQUEST_BASELINE_STATUS ENCODE_CMD(2, 0, 0, 1)
29#define CYTP_CMD_REQUEST_RECALIBRATION ENCODE_CMD(2, 0, 0, 3)
30
31#define DECODE_CMD_AA(x) (((x) >> 6) & CMD_BITS_MASK)
32#define DECODE_CMD_BB(x) (((x) >> 4) & CMD_BITS_MASK)
33#define DECODE_CMD_CC(x) (((x) >> 2) & CMD_BITS_MASK)
34#define DECODE_CMD_DD(x) ((x) & CMD_BITS_MASK)
35
36/* Cypress trackpad working mode. */
37#define CYTP_BIT_ABS_PRESSURE (1 << 3)
38#define CYTP_BIT_ABS_NO_PRESSURE (1 << 2)
39#define CYTP_BIT_CYPRESS_REL (1 << 1)
40#define CYTP_BIT_STANDARD_REL (1 << 0)
41#define CYTP_BIT_REL_MASK (CYTP_BIT_CYPRESS_REL | CYTP_BIT_STANDARD_REL)
42#define CYTP_BIT_ABS_MASK (CYTP_BIT_ABS_PRESSURE | CYTP_BIT_ABS_NO_PRESSURE)
43#define CYTP_BIT_ABS_REL_MASK (CYTP_BIT_ABS_MASK | CYTP_BIT_REL_MASK)
44
45#define CYTP_BIT_HIGH_RATE (1 << 4)
46/*
47 * report mode bit is set, firmware working in Remote Mode.
48 * report mode bit is cleared, firmware working in Stream Mode.
49 */
50#define CYTP_BIT_REPORT_MODE (1 << 5)
51
52/* scrolling width values for set HSCROLL and VSCROLL width command. */
53#define SCROLL_WIDTH_NARROW 1
54#define SCROLL_WIDTH_NORMAL 2
55#define SCROLL_WIDTH_WIDE 3
56
57#define PALM_GEOMETRY_ENABLE 1
58#define PALM_GEOMETRY_DISABLE 0
59
60#define TP_METRICS_MASK 0x80
61#define FW_VERSION_MASX 0x7f
62#define FW_VER_HIGH_MASK 0x70
63#define FW_VER_LOW_MASK 0x0f
64
65/* Times to retry a ps2_command and millisecond delay between tries. */
66#define CYTP_PS2_CMD_TRIES 3
67#define CYTP_PS2_CMD_DELAY 500
68
69/* time out for PS/2 command only in milliseconds. */
70#define CYTP_CMD_TIMEOUT 200
71#define CYTP_DATA_TIMEOUT 30
72
73#define CYTP_EXT_CMD 0xe8
74#define CYTP_PS2_RETRY 0xfe
75#define CYTP_PS2_ERROR 0xfc
76
77#define CYTP_RESP_RETRY 0x01
78#define CYTP_RESP_ERROR 0xfe
79
80
81#define CYTP_105001_WIDTH 97 /* Dell XPS 13 */
82#define CYTP_105001_HIGH 59
83#define CYTP_DEFAULT_WIDTH (CYTP_105001_WIDTH)
84#define CYTP_DEFAULT_HIGH (CYTP_105001_HIGH)
85
86#define CYTP_ABS_MAX_X 1600
87#define CYTP_ABS_MAX_Y 900
88#define CYTP_MAX_PRESSURE 255
89#define CYTP_MIN_PRESSURE 0
90
91/* header byte bits of relative package. */
92#define BTN_LEFT_BIT 0x01
93#define BTN_RIGHT_BIT 0x02
94#define BTN_MIDDLE_BIT 0x04
95#define REL_X_SIGN_BIT 0x10
96#define REL_Y_SIGN_BIT 0x20
97
98/* header byte bits of absolute package. */
99#define ABS_VSCROLL_BIT 0x10
100#define ABS_HSCROLL_BIT 0x20
101#define ABS_MULTIFINGER_TAP 0x04
102#define ABS_EDGE_MOTION_MASK 0x80
103
104#define DFLT_RESP_BITS_VALID 0x88 /* SMBus bit should not be set. */
105#define DFLT_RESP_SMBUS_BIT 0x80
106#define DFLT_SMBUS_MODE 0x80
107#define DFLT_PS2_MODE 0x00
108#define DFLT_RESP_BIT_MODE 0x40
109#define DFLT_RESP_REMOTE_MODE 0x40
110#define DFLT_RESP_STREAM_MODE 0x00
111#define DFLT_RESP_BIT_REPORTING 0x20
112#define DFLT_RESP_BIT_SCALING 0x10
113
114#define TP_METRICS_BIT_PALM 0x80
115#define TP_METRICS_BIT_STUBBORN 0x40
116#define TP_METRICS_BIT_2F_JITTER 0x30
117#define TP_METRICS_BIT_1F_JITTER 0x0c
118#define TP_METRICS_BIT_APA 0x02
119#define TP_METRICS_BIT_MTG 0x01
120#define TP_METRICS_BIT_ABS_PKT_FORMAT_SET 0xf0
121#define TP_METRICS_BIT_2F_SPIKE 0x0c
122#define TP_METRICS_BIT_1F_SPIKE 0x03
123
124/* bits of first byte response of E9h-Status Request command. */
125#define RESP_BTN_RIGHT_BIT 0x01
126#define RESP_BTN_MIDDLE_BIT 0x02
127#define RESP_BTN_LEFT_BIT 0x04
128#define RESP_SCALING_BIT 0x10
129#define RESP_ENABLE_BIT 0x20
130#define RESP_REMOTE_BIT 0x40
131#define RESP_SMBUS_BIT 0x80
132
133#define CYTP_MAX_MT_SLOTS 2
134
/* One reported finger contact in absolute coordinates. */
struct cytp_contact {
	int x;	/* absolute X position */
	int y;	/* absolute Y position */
	int z; /* also named as touch pressure. */
};
140
/* The structure of Cypress Trackpad event data (one decoded report). */
struct cytp_report_data {
	int contact_cnt;	/* number of valid entries in contacts[] */
	struct cytp_contact contacts[CYTP_MAX_MT_SLOTS];
	unsigned int left:1;	/* left button pressed */
	unsigned int right:1;	/* right button pressed */
	unsigned int middle:1;	/* middle button pressed */
	unsigned int tap:1; /* multi-finger tap detected. */
};
150
/* The structure of Cypress Trackpad device private data. */
struct cytp_data {
	int fw_version;		/* firmware version read from the device */

	int pkt_size;		/* size of one report packet, in bytes */
	int mode;		/* current working mode (CYTP_BIT_* flags) */

	int tp_min_pressure;	/* lowest reportable contact pressure */
	int tp_max_pressure;	/* highest reportable contact pressure */
	int tp_width;	/* X direction physical size in mm. */
	int tp_high;	/* Y direction physical size in mm. */
	int tp_max_abs_x; 	/* Max X absolute units that can be reported. */
	int tp_max_abs_y;	/* Max Y absolute units that can be reported. */

	int tp_res_x;	/* X resolution in units/mm. */
	int tp_res_y;	/* Y resolution in units/mm. */

	/* presumably nonzero when the READ_TP_METRICS query succeeded --
	 * verify against the .c file's query path. */
	int tp_metrics_supported;
};
170
171
172#ifdef CONFIG_MOUSE_PS2_CYPRESS
173int cypress_detect(struct psmouse *psmouse, bool set_properties);
174int cypress_init(struct psmouse *psmouse);
175bool cypress_supported(void);
176#else
177inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
178{
179 return -ENOSYS;
180}
181inline int cypress_init(struct psmouse *psmouse)
182{
183 return -ENOSYS;
184}
185inline bool cypress_supported(void)
186{
187 return 0;
188}
189#endif /* CONFIG_MOUSE_PS2_CYPRESS */
190
191#endif /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 22fe2547e169..cff065f6261c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -34,6 +34,7 @@
34#include "touchkit_ps2.h" 34#include "touchkit_ps2.h"
35#include "elantech.h" 35#include "elantech.h"
36#include "sentelic.h" 36#include "sentelic.h"
37#include "cypress_ps2.h"
37 38
38#define DRIVER_DESC "PS/2 mouse driver" 39#define DRIVER_DESC "PS/2 mouse driver"
39 40
@@ -759,6 +760,28 @@ static int psmouse_extensions(struct psmouse *psmouse,
759 } 760 }
760 761
761/* 762/*
763 * Try Cypress Trackpad.
764 * Must try it before Finger Sensing Pad because Finger Sensing Pad probe
765 * upsets some modules of Cypress Trackpads.
766 */
767 if (max_proto > PSMOUSE_IMEX &&
768 cypress_detect(psmouse, set_properties) == 0) {
769 if (cypress_supported()) {
770 if (cypress_init(psmouse) == 0)
771 return PSMOUSE_CYPRESS;
772
773 /*
774 * Finger Sensing Pad probe upsets some modules of
775 * Cypress Trackpad, must avoid Finger Sensing Pad
776 * probe if Cypress Trackpad device detected.
777 */
778 return PSMOUSE_PS2;
779 }
780
781 max_proto = PSMOUSE_IMEX;
782 }
783
784/*
762 * Try ALPS TouchPad 785 * Try ALPS TouchPad
763 */ 786 */
764 if (max_proto > PSMOUSE_IMEX) { 787 if (max_proto > PSMOUSE_IMEX) {
@@ -896,6 +919,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
896 .alias = "thinkps", 919 .alias = "thinkps",
897 .detect = thinking_detect, 920 .detect = thinking_detect,
898 }, 921 },
922#ifdef CONFIG_MOUSE_PS2_CYPRESS
923 {
924 .type = PSMOUSE_CYPRESS,
925 .name = "CyPS/2",
926 .alias = "cypress",
927 .detect = cypress_detect,
928 .init = cypress_init,
929 },
930#endif
899 { 931 {
900 .type = PSMOUSE_GENPS, 932 .type = PSMOUSE_GENPS,
901 .name = "GenPS/2", 933 .name = "GenPS/2",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index fe1df231ba4c..2f0b39d59a9b 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -95,6 +95,7 @@ enum psmouse_type {
95 PSMOUSE_ELANTECH, 95 PSMOUSE_ELANTECH,
96 PSMOUSE_FSP, 96 PSMOUSE_FSP,
97 PSMOUSE_SYNAPTICS_RELATIVE, 97 PSMOUSE_SYNAPTICS_RELATIVE,
98 PSMOUSE_CYPRESS,
98 PSMOUSE_AUTO /* This one should always be last */ 99 PSMOUSE_AUTO /* This one should always be last */
99}; 100};
100 101
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 12d12ca3fee0..2f78538e09d0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -722,11 +722,13 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
722 default: 722 default:
723 /* 723 /*
724 * If the finger slot contained in SGM is valid, and either 724 * If the finger slot contained in SGM is valid, and either
725 * hasn't changed, or is new, then report SGM in MTB slot 0. 725 * hasn't changed, or is new, or the old SGM has now moved to
726 * AGM, then report SGM in MTB slot 0.
726 * Otherwise, empty MTB slot 0. 727 * Otherwise, empty MTB slot 0.
727 */ 728 */
728 if (mt_state->sgm != -1 && 729 if (mt_state->sgm != -1 &&
729 (mt_state->sgm == old->sgm || old->sgm == -1)) 730 (mt_state->sgm == old->sgm ||
731 old->sgm == -1 || mt_state->agm == old->sgm))
730 synaptics_report_slot(dev, 0, sgm); 732 synaptics_report_slot(dev, 0, sgm);
731 else 733 else
732 synaptics_report_slot(dev, 0, NULL); 734 synaptics_report_slot(dev, 0, NULL);
@@ -735,9 +737,31 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
735 * If the finger slot contained in AGM is valid, and either 737 * If the finger slot contained in AGM is valid, and either
736 * hasn't changed, or is new, then report AGM in MTB slot 1. 738 * hasn't changed, or is new, then report AGM in MTB slot 1.
737 * Otherwise, empty MTB slot 1. 739 * Otherwise, empty MTB slot 1.
740 *
741 * However, in the case where the AGM is new, make sure that
742 * that it is either the same as the old SGM, or there was no
743 * SGM.
744 *
745 * Otherwise, if the SGM was just 1, and the new AGM is 2, then
746 * the new AGM will keep the old SGM's tracking ID, which can
747 * cause apparent drumroll. This happens if in the following
748 * valid finger sequence:
749 *
750 * Action SGM AGM (MTB slot:Contact)
751 * 1. Touch contact 0 (0:0)
752 * 2. Touch contact 1 (0:0, 1:1)
753 * 3. Lift contact 0 (1:1)
754 * 4. Touch contacts 2,3 (0:2, 1:3)
755 *
756 * In step 4, contact 3, in AGM must not be given the same
757 * tracking ID as contact 1 had in step 3. To avoid this,
758 * the first agm with contact 3 is dropped and slot 1 is
759 * invalidated (tracking ID = -1).
738 */ 760 */
739 if (mt_state->agm != -1 && 761 if (mt_state->agm != -1 &&
740 (mt_state->agm == old->agm || old->agm == -1)) 762 (mt_state->agm == old->agm ||
763 (old->agm == -1 &&
764 (old->sgm == -1 || mt_state->agm == old->sgm))))
741 synaptics_report_slot(dev, 1, agm); 765 synaptics_report_slot(dev, 1, agm);
742 else 766 else
743 synaptics_report_slot(dev, 1, NULL); 767 synaptics_report_slot(dev, 1, NULL);
@@ -1247,11 +1271,11 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
1247 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); 1271 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
1248 1272
1249 if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) { 1273 if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
1250 input_mt_init_slots(dev, 2, 0);
1251 set_abs_position_params(dev, priv, ABS_MT_POSITION_X, 1274 set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
1252 ABS_MT_POSITION_Y); 1275 ABS_MT_POSITION_Y);
1253 /* Image sensors can report per-contact pressure */ 1276 /* Image sensors can report per-contact pressure */
1254 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0); 1277 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
1278 input_mt_init_slots(dev, 2, INPUT_MT_POINTER);
1255 1279
1256 /* Image sensors can signal 4 and 5 finger clicks */ 1280 /* Image sensors can signal 4 and 5 finger clicks */
1257 __set_bit(BTN_TOOL_QUADTAP, dev->keybit); 1281 __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 4a4e182c33e7..560c243bfcaf 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -236,6 +236,7 @@ config SERIO_PS2MULT
236 236
237config SERIO_ARC_PS2 237config SERIO_ARC_PS2
238 tristate "ARC PS/2 support" 238 tristate "ARC PS/2 support"
239 depends on GENERIC_HARDIRQS
239 help 240 help
240 Say Y here if you have an ARC FPGA platform with a PS/2 241 Say Y here if you have an ARC FPGA platform with a PS/2
241 controller in it. 242 controller in it.
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index f92d34f45a1c..aaf23aeae2ea 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -553,10 +553,10 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
553 if (!rep_data) 553 if (!rep_data)
554 return error; 554 return error;
555 555
556 rep_data[0] = report_id;
557 rep_data[1] = mode;
558
559 do { 556 do {
557 rep_data[0] = report_id;
558 rep_data[1] = mode;
559
560 error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT, 560 error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
561 report_id, rep_data, length, 1); 561 report_id, rep_data, length, 1);
562 if (error >= 0) 562 if (error >= 0)
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 264138f3217e..41b6fbf60112 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -359,6 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
359 case 0x802: /* Intuos4 General Pen */ 359 case 0x802: /* Intuos4 General Pen */
360 case 0x804: /* Intuos4 Marker Pen */ 360 case 0x804: /* Intuos4 Marker Pen */
361 case 0x40802: /* Intuos4 Classic Pen */ 361 case 0x40802: /* Intuos4 Classic Pen */
362 case 0x18803: /* DTH2242 Grip Pen */
362 case 0x022: 363 case 0x022:
363 wacom->tool[idx] = BTN_TOOL_PEN; 364 wacom->tool[idx] = BTN_TOOL_PEN;
364 break; 365 break;
@@ -538,6 +539,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
538 input_report_key(input, wacom->tool[1], 0); 539 input_report_key(input, wacom->tool[1], 0);
539 input_report_abs(input, ABS_MISC, 0); 540 input_report_abs(input, ABS_MISC, 0);
540 } 541 }
542 } else if (features->type == DTK) {
543 input_report_key(input, BTN_0, (data[6] & 0x01));
544 input_report_key(input, BTN_1, (data[6] & 0x02));
545 input_report_key(input, BTN_2, (data[6] & 0x04));
546 input_report_key(input, BTN_3, (data[6] & 0x08));
547 input_report_key(input, BTN_4, (data[6] & 0x10));
548 input_report_key(input, BTN_5, (data[6] & 0x20));
541 } else if (features->type == WACOM_24HD) { 549 } else if (features->type == WACOM_24HD) {
542 input_report_key(input, BTN_0, (data[6] & 0x01)); 550 input_report_key(input, BTN_0, (data[6] & 0x01));
543 input_report_key(input, BTN_1, (data[6] & 0x02)); 551 input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -785,25 +793,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
785 return 1; 793 return 1;
786} 794}
787 795
788static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
789{
790 int touch_max = wacom->features.touch_max;
791 int i;
792
793 if (!wacom->slots)
794 return -1;
795
796 for (i = 0; i < touch_max; ++i) {
797 if (wacom->slots[i] == contactid)
798 return i;
799 }
800 for (i = 0; i < touch_max; ++i) {
801 if (wacom->slots[i] == -1)
802 return i;
803 }
804 return -1;
805}
806
807static int int_dist(int x1, int y1, int x2, int y2) 796static int int_dist(int x1, int y1, int x2, int y2)
808{ 797{
809 int x = x2 - x1; 798 int x = x2 - x1;
@@ -833,8 +822,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
833 for (i = 0; i < contacts_to_send; i++) { 822 for (i = 0; i < contacts_to_send; i++) {
834 int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1; 823 int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
835 bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity; 824 bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
836 int id = data[offset + 1]; 825 int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
837 int slot = find_slot_from_contactid(wacom, id);
838 826
839 if (slot < 0) 827 if (slot < 0)
840 continue; 828 continue;
@@ -856,9 +844,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
856 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); 844 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
857 input_report_abs(input, ABS_MT_ORIENTATION, w > h); 845 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
858 } 846 }
859 wacom->slots[slot] = touch ? id : -1;
860 } 847 }
861
862 input_mt_report_pointer_emulation(input, true); 848 input_mt_report_pointer_emulation(input, true);
863 849
864 wacom->num_contacts_left -= contacts_to_send; 850 wacom->num_contacts_left -= contacts_to_send;
@@ -895,7 +881,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
895 int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3; 881 int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
896 bool touch = data[offset] & 0x1; 882 bool touch = data[offset] & 0x1;
897 int id = le16_to_cpup((__le16 *)&data[offset + 1]); 883 int id = le16_to_cpup((__le16 *)&data[offset + 1]);
898 int slot = find_slot_from_contactid(wacom, id); 884 int slot = input_mt_get_slot_by_key(input, id);
899 885
900 if (slot < 0) 886 if (slot < 0)
901 continue; 887 continue;
@@ -908,9 +894,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
908 input_report_abs(input, ABS_MT_POSITION_X, x); 894 input_report_abs(input, ABS_MT_POSITION_X, x);
909 input_report_abs(input, ABS_MT_POSITION_Y, y); 895 input_report_abs(input, ABS_MT_POSITION_Y, y);
910 } 896 }
911 wacom->slots[slot] = touch ? id : -1;
912 } 897 }
913
914 input_mt_report_pointer_emulation(input, true); 898 input_mt_report_pointer_emulation(input, true);
915 899
916 wacom->num_contacts_left -= contacts_to_send; 900 wacom->num_contacts_left -= contacts_to_send;
@@ -942,12 +926,11 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
942 contact_with_no_pen_down_count++; 926 contact_with_no_pen_down_count++;
943 } 927 }
944 } 928 }
929 input_mt_report_pointer_emulation(input, true);
945 930
946 /* keep touch state for pen event */ 931 /* keep touch state for pen event */
947 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); 932 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
948 933
949 input_mt_report_pointer_emulation(input, true);
950
951 return 1; 934 return 1;
952} 935}
953 936
@@ -1104,12 +1087,15 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1104static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) 1087static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1105{ 1088{
1106 struct input_dev *input = wacom->input; 1089 struct input_dev *input = wacom->input;
1107 int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
1108 bool touch = data[1] & 0x80; 1090 bool touch = data[1] & 0x80;
1091 int slot = input_mt_get_slot_by_key(input, data[0]);
1092
1093 if (slot < 0)
1094 return;
1109 1095
1110 touch = touch && !wacom->shared->stylus_in_proximity; 1096 touch = touch && !wacom->shared->stylus_in_proximity;
1111 1097
1112 input_mt_slot(input, slot_id); 1098 input_mt_slot(input, slot);
1113 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); 1099 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
1114 1100
1115 if (touch) { 1101 if (touch) {
@@ -1162,7 +1148,6 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1162 wacom_bpt3_button_msg(wacom, data + offset); 1148 wacom_bpt3_button_msg(wacom, data + offset);
1163 1149
1164 } 1150 }
1165
1166 input_mt_report_pointer_emulation(input, true); 1151 input_mt_report_pointer_emulation(input, true);
1167 1152
1168 input_sync(input); 1153 input_sync(input);
@@ -1319,6 +1304,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1319 case WACOM_21UX2: 1304 case WACOM_21UX2:
1320 case WACOM_22HD: 1305 case WACOM_22HD:
1321 case WACOM_24HD: 1306 case WACOM_24HD:
1307 case DTK:
1322 sync = wacom_intuos_irq(wacom_wac); 1308 sync = wacom_intuos_irq(wacom_wac);
1323 break; 1309 break;
1324 1310
@@ -1444,39 +1430,64 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
1444 return (logical_max * 100) / physical_max; 1430 return (logical_max * 100) / physical_max;
1445} 1431}
1446 1432
1447int wacom_setup_input_capabilities(struct input_dev *input_dev, 1433static void wacom_abs_set_axis(struct input_dev *input_dev,
1448 struct wacom_wac *wacom_wac) 1434 struct wacom_wac *wacom_wac)
1449{ 1435{
1450 struct wacom_features *features = &wacom_wac->features; 1436 struct wacom_features *features = &wacom_wac->features;
1451 int i;
1452
1453 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1454
1455 __set_bit(BTN_TOUCH, input_dev->keybit);
1456
1457 input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
1458 features->x_fuzz, 0);
1459 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
1460 features->y_fuzz, 0);
1461 1437
1462 if (features->device_type == BTN_TOOL_PEN) { 1438 if (features->device_type == BTN_TOOL_PEN) {
1463 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 1439 input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
1464 features->pressure_fuzz, 0); 1440 features->x_fuzz, 0);
1441 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
1442 features->y_fuzz, 0);
1443 input_set_abs_params(input_dev, ABS_PRESSURE, 0,
1444 features->pressure_max, features->pressure_fuzz, 0);
1465 1445
1466 /* penabled devices have fixed resolution for each model */ 1446 /* penabled devices have fixed resolution for each model */
1467 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1447 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1468 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1448 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
1469 } else { 1449 } else {
1470 input_abs_set_res(input_dev, ABS_X, 1450 if (features->touch_max <= 2) {
1471 wacom_calculate_touch_res(features->x_max, 1451 input_set_abs_params(input_dev, ABS_X, 0,
1472 features->x_phy)); 1452 features->x_max, features->x_fuzz, 0);
1473 input_abs_set_res(input_dev, ABS_Y, 1453 input_set_abs_params(input_dev, ABS_Y, 0,
1474 wacom_calculate_touch_res(features->y_max, 1454 features->y_max, features->y_fuzz, 0);
1475 features->y_phy)); 1455 input_abs_set_res(input_dev, ABS_X,
1456 wacom_calculate_touch_res(features->x_max,
1457 features->x_phy));
1458 input_abs_set_res(input_dev, ABS_Y,
1459 wacom_calculate_touch_res(features->y_max,
1460 features->y_phy));
1461 }
1462
1463 if (features->touch_max > 1) {
1464 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
1465 features->x_max, features->x_fuzz, 0);
1466 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
1467 features->y_max, features->y_fuzz, 0);
1468 input_abs_set_res(input_dev, ABS_MT_POSITION_X,
1469 wacom_calculate_touch_res(features->x_max,
1470 features->x_phy));
1471 input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
1472 wacom_calculate_touch_res(features->y_max,
1473 features->y_phy));
1474 }
1476 } 1475 }
1476}
1477 1477
1478int wacom_setup_input_capabilities(struct input_dev *input_dev,
1479 struct wacom_wac *wacom_wac)
1480{
1481 struct wacom_features *features = &wacom_wac->features;
1482 int i;
1483
1484 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1485
1486 __set_bit(BTN_TOUCH, input_dev->keybit);
1478 __set_bit(ABS_MISC, input_dev->absbit); 1487 __set_bit(ABS_MISC, input_dev->absbit);
1479 1488
1489 wacom_abs_set_axis(input_dev, wacom_wac);
1490
1480 switch (wacom_wac->features.type) { 1491 switch (wacom_wac->features.type) {
1481 case WACOM_MO: 1492 case WACOM_MO:
1482 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); 1493 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
@@ -1513,12 +1524,17 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1513 __set_bit(BTN_Y, input_dev->keybit); 1524 __set_bit(BTN_Y, input_dev->keybit);
1514 __set_bit(BTN_Z, input_dev->keybit); 1525 __set_bit(BTN_Z, input_dev->keybit);
1515 1526
1516 for (i = 0; i < 10; i++) 1527 for (i = 6; i < 10; i++)
1517 __set_bit(BTN_0 + i, input_dev->keybit); 1528 __set_bit(BTN_0 + i, input_dev->keybit);
1518 1529
1519 __set_bit(KEY_PROG1, input_dev->keybit); 1530 __set_bit(KEY_PROG1, input_dev->keybit);
1520 __set_bit(KEY_PROG2, input_dev->keybit); 1531 __set_bit(KEY_PROG2, input_dev->keybit);
1521 __set_bit(KEY_PROG3, input_dev->keybit); 1532 __set_bit(KEY_PROG3, input_dev->keybit);
1533 /* fall through */
1534
1535 case DTK:
1536 for (i = 0; i < 6; i++)
1537 __set_bit(BTN_0 + i, input_dev->keybit);
1522 1538
1523 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1539 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1524 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); 1540 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
@@ -1614,24 +1630,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1614 } else if (features->device_type == BTN_TOOL_FINGER) { 1630 } else if (features->device_type == BTN_TOOL_FINGER) {
1615 __clear_bit(ABS_MISC, input_dev->absbit); 1631 __clear_bit(ABS_MISC, input_dev->absbit);
1616 1632
1617 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1618 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1619 __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
1620 __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
1621
1622 input_mt_init_slots(input_dev, features->touch_max, 0);
1623
1624 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 1633 input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
1625 0, features->x_max, 0, 0); 1634 0, features->x_max, 0, 0);
1626 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 1635 input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
1627 0, features->y_max, 0, 0); 1636 0, features->y_max, 0, 0);
1628 1637 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
1629 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
1630 0, features->x_max,
1631 features->x_fuzz, 0);
1632 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1633 0, features->y_max,
1634 features->y_fuzz, 0);
1635 } 1638 }
1636 break; 1639 break;
1637 1640
@@ -1662,27 +1665,14 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1662 1665
1663 case MTSCREEN: 1666 case MTSCREEN:
1664 case MTTPC: 1667 case MTTPC:
1665 if (features->device_type == BTN_TOOL_FINGER) {
1666 wacom_wac->slots = kmalloc(features->touch_max *
1667 sizeof(int),
1668 GFP_KERNEL);
1669 if (!wacom_wac->slots)
1670 return -ENOMEM;
1671
1672 for (i = 0; i < features->touch_max; i++)
1673 wacom_wac->slots[i] = -1;
1674 }
1675 /* fall through */
1676
1677 case TABLETPC2FG: 1668 case TABLETPC2FG:
1678 if (features->device_type == BTN_TOOL_FINGER) { 1669 if (features->device_type == BTN_TOOL_FINGER) {
1679 input_mt_init_slots(input_dev, features->touch_max, 0); 1670 unsigned int flags = INPUT_MT_DIRECT;
1680 input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE, 1671
1681 0, MT_TOOL_MAX, 0, 0); 1672 if (wacom_wac->features.type == TABLETPC2FG)
1682 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 1673 flags = 0;
1683 0, features->x_max, 0, 0); 1674
1684 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 1675 input_mt_init_slots(input_dev, features->touch_max, flags);
1685 0, features->y_max, 0, 0);
1686 } 1676 }
1687 /* fall through */ 1677 /* fall through */
1688 1678
@@ -1725,35 +1715,26 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1725 __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1715 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1726 1716
1727 if (features->device_type == BTN_TOOL_FINGER) { 1717 if (features->device_type == BTN_TOOL_FINGER) {
1718 unsigned int flags = INPUT_MT_POINTER;
1719
1728 __set_bit(BTN_LEFT, input_dev->keybit); 1720 __set_bit(BTN_LEFT, input_dev->keybit);
1729 __set_bit(BTN_FORWARD, input_dev->keybit); 1721 __set_bit(BTN_FORWARD, input_dev->keybit);
1730 __set_bit(BTN_BACK, input_dev->keybit); 1722 __set_bit(BTN_BACK, input_dev->keybit);
1731 __set_bit(BTN_RIGHT, input_dev->keybit); 1723 __set_bit(BTN_RIGHT, input_dev->keybit);
1732 1724
1733 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1734 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1735 input_mt_init_slots(input_dev, features->touch_max, 0);
1736
1737 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 1725 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1738 __set_bit(BTN_TOOL_TRIPLETAP,
1739 input_dev->keybit);
1740 __set_bit(BTN_TOOL_QUADTAP,
1741 input_dev->keybit);
1742
1743 input_set_abs_params(input_dev, 1726 input_set_abs_params(input_dev,
1744 ABS_MT_TOUCH_MAJOR, 1727 ABS_MT_TOUCH_MAJOR,
1745 0, features->x_max, 0, 0); 1728 0, features->x_max, 0, 0);
1746 input_set_abs_params(input_dev, 1729 input_set_abs_params(input_dev,
1747 ABS_MT_TOUCH_MINOR, 1730 ABS_MT_TOUCH_MINOR,
1748 0, features->y_max, 0, 0); 1731 0, features->y_max, 0, 0);
1732 } else {
1733 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1734 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1735 flags = 0;
1749 } 1736 }
1750 1737 input_mt_init_slots(input_dev, features->touch_max, flags);
1751 input_set_abs_params(input_dev, ABS_MT_POSITION_X,
1752 0, features->x_max,
1753 features->x_fuzz, 0);
1754 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1755 0, features->y_max,
1756 features->y_fuzz, 0);
1757 } else if (features->device_type == BTN_TOOL_PEN) { 1738 } else if (features->device_type == BTN_TOOL_PEN) {
1758 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1739 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1759 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1740 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1978,6 +1959,13 @@ static const struct wacom_features wacom_features_0xCE =
1978static const struct wacom_features wacom_features_0xF0 = 1959static const struct wacom_features wacom_features_0xF0 =
1979 { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 1960 { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
1980 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1961 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1962static const struct wacom_features wacom_features_0x59 = /* Pen */
1963 { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
1964 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
1965 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
1966static const struct wacom_features wacom_features_0x5D = /* Touch */
1967 { "Wacom DTH2242", .type = WACOM_24HDT,
1968 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
1981static const struct wacom_features wacom_features_0xCC = 1969static const struct wacom_features wacom_features_0xCC =
1982 { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047, 1970 { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047,
1983 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1971 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2152,6 +2140,8 @@ const struct usb_device_id wacom_ids[] = {
2152 { USB_DEVICE_WACOM(0x43) }, 2140 { USB_DEVICE_WACOM(0x43) },
2153 { USB_DEVICE_WACOM(0x44) }, 2141 { USB_DEVICE_WACOM(0x44) },
2154 { USB_DEVICE_WACOM(0x45) }, 2142 { USB_DEVICE_WACOM(0x45) },
2143 { USB_DEVICE_WACOM(0x59) },
2144 { USB_DEVICE_WACOM(0x5D) },
2155 { USB_DEVICE_WACOM(0xB0) }, 2145 { USB_DEVICE_WACOM(0xB0) },
2156 { USB_DEVICE_WACOM(0xB1) }, 2146 { USB_DEVICE_WACOM(0xB1) },
2157 { USB_DEVICE_WACOM(0xB2) }, 2147 { USB_DEVICE_WACOM(0xB2) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 9396d7769f86..5f9a7721e16c 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -78,6 +78,7 @@ enum {
78 INTUOS5L, 78 INTUOS5L,
79 WACOM_21UX2, 79 WACOM_21UX2,
80 WACOM_22HD, 80 WACOM_22HD,
81 DTK,
81 WACOM_24HD, 82 WACOM_24HD,
82 CINTIQ, 83 CINTIQ,
83 WACOM_BEE, 84 WACOM_BEE,
@@ -135,7 +136,6 @@ struct wacom_wac {
135 int pid; 136 int pid;
136 int battery_capacity; 137 int battery_capacity;
137 int num_contacts_left; 138 int num_contacts_left;
138 int *slots;
139}; 139};
140 140
141#endif 141#endif
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 515cfe790543..f9a5fd89bc02 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -359,7 +359,7 @@ config TOUCHSCREEN_MCS5000
359 359
360config TOUCHSCREEN_MMS114 360config TOUCHSCREEN_MMS114
361 tristate "MELFAS MMS114 touchscreen" 361 tristate "MELFAS MMS114 touchscreen"
362 depends on I2C 362 depends on I2C && GENERIC_HARDIRQS
363 help 363 help
364 Say Y here if you have the MELFAS MMS114 touchscreen controller 364 Say Y here if you have the MELFAS MMS114 touchscreen controller
365 chip in your system. 365 chip in your system.
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 638e20310f12..861b7f77605b 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -193,7 +193,6 @@ static struct spi_driver cyttsp_spi_driver = {
193 193
194module_spi_driver(cyttsp_spi_driver); 194module_spi_driver(cyttsp_spi_driver);
195 195
196MODULE_ALIAS("spi:cyttsp");
197MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
198MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver"); 197MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
199MODULE_AUTHOR("Cypress"); 198MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 98841d8aa635..4a29ddf6bf1e 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -429,12 +429,12 @@ static int mms114_probe(struct i2c_client *client,
429 return -ENODEV; 429 return -ENODEV;
430 } 430 }
431 431
432 data = kzalloc(sizeof(struct mms114_data), GFP_KERNEL); 432 data = devm_kzalloc(&client->dev, sizeof(struct mms114_data),
433 input_dev = input_allocate_device(); 433 GFP_KERNEL);
434 input_dev = devm_input_allocate_device(&client->dev);
434 if (!data || !input_dev) { 435 if (!data || !input_dev) {
435 dev_err(&client->dev, "Failed to allocate memory\n"); 436 dev_err(&client->dev, "Failed to allocate memory\n");
436 error = -ENOMEM; 437 return -ENOMEM;
437 goto err_free_mem;
438 } 438 }
439 439
440 data->client = client; 440 data->client = client;
@@ -466,57 +466,36 @@ static int mms114_probe(struct i2c_client *client,
466 input_set_drvdata(input_dev, data); 466 input_set_drvdata(input_dev, data);
467 i2c_set_clientdata(client, data); 467 i2c_set_clientdata(client, data);
468 468
469 data->core_reg = regulator_get(&client->dev, "avdd"); 469 data->core_reg = devm_regulator_get(&client->dev, "avdd");
470 if (IS_ERR(data->core_reg)) { 470 if (IS_ERR(data->core_reg)) {
471 error = PTR_ERR(data->core_reg); 471 error = PTR_ERR(data->core_reg);
472 dev_err(&client->dev, 472 dev_err(&client->dev,
473 "Unable to get the Core regulator (%d)\n", error); 473 "Unable to get the Core regulator (%d)\n", error);
474 goto err_free_mem; 474 return error;
475 } 475 }
476 476
477 data->io_reg = regulator_get(&client->dev, "vdd"); 477 data->io_reg = devm_regulator_get(&client->dev, "vdd");
478 if (IS_ERR(data->io_reg)) { 478 if (IS_ERR(data->io_reg)) {
479 error = PTR_ERR(data->io_reg); 479 error = PTR_ERR(data->io_reg);
480 dev_err(&client->dev, 480 dev_err(&client->dev,
481 "Unable to get the IO regulator (%d)\n", error); 481 "Unable to get the IO regulator (%d)\n", error);
482 goto err_core_reg; 482 return error;
483 } 483 }
484 484
485 error = request_threaded_irq(client->irq, NULL, mms114_interrupt, 485 error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
486 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mms114", data); 486 mms114_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
487 dev_name(&client->dev), data);
487 if (error) { 488 if (error) {
488 dev_err(&client->dev, "Failed to register interrupt\n"); 489 dev_err(&client->dev, "Failed to register interrupt\n");
489 goto err_io_reg; 490 return error;
490 } 491 }
491 disable_irq(client->irq); 492 disable_irq(client->irq);
492 493
493 error = input_register_device(data->input_dev); 494 error = input_register_device(data->input_dev);
494 if (error) 495 if (error) {
495 goto err_free_irq; 496 dev_err(&client->dev, "Failed to register input device\n");
496 497 return error;
497 return 0; 498 }
498
499err_free_irq:
500 free_irq(client->irq, data);
501err_io_reg:
502 regulator_put(data->io_reg);
503err_core_reg:
504 regulator_put(data->core_reg);
505err_free_mem:
506 input_free_device(input_dev);
507 kfree(data);
508 return error;
509}
510
511static int mms114_remove(struct i2c_client *client)
512{
513 struct mms114_data *data = i2c_get_clientdata(client);
514
515 free_irq(client->irq, data);
516 regulator_put(data->io_reg);
517 regulator_put(data->core_reg);
518 input_unregister_device(data->input_dev);
519 kfree(data);
520 499
521 return 0; 500 return 0;
522} 501}
@@ -590,7 +569,6 @@ static struct i2c_driver mms114_driver = {
590 .of_match_table = of_match_ptr(mms114_dt_match), 569 .of_match_table = of_match_ptr(mms114_dt_match),
591 }, 570 },
592 .probe = mms114_probe, 571 .probe = mms114_probe,
593 .remove = mms114_remove,
594 .id_table = mms114_id, 572 .id_table = mms114_id,
595}; 573};
596 574
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 84d884b4ec3e..59e81b00f244 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -120,6 +120,7 @@ static void stmpe_work(struct work_struct *work)
120 __stmpe_reset_fifo(ts->stmpe); 120 __stmpe_reset_fifo(ts->stmpe);
121 121
122 input_report_abs(ts->idev, ABS_PRESSURE, 0); 122 input_report_abs(ts->idev, ABS_PRESSURE, 0);
123 input_report_key(ts->idev, BTN_TOUCH, 0);
123 input_sync(ts->idev); 124 input_sync(ts->idev);
124} 125}
125 126
@@ -153,6 +154,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
153 input_report_abs(ts->idev, ABS_X, x); 154 input_report_abs(ts->idev, ABS_X, x);
154 input_report_abs(ts->idev, ABS_Y, y); 155 input_report_abs(ts->idev, ABS_Y, y);
155 input_report_abs(ts->idev, ABS_PRESSURE, z); 156 input_report_abs(ts->idev, ABS_PRESSURE, z);
157 input_report_key(ts->idev, BTN_TOUCH, 1);
156 input_sync(ts->idev); 158 input_sync(ts->idev);
157 159
158 /* flush the FIFO after we have read out our values. */ 160 /* flush the FIFO after we have read out our values. */
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 9c0cdc7ea449..7213e8b07e79 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -753,3 +753,4 @@ module_spi_driver(tsc2005_driver);
753MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>"); 753MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
754MODULE_DESCRIPTION("TSC2005 Touchscreen Driver"); 754MODULE_DESCRIPTION("TSC2005 Touchscreen Driver");
755MODULE_LICENSE("GPL"); 755MODULE_LICENSE("GPL");
756MODULE_ALIAS("spi:tsc2005");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index f88fab56178c..6be2eb6a153a 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -247,7 +247,7 @@ static int wm831x_ts_probe(struct platform_device *pdev)
247 247
248 wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts), 248 wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
249 GFP_KERNEL); 249 GFP_KERNEL);
250 input_dev = input_allocate_device(); 250 input_dev = devm_input_allocate_device(&pdev->dev);
251 if (!wm831x_ts || !input_dev) { 251 if (!wm831x_ts || !input_dev) {
252 error = -ENOMEM; 252 error = -ENOMEM;
253 goto err_alloc; 253 goto err_alloc;
@@ -376,7 +376,6 @@ err_pd_irq:
376err_data_irq: 376err_data_irq:
377 free_irq(wm831x_ts->data_irq, wm831x_ts); 377 free_irq(wm831x_ts->data_irq, wm831x_ts);
378err_alloc: 378err_alloc:
379 input_free_device(input_dev);
380 379
381 return error; 380 return error;
382} 381}
@@ -387,7 +386,6 @@ static int wm831x_ts_remove(struct platform_device *pdev)
387 386
388 free_irq(wm831x_ts->pd_irq, wm831x_ts); 387 free_irq(wm831x_ts->pd_irq, wm831x_ts);
389 free_irq(wm831x_ts->data_irq, wm831x_ts); 388 free_irq(wm831x_ts->data_irq, wm831x_ts);
390 input_unregister_device(wm831x_ts->input_dev);
391 389
392 return 0; 390 return 0;
393} 391}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c1c74e030a58..d33eaaf783ad 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4017,10 +4017,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
4017 4017
4018 index -= count - 1; 4018 index -= count - 1;
4019 4019
4020 cfg->remapped = 1;
4020 irte_info = &cfg->irq_2_iommu; 4021 irte_info = &cfg->irq_2_iommu;
4021 irte_info->sub_handle = devid; 4022 irte_info->sub_handle = devid;
4022 irte_info->irte_index = index; 4023 irte_info->irte_index = index;
4023 irte_info->iommu = (void *)cfg;
4024 4024
4025 goto out; 4025 goto out;
4026 } 4026 }
@@ -4127,9 +4127,9 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
4127 index = attr->ioapic_pin; 4127 index = attr->ioapic_pin;
4128 4128
4129 /* Setup IRQ remapping info */ 4129 /* Setup IRQ remapping info */
4130 cfg->remapped = 1;
4130 irte_info->sub_handle = devid; 4131 irte_info->sub_handle = devid;
4131 irte_info->irte_index = index; 4132 irte_info->irte_index = index;
4132 irte_info->iommu = (void *)cfg;
4133 4133
4134 /* Setup IRTE for IOMMU */ 4134 /* Setup IRTE for IOMMU */
4135 irte.val = 0; 4135 irte.val = 0;
@@ -4288,9 +4288,9 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
4288 devid = get_device_id(&pdev->dev); 4288 devid = get_device_id(&pdev->dev);
4289 irte_info = &cfg->irq_2_iommu; 4289 irte_info = &cfg->irq_2_iommu;
4290 4290
4291 cfg->remapped = 1;
4291 irte_info->sub_handle = devid; 4292 irte_info->sub_handle = devid;
4292 irte_info->irte_index = index + offset; 4293 irte_info->irte_index = index + offset;
4293 irte_info->iommu = (void *)cfg;
4294 4294
4295 return 0; 4295 return 0;
4296} 4296}
@@ -4314,9 +4314,9 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
4314 if (index < 0) 4314 if (index < 0)
4315 return index; 4315 return index;
4316 4316
4317 cfg->remapped = 1;
4317 irte_info->sub_handle = devid; 4318 irte_info->sub_handle = devid;
4318 irte_info->irte_index = index; 4319 irte_info->irte_index = index;
4319 irte_info->iommu = (void *)cfg;
4320 4320
4321 return 0; 4321 return 0;
4322} 4322}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 81837b0710a9..faf10ba1ed9a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -975,6 +975,38 @@ static void __init free_iommu_all(void)
975} 975}
976 976
977/* 977/*
978 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
979 * Workaround:
980 * BIOS should disable L2B micellaneous clock gating by setting
981 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
982 */
983static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
984{
985 u32 value;
986
987 if ((boot_cpu_data.x86 != 0x15) ||
988 (boot_cpu_data.x86_model < 0x10) ||
989 (boot_cpu_data.x86_model > 0x1f))
990 return;
991
992 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
993 pci_read_config_dword(iommu->dev, 0xf4, &value);
994
995 if (value & BIT(2))
996 return;
997
998 /* Select NB indirect register 0x90 and enable writing */
999 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1000
1001 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1002 pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
1003 dev_name(&iommu->dev->dev));
1004
1005 /* Clear the enable writing bit */
1006 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1007}
1008
1009/*
978 * This function clues the initialization function for one IOMMU 1010 * This function clues the initialization function for one IOMMU
979 * together and also allocates the command buffer and programs the 1011 * together and also allocates the command buffer and programs the
980 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1012 * hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1172 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 1204 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1173 } 1205 }
1174 1206
1207 amd_iommu_erratum_746_workaround(iommu);
1208
1175 return pci_enable_device(iommu->dev); 1209 return pci_enable_device(iommu->dev);
1176} 1210}
1177 1211
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 86e2f4a62b9a..174bb654453d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -41,6 +41,8 @@
41#include <asm/irq_remapping.h> 41#include <asm/irq_remapping.h>
42#include <asm/iommu_table.h> 42#include <asm/iommu_table.h>
43 43
44#include "irq_remapping.h"
45
44/* No locks are needed as DMA remapping hardware unit 46/* No locks are needed as DMA remapping hardware unit
45 * list is constructed at boot time and hotplug of 47 * list is constructed at boot time and hotplug of
46 * these units are not supported by the architecture. 48 * these units are not supported by the architecture.
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9d091157884..43d5c8b8e7ad 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -46,6 +46,8 @@
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <asm/iommu.h> 47#include <asm/iommu.h>
48 48
49#include "irq_remapping.h"
50
49#define ROOT_SIZE VTD_PAGE_SIZE 51#define ROOT_SIZE VTD_PAGE_SIZE
50#define CONTEXT_SIZE VTD_PAGE_SIZE 52#define CONTEXT_SIZE VTD_PAGE_SIZE
51 53
@@ -4234,6 +4236,21 @@ static struct iommu_ops intel_iommu_ops = {
4234 .pgsize_bitmap = INTEL_IOMMU_PGSIZES, 4236 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4235}; 4237};
4236 4238
4239static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4240{
4241 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4242 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4243 dmar_map_gfx = 0;
4244}
4245
4246DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4247DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4248DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4249DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4250DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4251DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4252DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4253
4237static void quirk_iommu_rwbf(struct pci_dev *dev) 4254static void quirk_iommu_rwbf(struct pci_dev *dev)
4238{ 4255{
4239 /* 4256 /*
@@ -4242,12 +4259,6 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
4242 */ 4259 */
4243 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); 4260 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4244 rwbf_quirk = 1; 4261 rwbf_quirk = 1;
4245
4246 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4247 if (dev->revision == 0x07) {
4248 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4249 dmar_map_gfx = 0;
4250 }
4251} 4262}
4252 4263
4253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 4264DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index af8904de1d44..f3b8f23b5d8f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -68,6 +68,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
68{ 68{
69 struct ir_table *table = iommu->ir_table; 69 struct ir_table *table = iommu->ir_table;
70 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 70 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
71 struct irq_cfg *cfg = irq_get_chip_data(irq);
71 u16 index, start_index; 72 u16 index, start_index;
72 unsigned int mask = 0; 73 unsigned int mask = 0;
73 unsigned long flags; 74 unsigned long flags;
@@ -115,6 +116,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
115 for (i = index; i < index + count; i++) 116 for (i = index; i < index + count; i++)
116 table->base[i].present = 1; 117 table->base[i].present = 1;
117 118
119 cfg->remapped = 1;
118 irq_iommu->iommu = iommu; 120 irq_iommu->iommu = iommu;
119 irq_iommu->irte_index = index; 121 irq_iommu->irte_index = index;
120 irq_iommu->sub_handle = 0; 122 irq_iommu->sub_handle = 0;
@@ -155,6 +157,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
155static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 157static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
156{ 158{
157 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 159 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
160 struct irq_cfg *cfg = irq_get_chip_data(irq);
158 unsigned long flags; 161 unsigned long flags;
159 162
160 if (!irq_iommu) 163 if (!irq_iommu)
@@ -162,6 +165,7 @@ static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subha
162 165
163 raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 166 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
164 167
168 cfg->remapped = 1;
165 irq_iommu->iommu = iommu; 169 irq_iommu->iommu = iommu;
166 irq_iommu->irte_index = index; 170 irq_iommu->irte_index = index;
167 irq_iommu->sub_handle = subhandle; 171 irq_iommu->sub_handle = subhandle;
@@ -425,11 +429,22 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
425 429
426 /* Enable interrupt-remapping */ 430 /* Enable interrupt-remapping */
427 iommu->gcmd |= DMA_GCMD_IRE; 431 iommu->gcmd |= DMA_GCMD_IRE;
432 iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
428 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 433 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
429 434
430 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 435 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
431 readl, (sts & DMA_GSTS_IRES), sts); 436 readl, (sts & DMA_GSTS_IRES), sts);
432 437
438 /*
439 * With CFI clear in the Global Command register, we should be
440 * protected from dangerous (i.e. compatibility) interrupts
441 * regardless of x2apic status. Check just to be sure.
442 */
443 if (sts & DMA_GSTS_CFIS)
444 WARN(1, KERN_WARNING
445 "Compatibility-format IRQs enabled despite intr remapping;\n"
446 "you are vulnerable to IRQ injection.\n");
447
433 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 448 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
434} 449}
435 450
@@ -526,20 +541,24 @@ static int __init intel_irq_remapping_supported(void)
526static int __init intel_enable_irq_remapping(void) 541static int __init intel_enable_irq_remapping(void)
527{ 542{
528 struct dmar_drhd_unit *drhd; 543 struct dmar_drhd_unit *drhd;
544 bool x2apic_present;
529 int setup = 0; 545 int setup = 0;
530 int eim = 0; 546 int eim = 0;
531 547
548 x2apic_present = x2apic_supported();
549
532 if (parse_ioapics_under_ir() != 1) { 550 if (parse_ioapics_under_ir() != 1) {
533 printk(KERN_INFO "Not enable interrupt remapping\n"); 551 printk(KERN_INFO "Not enable interrupt remapping\n");
534 return -1; 552 goto error;
535 } 553 }
536 554
537 if (x2apic_supported()) { 555 if (x2apic_present) {
538 eim = !dmar_x2apic_optout(); 556 eim = !dmar_x2apic_optout();
539 WARN(!eim, KERN_WARNING 557 if (!eim)
540 "Your BIOS is broken and requested that x2apic be disabled\n" 558 printk(KERN_WARNING
541 "This will leave your machine vulnerable to irq-injection attacks\n" 559 "Your BIOS is broken and requested that x2apic be disabled.\n"
542 "Use 'intremap=no_x2apic_optout' to override BIOS request\n"); 560 "This will slightly decrease performance.\n"
561 "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
543 } 562 }
544 563
545 for_each_drhd_unit(drhd) { 564 for_each_drhd_unit(drhd) {
@@ -578,7 +597,7 @@ static int __init intel_enable_irq_remapping(void)
578 if (eim && !ecap_eim_support(iommu->ecap)) { 597 if (eim && !ecap_eim_support(iommu->ecap)) {
579 printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " 598 printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
580 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); 599 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
581 return -1; 600 goto error;
582 } 601 }
583 } 602 }
584 603
@@ -594,7 +613,7 @@ static int __init intel_enable_irq_remapping(void)
594 printk(KERN_ERR "DRHD %Lx: failed to enable queued, " 613 printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
595 " invalidation, ecap %Lx, ret %d\n", 614 " invalidation, ecap %Lx, ret %d\n",
596 drhd->reg_base_addr, iommu->ecap, ret); 615 drhd->reg_base_addr, iommu->ecap, ret);
597 return -1; 616 goto error;
598 } 617 }
599 } 618 }
600 619
@@ -617,6 +636,14 @@ static int __init intel_enable_irq_remapping(void)
617 goto error; 636 goto error;
618 637
619 irq_remapping_enabled = 1; 638 irq_remapping_enabled = 1;
639
640 /*
641 * VT-d has a different layout for IO-APIC entries when
642 * interrupt remapping is enabled. So it needs a special routine
643 * to print IO-APIC entries for debugging purposes too.
644 */
645 x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
646
620 pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic"); 647 pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
621 648
622 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; 649 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
@@ -625,6 +652,11 @@ error:
625 /* 652 /*
626 * handle error condition gracefully here! 653 * handle error condition gracefully here!
627 */ 654 */
655
656 if (x2apic_present)
657 WARN(1, KERN_WARNING
658 "Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
659
628 return -1; 660 return -1;
629} 661}
630 662
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index faf85d6e33fe..d56f8c17c5fe 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -1,11 +1,18 @@
1#include <linux/seq_file.h>
2#include <linux/cpumask.h>
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/string.h> 4#include <linux/string.h>
3#include <linux/cpumask.h> 5#include <linux/cpumask.h>
4#include <linux/errno.h> 6#include <linux/errno.h>
5#include <linux/msi.h> 7#include <linux/msi.h>
8#include <linux/irq.h>
9#include <linux/pci.h>
6 10
7#include <asm/hw_irq.h> 11#include <asm/hw_irq.h>
8#include <asm/irq_remapping.h> 12#include <asm/irq_remapping.h>
13#include <asm/processor.h>
14#include <asm/x86_init.h>
15#include <asm/apic.h>
9 16
10#include "irq_remapping.h" 17#include "irq_remapping.h"
11 18
@@ -17,6 +24,152 @@ int no_x2apic_optout;
17 24
18static struct irq_remap_ops *remap_ops; 25static struct irq_remap_ops *remap_ops;
19 26
27static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
28static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
29 int index, int sub_handle);
30static int set_remapped_irq_affinity(struct irq_data *data,
31 const struct cpumask *mask,
32 bool force);
33
34static bool irq_remapped(struct irq_cfg *cfg)
35{
36 return (cfg->remapped == 1);
37}
38
39static void irq_remapping_disable_io_apic(void)
40{
41 /*
42 * With interrupt-remapping, for now we will use virtual wire A
43 * mode, as virtual wire B is little complex (need to configure
44 * both IOAPIC RTE as well as interrupt-remapping table entry).
45 * As this gets called during crash dump, keep this simple for
46 * now.
47 */
48 if (cpu_has_apic || apic_from_smp_config())
49 disconnect_bsp_APIC(0);
50}
51
52static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
53{
54 int node, ret, sub_handle, index = 0;
55 unsigned int irq;
56 struct msi_desc *msidesc;
57
58 nvec = __roundup_pow_of_two(nvec);
59
60 WARN_ON(!list_is_singular(&dev->msi_list));
61 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
62 WARN_ON(msidesc->irq);
63 WARN_ON(msidesc->msi_attrib.multiple);
64
65 node = dev_to_node(&dev->dev);
66 irq = __create_irqs(get_nr_irqs_gsi(), nvec, node);
67 if (irq == 0)
68 return -ENOSPC;
69
70 msidesc->msi_attrib.multiple = ilog2(nvec);
71 for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
72 if (!sub_handle) {
73 index = msi_alloc_remapped_irq(dev, irq, nvec);
74 if (index < 0) {
75 ret = index;
76 goto error;
77 }
78 } else {
79 ret = msi_setup_remapped_irq(dev, irq + sub_handle,
80 index, sub_handle);
81 if (ret < 0)
82 goto error;
83 }
84 ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
85 if (ret < 0)
86 goto error;
87 }
88 return 0;
89
90error:
91 destroy_irqs(irq, nvec);
92
93 /*
94 * Restore altered MSI descriptor fields and prevent just destroyed
95 * IRQs from tearing down again in default_teardown_msi_irqs()
96 */
97 msidesc->irq = 0;
98 msidesc->msi_attrib.multiple = 0;
99
100 return ret;
101}
102
103static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
104{
105 int node, ret, sub_handle, index = 0;
106 struct msi_desc *msidesc;
107 unsigned int irq;
108
109 node = dev_to_node(&dev->dev);
110 irq = get_nr_irqs_gsi();
111 sub_handle = 0;
112
113 list_for_each_entry(msidesc, &dev->msi_list, list) {
114
115 irq = create_irq_nr(irq, node);
116 if (irq == 0)
117 return -1;
118
119 if (sub_handle == 0)
120 ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
121 else
122 ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
123
124 if (ret < 0)
125 goto error;
126
127 ret = setup_msi_irq(dev, msidesc, irq, 0);
128 if (ret < 0)
129 goto error;
130
131 sub_handle += 1;
132 irq += 1;
133 }
134
135 return 0;
136
137error:
138 destroy_irq(irq);
139 return ret;
140}
141
142static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
143 int nvec, int type)
144{
145 if (type == PCI_CAP_ID_MSI)
146 return do_setup_msi_irqs(dev, nvec);
147 else
148 return do_setup_msix_irqs(dev, nvec);
149}
150
151void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
152{
153 /*
154 * Intr-remapping uses pin number as the virtual vector
155 * in the RTE. Actual vector is programmed in
156 * intr-remapping table entry. Hence for the io-apic
157 * EOI we use the pin number.
158 */
159 io_apic_eoi(apic, pin);
160}
161
162static void __init irq_remapping_modify_x86_ops(void)
163{
164 x86_io_apic_ops.disable = irq_remapping_disable_io_apic;
165 x86_io_apic_ops.set_affinity = set_remapped_irq_affinity;
166 x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry;
167 x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped;
168 x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs;
169 x86_msi.setup_hpet_msi = setup_hpet_msi_remapped;
170 x86_msi.compose_msi_msg = compose_remapped_msi_msg;
171}
172
20static __init int setup_nointremap(char *str) 173static __init int setup_nointremap(char *str)
21{ 174{
22 disable_irq_remap = 1; 175 disable_irq_remap = 1;
@@ -79,15 +232,24 @@ int __init irq_remapping_prepare(void)
79 232
80int __init irq_remapping_enable(void) 233int __init irq_remapping_enable(void)
81{ 234{
235 int ret;
236
82 if (!remap_ops || !remap_ops->enable) 237 if (!remap_ops || !remap_ops->enable)
83 return -ENODEV; 238 return -ENODEV;
84 239
85 return remap_ops->enable(); 240 ret = remap_ops->enable();
241
242 if (irq_remapping_enabled)
243 irq_remapping_modify_x86_ops();
244
245 return ret;
86} 246}
87 247
88void irq_remapping_disable(void) 248void irq_remapping_disable(void)
89{ 249{
90 if (!remap_ops || !remap_ops->disable) 250 if (!irq_remapping_enabled ||
251 !remap_ops ||
252 !remap_ops->disable)
91 return; 253 return;
92 254
93 remap_ops->disable(); 255 remap_ops->disable();
@@ -95,7 +257,9 @@ void irq_remapping_disable(void)
95 257
96int irq_remapping_reenable(int mode) 258int irq_remapping_reenable(int mode)
97{ 259{
98 if (!remap_ops || !remap_ops->reenable) 260 if (!irq_remapping_enabled ||
261 !remap_ops ||
262 !remap_ops->reenable)
99 return 0; 263 return 0;
100 264
101 return remap_ops->reenable(mode); 265 return remap_ops->reenable(mode);
@@ -103,6 +267,9 @@ int irq_remapping_reenable(int mode)
103 267
104int __init irq_remap_enable_fault_handling(void) 268int __init irq_remap_enable_fault_handling(void)
105{ 269{
270 if (!irq_remapping_enabled)
271 return 0;
272
106 if (!remap_ops || !remap_ops->enable_faulting) 273 if (!remap_ops || !remap_ops->enable_faulting)
107 return -ENODEV; 274 return -ENODEV;
108 275
@@ -133,23 +300,28 @@ int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
133 300
134void free_remapped_irq(int irq) 301void free_remapped_irq(int irq)
135{ 302{
303 struct irq_cfg *cfg = irq_get_chip_data(irq);
304
136 if (!remap_ops || !remap_ops->free_irq) 305 if (!remap_ops || !remap_ops->free_irq)
137 return; 306 return;
138 307
139 remap_ops->free_irq(irq); 308 if (irq_remapped(cfg))
309 remap_ops->free_irq(irq);
140} 310}
141 311
142void compose_remapped_msi_msg(struct pci_dev *pdev, 312void compose_remapped_msi_msg(struct pci_dev *pdev,
143 unsigned int irq, unsigned int dest, 313 unsigned int irq, unsigned int dest,
144 struct msi_msg *msg, u8 hpet_id) 314 struct msi_msg *msg, u8 hpet_id)
145{ 315{
146 if (!remap_ops || !remap_ops->compose_msi_msg) 316 struct irq_cfg *cfg = irq_get_chip_data(irq);
147 return;
148 317
149 remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); 318 if (!irq_remapped(cfg))
319 native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
320 else if (remap_ops && remap_ops->compose_msi_msg)
321 remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
150} 322}
151 323
152int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) 324static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
153{ 325{
154 if (!remap_ops || !remap_ops->msi_alloc_irq) 326 if (!remap_ops || !remap_ops->msi_alloc_irq)
155 return -ENODEV; 327 return -ENODEV;
@@ -157,8 +329,8 @@ int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
157 return remap_ops->msi_alloc_irq(pdev, irq, nvec); 329 return remap_ops->msi_alloc_irq(pdev, irq, nvec);
158} 330}
159 331
160int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, 332static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
161 int index, int sub_handle) 333 int index, int sub_handle)
162{ 334{
163 if (!remap_ops || !remap_ops->msi_setup_irq) 335 if (!remap_ops || !remap_ops->msi_setup_irq)
164 return -ENODEV; 336 return -ENODEV;
@@ -173,3 +345,42 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
173 345
174 return remap_ops->setup_hpet_msi(irq, id); 346 return remap_ops->setup_hpet_msi(irq, id);
175} 347}
348
349void panic_if_irq_remap(const char *msg)
350{
351 if (irq_remapping_enabled)
352 panic(msg);
353}
354
355static void ir_ack_apic_edge(struct irq_data *data)
356{
357 ack_APIC_irq();
358}
359
360static void ir_ack_apic_level(struct irq_data *data)
361{
362 ack_APIC_irq();
363 eoi_ioapic_irq(data->irq, data->chip_data);
364}
365
366static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
367{
368 seq_printf(p, " IR-%s", data->chip->name);
369}
370
371void irq_remap_modify_chip_defaults(struct irq_chip *chip)
372{
373 chip->irq_print_chip = ir_print_prefix;
374 chip->irq_ack = ir_ack_apic_edge;
375 chip->irq_eoi = ir_ack_apic_level;
376 chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
377}
378
379bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
380{
381 if (!irq_remapped(cfg))
382 return false;
383 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
384 irq_remap_modify_chip_defaults(chip);
385 return true;
386}
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 95363acb583f..ecb637670405 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -34,6 +34,7 @@ struct msi_msg;
34extern int disable_irq_remap; 34extern int disable_irq_remap;
35extern int disable_sourceid_checking; 35extern int disable_sourceid_checking;
36extern int no_x2apic_optout; 36extern int no_x2apic_optout;
37extern int irq_remapping_enabled;
37 38
38struct irq_remap_ops { 39struct irq_remap_ops {
39 /* Check whether Interrupt Remapping is supported */ 40 /* Check whether Interrupt Remapping is supported */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 68452b768da2..03a0a01a4054 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
248 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, 248 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
249 CAPIMSG_CONTROL(data)); 249 CAPIMSG_CONTROL(data));
250 l -= 12; 250 l -= 12;
251 if (l <= 0)
252 return;
251 dbgline = kmalloc(3 * l, GFP_ATOMIC); 253 dbgline = kmalloc(3 * l, GFP_ATOMIC);
252 if (!dbgline) 254 if (!dbgline)
253 return; 255 return;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 5f21f629b7ae..deda591f70b9 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/mISDNif.h> 19#include <linux/mISDNif.h>
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21#include <linux/sched.h>
21#include "core.h" 22#include "core.h"
22 23
23static u_int *debug; 24static u_int *debug;
@@ -202,6 +203,9 @@ static int
202mISDNStackd(void *data) 203mISDNStackd(void *data)
203{ 204{
204 struct mISDNstack *st = data; 205 struct mISDNstack *st = data;
206#ifdef MISDN_MSG_STATS
207 cputime_t utime, stime;
208#endif
205 int err = 0; 209 int err = 0;
206 210
207 sigfillset(&current->blocked); 211 sigfillset(&current->blocked);
@@ -303,9 +307,10 @@ mISDNStackd(void *data)
303 "msg %d sleep %d stopped\n", 307 "msg %d sleep %d stopped\n",
304 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, 308 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
305 st->stopped_cnt); 309 st->stopped_cnt);
310 task_cputime(st->thread, &utime, &stime);
306 printk(KERN_DEBUG 311 printk(KERN_DEBUG
307 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", 312 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
308 dev_name(&st->dev->dev), st->thread->utime, st->thread->stime); 313 dev_name(&st->dev->dev), utime, stime);
309 printk(KERN_DEBUG 314 printk(KERN_DEBUG
310 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", 315 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
311 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw); 316 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
new file mode 100644
index 000000000000..9545c9f03809
--- /dev/null
+++ b/drivers/mailbox/Kconfig
@@ -0,0 +1,19 @@
1menuconfig MAILBOX
2 bool "Mailbox Hardware Support"
3 help
4 Mailbox is a framework to control hardware communication between
5 on-chip processors through queued messages and interrupt driven
6 signals. Say Y if your platform supports hardware mailboxes.
7
8if MAILBOX
9config PL320_MBOX
10 bool "ARM PL320 Mailbox"
11 depends on ARM_AMBA
12 help
13 An implementation of the ARM PL320 Interprocessor Communication
14 Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
15 send short messages between Highbank's A9 cores and the EnergyCore
16 Management Engine, primarily for cpufreq. Say Y here if you want
17 to use the PL320 IPCM support.
18
19endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
new file mode 100644
index 000000000000..543ad6a79505
--- /dev/null
+++ b/drivers/mailbox/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000000..c45b3aedafba
--- /dev/null
+++ b/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2012 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/types.h>
17#include <linux/err.h>
18#include <linux/delay.h>
19#include <linux/export.h>
20#include <linux/io.h>
21#include <linux/interrupt.h>
22#include <linux/completion.h>
23#include <linux/mutex.h>
24#include <linux/notifier.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/amba/bus.h>
28
29#include <linux/mailbox.h>
30
31#define IPCMxSOURCE(m) ((m) * 0x40)
32#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
33#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
34#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
35#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
36#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
37#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
38#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
39#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
40#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
41
42#define IPCMMIS(irq) (((irq) * 8) + 0x800)
43#define IPCMRIS(irq) (((irq) * 8) + 0x804)
44
45#define MBOX_MASK(n) (1 << (n))
46#define IPC_TX_MBOX 1
47#define IPC_RX_MBOX 2
48
49#define CHAN_MASK(n) (1 << (n))
50#define A9_SOURCE 1
51#define M3_SOURCE 0
52
53static void __iomem *ipc_base;
54static int ipc_irq;
55static DEFINE_MUTEX(ipc_m1_lock);
56static DECLARE_COMPLETION(ipc_completion);
57static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
58
59static inline void set_destination(int source, int mbox)
60{
61 __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
62 __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
63}
64
65static inline void clear_destination(int source, int mbox)
66{
67 __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
68 __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
69}
70
71static void __ipc_send(int mbox, u32 *data)
72{
73 int i;
74 for (i = 0; i < 7; i++)
75 __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
76 __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
77}
78
79static u32 __ipc_rcv(int mbox, u32 *data)
80{
81 int i;
82 for (i = 0; i < 7; i++)
83 data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
84 return data[1];
85}
86
87/* blocking implmentation from the A9 side, not usuable in interrupts! */
88int pl320_ipc_transmit(u32 *data)
89{
90 int ret;
91
92 mutex_lock(&ipc_m1_lock);
93
94 init_completion(&ipc_completion);
95 __ipc_send(IPC_TX_MBOX, data);
96 ret = wait_for_completion_timeout(&ipc_completion,
97 msecs_to_jiffies(1000));
98 if (ret == 0) {
99 ret = -ETIMEDOUT;
100 goto out;
101 }
102
103 ret = __ipc_rcv(IPC_TX_MBOX, data);
104out:
105 mutex_unlock(&ipc_m1_lock);
106 return ret;
107}
108EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
109
110static irqreturn_t ipc_handler(int irq, void *dev)
111{
112 u32 irq_stat;
113 u32 data[7];
114
115 irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
116 if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
117 __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
118 complete(&ipc_completion);
119 }
120 if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
121 __ipc_rcv(IPC_RX_MBOX, data);
122 atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
123 __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
124 }
125
126 return IRQ_HANDLED;
127}
128
129int pl320_ipc_register_notifier(struct notifier_block *nb)
130{
131 return atomic_notifier_chain_register(&ipc_notifier, nb);
132}
133EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
134
135int pl320_ipc_unregister_notifier(struct notifier_block *nb)
136{
137 return atomic_notifier_chain_unregister(&ipc_notifier, nb);
138}
139EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
140
141static int __init pl320_probe(struct amba_device *adev,
142 const struct amba_id *id)
143{
144 int ret;
145
146 ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
147 if (ipc_base == NULL)
148 return -ENOMEM;
149
150 __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
151
152 ipc_irq = adev->irq[0];
153 ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
154 if (ret < 0)
155 goto err;
156
157 /* Init slow mailbox */
158 __raw_writel(CHAN_MASK(A9_SOURCE),
159 ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
160 __raw_writel(CHAN_MASK(M3_SOURCE),
161 ipc_base + IPCMxDSET(IPC_TX_MBOX));
162 __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
163 ipc_base + IPCMxMSET(IPC_TX_MBOX));
164
165 /* Init receive mailbox */
166 __raw_writel(CHAN_MASK(M3_SOURCE),
167 ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
168 __raw_writel(CHAN_MASK(A9_SOURCE),
169 ipc_base + IPCMxDSET(IPC_RX_MBOX));
170 __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
171 ipc_base + IPCMxMSET(IPC_RX_MBOX));
172
173 return 0;
174err:
175 iounmap(ipc_base);
176 return ret;
177}
178
179static struct amba_id pl320_ids[] = {
180 {
181 .id = 0x00041320,
182 .mask = 0x000fffff,
183 },
184 { 0, 0 },
185};
186
187static struct amba_driver pl320_driver = {
188 .drv = {
189 .name = "pl320",
190 },
191 .id_table = pl320_ids,
192 .probe = pl320_probe,
193};
194
195static int __init ipc_init(void)
196{
197 return amba_driver_register(&pl320_driver);
198}
199module_init(ipc_init);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 3d8984edeff7..9e58dbd8d8cb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -340,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
340} 340}
341 341
342/* 342/*
343 * validate_rebuild_devices 343 * validate_raid_redundancy
344 * @rs 344 * @rs
345 * 345 *
346 * Determine if the devices specified for rebuild can result in a valid 346 * Determine if there are enough devices in the array that haven't
347 * usable array that is capable of rebuilding the given devices. 347 * failed (or are being rebuilt) to form a usable array.
348 * 348 *
349 * Returns: 0 on success, -EINVAL on failure. 349 * Returns: 0 on success, -EINVAL on failure.
350 */ 350 */
351static int validate_rebuild_devices(struct raid_set *rs) 351static int validate_raid_redundancy(struct raid_set *rs)
352{ 352{
353 unsigned i, rebuild_cnt = 0; 353 unsigned i, rebuild_cnt = 0;
354 unsigned rebuilds_per_group, copies, d; 354 unsigned rebuilds_per_group, copies, d;
355 355
356 if (!(rs->print_flags & DMPF_REBUILD))
357 return 0;
358
359 for (i = 0; i < rs->md.raid_disks; i++) 356 for (i = 0; i < rs->md.raid_disks; i++)
360 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) 357 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
358 !rs->dev[i].rdev.sb_page)
361 rebuild_cnt++; 359 rebuild_cnt++;
362 360
363 switch (rs->raid_type->level) { 361 switch (rs->raid_type->level) {
@@ -393,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
393 * A A B B C 391 * A A B B C
394 * C D D E E 392 * C D D E E
395 */ 393 */
396 rebuilds_per_group = 0;
397 for (i = 0; i < rs->md.raid_disks * copies; i++) { 394 for (i = 0; i < rs->md.raid_disks * copies; i++) {
395 if (!(i % copies))
396 rebuilds_per_group = 0;
398 d = i % rs->md.raid_disks; 397 d = i % rs->md.raid_disks;
399 if (!test_bit(In_sync, &rs->dev[d].rdev.flags) && 398 if ((!rs->dev[d].rdev.sb_page ||
399 !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
400 (++rebuilds_per_group >= copies)) 400 (++rebuilds_per_group >= copies))
401 goto too_many; 401 goto too_many;
402 if (!((i + 1) % copies))
403 rebuilds_per_group = 0;
404 } 402 }
405 break; 403 break;
406 default: 404 default:
407 DMERR("The rebuild parameter is not supported for %s", 405 if (rebuild_cnt)
408 rs->raid_type->name); 406 return -EINVAL;
409 rs->ti->error = "Rebuild not supported for this RAID type";
410 return -EINVAL;
411 } 407 }
412 408
413 return 0; 409 return 0;
414 410
415too_many: 411too_many:
416 rs->ti->error = "Too many rebuild devices specified";
417 return -EINVAL; 412 return -EINVAL;
418} 413}
419 414
@@ -664,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
664 } 659 }
665 rs->md.dev_sectors = sectors_per_dev; 660 rs->md.dev_sectors = sectors_per_dev;
666 661
667 if (validate_rebuild_devices(rs))
668 return -EINVAL;
669
670 /* Assume there are no metadata devices until the drives are parsed */ 662 /* Assume there are no metadata devices until the drives are parsed */
671 rs->md.persistent = 0; 663 rs->md.persistent = 0;
672 rs->md.external = 1; 664 rs->md.external = 1;
@@ -995,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
995static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) 987static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
996{ 988{
997 int ret; 989 int ret;
998 unsigned redundancy = 0;
999 struct raid_dev *dev; 990 struct raid_dev *dev;
1000 struct md_rdev *rdev, *tmp, *freshest; 991 struct md_rdev *rdev, *tmp, *freshest;
1001 struct mddev *mddev = &rs->md; 992 struct mddev *mddev = &rs->md;
1002 993
1003 switch (rs->raid_type->level) {
1004 case 1:
1005 redundancy = rs->md.raid_disks - 1;
1006 break;
1007 case 4:
1008 case 5:
1009 case 6:
1010 redundancy = rs->raid_type->parity_devs;
1011 break;
1012 case 10:
1013 redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
1014 break;
1015 default:
1016 ti->error = "Unknown RAID type";
1017 return -EINVAL;
1018 }
1019
1020 freshest = NULL; 994 freshest = NULL;
1021 rdev_for_each_safe(rdev, tmp, mddev) { 995 rdev_for_each_safe(rdev, tmp, mddev) {
1022 /* 996 /*
@@ -1045,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
1045 break; 1019 break;
1046 default: 1020 default:
1047 dev = container_of(rdev, struct raid_dev, rdev); 1021 dev = container_of(rdev, struct raid_dev, rdev);
1048 if (redundancy--) { 1022 if (dev->meta_dev)
1049 if (dev->meta_dev) 1023 dm_put_device(ti, dev->meta_dev);
1050 dm_put_device(ti, dev->meta_dev);
1051
1052 dev->meta_dev = NULL;
1053 rdev->meta_bdev = NULL;
1054 1024
1055 if (rdev->sb_page) 1025 dev->meta_dev = NULL;
1056 put_page(rdev->sb_page); 1026 rdev->meta_bdev = NULL;
1057 1027
1058 rdev->sb_page = NULL; 1028 if (rdev->sb_page)
1029 put_page(rdev->sb_page);
1059 1030
1060 rdev->sb_loaded = 0; 1031 rdev->sb_page = NULL;
1061 1032
1062 /* 1033 rdev->sb_loaded = 0;
1063 * We might be able to salvage the data device
1064 * even though the meta device has failed. For
1065 * now, we behave as though '- -' had been
1066 * set for this device in the table.
1067 */
1068 if (dev->data_dev)
1069 dm_put_device(ti, dev->data_dev);
1070 1034
1071 dev->data_dev = NULL; 1035 /*
1072 rdev->bdev = NULL; 1036 * We might be able to salvage the data device
1037 * even though the meta device has failed. For
1038 * now, we behave as though '- -' had been
1039 * set for this device in the table.
1040 */
1041 if (dev->data_dev)
1042 dm_put_device(ti, dev->data_dev);
1073 1043
1074 list_del(&rdev->same_set); 1044 dev->data_dev = NULL;
1045 rdev->bdev = NULL;
1075 1046
1076 continue; 1047 list_del(&rdev->same_set);
1077 }
1078 ti->error = "Failed to load superblock";
1079 return ret;
1080 } 1048 }
1081 } 1049 }
1082 1050
1083 if (!freshest) 1051 if (!freshest)
1084 return 0; 1052 return 0;
1085 1053
1054 if (validate_raid_redundancy(rs)) {
1055 rs->ti->error = "Insufficient redundancy to activate array";
1056 return -EINVAL;
1057 }
1058
1086 /* 1059 /*
1087 * Validation of the freshest device provides the source of 1060 * Validation of the freshest device provides the source of
1088 * validation for the remaining devices. 1061 * validation for the remaining devices.
@@ -1432,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
1432 1405
1433static struct target_type raid_target = { 1406static struct target_type raid_target = {
1434 .name = "raid", 1407 .name = "raid",
1435 .version = {1, 4, 0}, 1408 .version = {1, 4, 1},
1436 .module = THIS_MODULE, 1409 .module = THIS_MODULE,
1437 .ctr = raid_ctr, 1410 .ctr = raid_ctr,
1438 .dtr = raid_dtr, 1411 .dtr = raid_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 675ae5274016..5409607d4875 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
2746 return 0; 2746 return 0;
2747} 2747}
2748 2748
2749/*
2750 * A thin device always inherits its queue limits from its pool.
2751 */
2752static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2753{
2754 struct thin_c *tc = ti->private;
2755
2756 *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
2757}
2758
2759static struct target_type thin_target = { 2749static struct target_type thin_target = {
2760 .name = "thin", 2750 .name = "thin",
2761 .version = {1, 6, 0}, 2751 .version = {1, 7, 0},
2762 .module = THIS_MODULE, 2752 .module = THIS_MODULE,
2763 .ctr = thin_ctr, 2753 .ctr = thin_ctr,
2764 .dtr = thin_dtr, 2754 .dtr = thin_dtr,
@@ -2767,7 +2757,6 @@ static struct target_type thin_target = {
2767 .postsuspend = thin_postsuspend, 2757 .postsuspend = thin_postsuspend,
2768 .status = thin_status, 2758 .status = thin_status,
2769 .iterate_devices = thin_iterate_devices, 2759 .iterate_devices = thin_iterate_devices,
2770 .io_hints = thin_io_hints,
2771}; 2760};
2772 2761
2773/*----------------------------------------------------------------*/ 2762/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c72e4d5a9617..314a0e2faf79 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
1188{ 1188{
1189 struct dm_target *ti; 1189 struct dm_target *ti;
1190 sector_t len; 1190 sector_t len;
1191 unsigned num_requests;
1191 1192
1192 do { 1193 do {
1193 ti = dm_table_find_target(ci->map, ci->sector); 1194 ti = dm_table_find_target(ci->map, ci->sector);
@@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
1200 * reconfiguration might also have changed that since the 1201 * reconfiguration might also have changed that since the
1201 * check was performed. 1202 * check was performed.
1202 */ 1203 */
1203 if (!get_num_requests || !get_num_requests(ti)) 1204 num_requests = get_num_requests ? get_num_requests(ti) : 0;
1205 if (!num_requests)
1204 return -EOPNOTSUPP; 1206 return -EOPNOTSUPP;
1205 1207
1206 if (is_split_required && !is_split_required(ti)) 1208 if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
1208 else 1210 else
1209 len = min(ci->sector_count, max_io_len(ci->sector, ti)); 1211 len = min(ci->sector_count, max_io_len(ci->sector, ti));
1210 1212
1211 __issue_target_requests(ci, ti, ti->num_discard_requests, len); 1213 __issue_target_requests(ci, ti, num_requests, len);
1212 1214
1213 ci->sector += len; 1215 ci->sector += len;
1214 } while (ci->sector_count -= len); 1216 } while (ci->sector_count -= len);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 49d95040096a..0223ad255cb4 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1820,7 +1820,7 @@ static int dvb_frontend_ioctl(struct file *file,
1820 struct dvb_frontend *fe = dvbdev->priv; 1820 struct dvb_frontend *fe = dvbdev->priv;
1821 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1821 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1822 struct dvb_frontend_private *fepriv = fe->frontend_priv; 1822 struct dvb_frontend_private *fepriv = fe->frontend_priv;
1823 int err = -ENOTTY; 1823 int err = -EOPNOTSUPP;
1824 1824
1825 dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd)); 1825 dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
1826 if (fepriv->exit != DVB_FE_NO_EXIT) 1826 if (fepriv->exit != DVB_FE_NO_EXIT)
@@ -1938,7 +1938,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
1938 } 1938 }
1939 1939
1940 } else 1940 } else
1941 err = -ENOTTY; 1941 err = -EOPNOTSUPP;
1942 1942
1943out: 1943out:
1944 kfree(tvp); 1944 kfree(tvp);
@@ -2071,7 +2071,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
2071 struct dvb_frontend *fe = dvbdev->priv; 2071 struct dvb_frontend *fe = dvbdev->priv;
2072 struct dvb_frontend_private *fepriv = fe->frontend_priv; 2072 struct dvb_frontend_private *fepriv = fe->frontend_priv;
2073 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 2073 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
2074 int err = -ENOTTY; 2074 int err = -EOPNOTSUPP;
2075 2075
2076 switch (cmd) { 2076 switch (cmd) {
2077 case FE_GET_INFO: { 2077 case FE_GET_INFO: {
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 8a8d42fe2633..d4e7567b367c 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
556 mutex_lock(&info->lock); 556 mutex_lock(&info->lock);
557 557
558 format = __find_format(info, fh, fmt->which, info->res_type); 558 format = __find_format(info, fh, fmt->which, info->res_type);
559 if (!format) 559 if (format)
560 fmt->format = *format; 560 fmt->format = *format;
561 else 561 else
562 ret = -EINVAL; 562 ret = -EINVAL;
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 1cf8293c0fb0..4a980e029ca7 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -23,8 +23,8 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/platform_data/imx-iram.h>
26 27
27#include <mach/iram.h>
28#include <media/v4l2-ctrls.h> 28#include <media/v4l2-ctrls.h>
29#include <media/v4l2-device.h> 29#include <media/v4l2-device.h>
30#include <media/v4l2-ioctl.h> 30#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index e0d73a642186..8dac17511e61 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -35,9 +35,6 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <media/v4l2-dev.h> 36#include <media/v4l2-dev.h>
37#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
38#include <plat/iommu.h>
39#include <plat/iovmm.h>
40#include <plat/omap-pm.h>
41 38
42#include "ispvideo.h" 39#include "ispvideo.h"
43#include "isp.h" 40#include "isp.h"
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index 4ab99f3a7b09..b4a68ecf0ca7 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
593{ 593{
594 struct media_entity *source, *sink; 594 struct media_entity *source, *sink;
595 unsigned int flags = MEDIA_LNK_FL_ENABLED; 595 unsigned int flags = MEDIA_LNK_FL_ENABLED;
596 int i, ret; 596 int i, ret = 0;
597 597
598 for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { 598 for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
599 struct fimc_lite *fimc = fmd->fimc_lite[i]; 599 struct fimc_lite *fimc = fmd->fimc_lite[i];
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 379f57433711..681bc6ba149d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -412,62 +412,48 @@ leave_handle_frame:
412} 412}
413 413
414/* Error handling for interrupt */ 414/* Error handling for interrupt */
415static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx, 415static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
416 unsigned int reason, unsigned int err) 416 struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
417{ 417{
418 struct s5p_mfc_dev *dev;
419 unsigned long flags; 418 unsigned long flags;
420 419
421 /* If no context is available then all necessary
422 * processing has been done. */
423 if (ctx == NULL)
424 return;
425
426 dev = ctx->dev;
427 mfc_err("Interrupt Error: %08x\n", err); 420 mfc_err("Interrupt Error: %08x\n", err);
428 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
429 wake_up_dev(dev, reason, err);
430 421
431 /* Error recovery is dependent on the state of context */ 422 if (ctx != NULL) {
432 switch (ctx->state) { 423 /* Error recovery is dependent on the state of context */
433 case MFCINST_INIT: 424 switch (ctx->state) {
434 /* This error had to happen while acquireing instance */ 425 case MFCINST_RES_CHANGE_INIT:
435 case MFCINST_GOT_INST: 426 case MFCINST_RES_CHANGE_FLUSH:
436 /* This error had to happen while parsing the header */ 427 case MFCINST_RES_CHANGE_END:
437 case MFCINST_HEAD_PARSED: 428 case MFCINST_FINISHING:
438 /* This error had to happen while setting dst buffers */ 429 case MFCINST_FINISHED:
439 case MFCINST_RETURN_INST: 430 case MFCINST_RUNNING:
440 /* This error had to happen while releasing instance */ 431 /* It is higly probable that an error occured
441 clear_work_bit(ctx); 432 * while decoding a frame */
442 wake_up_ctx(ctx, reason, err); 433 clear_work_bit(ctx);
443 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 434 ctx->state = MFCINST_ERROR;
444 BUG(); 435 /* Mark all dst buffers as having an error */
445 s5p_mfc_clock_off(); 436 spin_lock_irqsave(&dev->irqlock, flags);
446 ctx->state = MFCINST_ERROR; 437 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
447 break; 438 &ctx->dst_queue, &ctx->vq_dst);
448 case MFCINST_FINISHING: 439 /* Mark all src buffers as having an error */
449 case MFCINST_FINISHED: 440 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
450 case MFCINST_RUNNING: 441 &ctx->src_queue, &ctx->vq_src);
451 /* It is higly probable that an error occured 442 spin_unlock_irqrestore(&dev->irqlock, flags);
452 * while decoding a frame */ 443 wake_up_ctx(ctx, reason, err);
453 clear_work_bit(ctx); 444 break;
454 ctx->state = MFCINST_ERROR; 445 default:
455 /* Mark all dst buffers as having an error */ 446 clear_work_bit(ctx);
456 spin_lock_irqsave(&dev->irqlock, flags); 447 ctx->state = MFCINST_ERROR;
457 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue, 448 wake_up_ctx(ctx, reason, err);
458 &ctx->vq_dst); 449 break;
459 /* Mark all src buffers as having an error */ 450 }
460 s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
461 &ctx->vq_src);
462 spin_unlock_irqrestore(&dev->irqlock, flags);
463 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
464 BUG();
465 s5p_mfc_clock_off();
466 break;
467 default:
468 mfc_err("Encountered an error interrupt which had not been handled\n");
469 break;
470 } 451 }
452 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
453 BUG();
454 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
455 s5p_mfc_clock_off();
456 wake_up_dev(dev, reason, err);
471 return; 457 return;
472} 458}
473 459
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
632 dev->warn_start) 618 dev->warn_start)
633 s5p_mfc_handle_frame(ctx, reason, err); 619 s5p_mfc_handle_frame(ctx, reason, err);
634 else 620 else
635 s5p_mfc_handle_error(ctx, reason, err); 621 s5p_mfc_handle_error(dev, ctx, reason, err);
636 clear_bit(0, &dev->enter_suspend); 622 clear_bit(0, &dev->enter_suspend);
637 break; 623 break;
638 624
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index e10e525f33e5..296941a9ae25 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
374 radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; 374 radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
375 radio->vdev.lock = &radio->lock; 375 radio->vdev.lock = &radio->lock;
376 radio->vdev.release = video_device_release_empty; 376 radio->vdev.release = video_device_release_empty;
377 radio->vdev.vfl_dir = VFL_DIR_TX;
377 378
378 radio->usbdev = interface_to_usbdev(intf); 379 radio->usbdev = interface_to_usbdev(intf);
379 radio->intf = intf; 380 radio->intf = intf;
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index a082e400ed0f..1507c9d508d7 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
250 .name = "radio-si4713", 250 .name = "radio-si4713",
251 .release = video_device_release, 251 .release = video_device_release,
252 .ioctl_ops = &radio_si4713_ioctl_ops, 252 .ioctl_ops = &radio_si4713_ioctl_ops,
253 .vfl_dir = VFL_DIR_TX,
253}; 254};
254 255
255/* Platform driver interface */ 256/* Platform driver interface */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index c48be195bbad..cabbe3adf435 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
1971 .ioctl_ops = &wl1273_ioctl_ops, 1971 .ioctl_ops = &wl1273_ioctl_ops,
1972 .name = WL1273_FM_DRIVER_NAME, 1972 .name = WL1273_FM_DRIVER_NAME,
1973 .release = wl1273_vdev_release, 1973 .release = wl1273_vdev_release,
1974 .vfl_dir = VFL_DIR_TX,
1974}; 1975};
1975 1976
1976static int wl1273_fm_radio_remove(struct platform_device *pdev) 1977static int wl1273_fm_radio_remove(struct platform_device *pdev)
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 048de4536036..0a8ee8fab924 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
518 .ioctl_ops = &fm_drv_ioctl_ops, 518 .ioctl_ops = &fm_drv_ioctl_ops,
519 .name = FM_DRV_NAME, 519 .name = FM_DRV_NAME,
520 .release = video_device_release, 520 .release = video_device_release,
521 /*
522 * To ensure both the tuner and modulator ioctls are accessible we
523 * set the vfl_dir to M2M to indicate this.
524 *
525 * It is not really a mem2mem device of course, but it can both receive
526 * and transmit using the same radio device. It's the only radio driver
527 * that does this and it should really be split in two radio devices,
528 * but that would affect applications using this driver.
529 */
530 .vfl_dir = VFL_DIR_M2M,
521}; 531};
522 532
523int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) 533int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 40ad6687ee5d..3773a8a745df 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {
381/* -- module initialisation -- */ 381/* -- module initialisation -- */
382static const struct usb_device_id device_table[] = { 382static const struct usb_device_id device_table[] = {
383 {USB_DEVICE(0x045e, 0x02ae)}, 383 {USB_DEVICE(0x045e, 0x02ae)},
384 {USB_DEVICE(0x045e, 0x02bf)},
384 {} 385 {}
385}; 386};
386 387
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 70511d5f9538..1220340e7602 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
496 } 496 }
497} 497}
498 498
499static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) 499static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
500{ 500{
501 int retry = 60; 501 int retry = 60;
502 502
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
504 return; 504 return;
505 505
506 /* is i2c ready */ 506 /* is i2c ready */
507 reg_w(gspca_dev, 0x08, buffer, 8); 507 reg_w(gspca_dev, 0x08, buf, 8);
508 while (retry--) { 508 while (retry--) {
509 if (gspca_dev->usb_err < 0) 509 if (gspca_dev->usb_err < 0)
510 return; 510 return;
511 msleep(10); 511 msleep(1);
512 reg_r(gspca_dev, 0x08); 512 reg_r(gspca_dev, 0x08);
513 if (gspca_dev->usb_buf[0] & 0x04) { 513 if (gspca_dev->usb_buf[0] & 0x04) {
514 if (gspca_dev->usb_buf[0] & 0x08) { 514 if (gspca_dev->usb_buf[0] & 0x08) {
515 dev_err(gspca_dev->v4l2_dev.dev, 515 dev_err(gspca_dev->v4l2_dev.dev,
516 "i2c write error\n"); 516 "i2c error writing %02x %02x %02x %02x"
517 " %02x %02x %02x %02x\n",
518 buf[0], buf[1], buf[2], buf[3],
519 buf[4], buf[5], buf[6], buf[7]);
517 gspca_dev->usb_err = -EIO; 520 gspca_dev->usb_err = -EIO;
518 } 521 }
519 return; 522 return;
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
530 for (;;) { 533 for (;;) {
531 if (gspca_dev->usb_err < 0) 534 if (gspca_dev->usb_err < 0)
532 return; 535 return;
533 reg_w(gspca_dev, 0x08, *buffer, 8); 536 i2c_w(gspca_dev, *buffer);
534 len -= 8; 537 len -= 8;
535 if (len <= 0) 538 if (len <= 0)
536 break; 539 break;
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 5a86047b846f..36307a9028a9 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
1550 0, 1550 0,
1551 gspca_dev->usb_buf, 8, 1551 gspca_dev->usb_buf, 8,
1552 500); 1552 500);
1553 msleep(2);
1553 if (ret < 0) { 1554 if (ret < 0) {
1554 pr_err("i2c_w1 err %d\n", ret); 1555 pr_err("i2c_w1 err %d\n", ret);
1555 gspca_dev->usb_err = ret; 1556 gspca_dev->usb_err = ret;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 2bb7613ddebb..d5baab17a5ef 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
1431 int ret; 1431 int ret;
1432 1432
1433 ctrl = uvc_find_control(chain, xctrl->id, &mapping); 1433 ctrl = uvc_find_control(chain, xctrl->id, &mapping);
1434 if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0) 1434 if (ctrl == NULL)
1435 return -EINVAL; 1435 return -EINVAL;
1436 if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
1437 return -EACCES;
1436 1438
1437 /* Clamp out of range values. */ 1439 /* Clamp out of range values. */
1438 switch (mapping->v4l2_type) { 1440 switch (mapping->v4l2_type) {
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f2ee8c6b0d8d..68d59b527492 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -657,8 +657,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
657 ret = uvc_ctrl_get(chain, ctrl); 657 ret = uvc_ctrl_get(chain, ctrl);
658 if (ret < 0) { 658 if (ret < 0) {
659 uvc_ctrl_rollback(handle); 659 uvc_ctrl_rollback(handle);
660 ctrls->error_idx = ret == -ENOENT 660 ctrls->error_idx = i;
661 ? ctrls->count : i;
662 return ret; 661 return ret;
663 } 662 }
664 } 663 }
@@ -686,8 +685,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
686 ret = uvc_ctrl_set(chain, ctrl); 685 ret = uvc_ctrl_set(chain, ctrl);
687 if (ret < 0) { 686 if (ret < 0) {
688 uvc_ctrl_rollback(handle); 687 uvc_ctrl_rollback(handle);
689 ctrls->error_idx = (ret == -ENOENT && 688 ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
690 cmd == VIDIOC_S_EXT_CTRLS)
691 ? ctrls->count : i; 689 ? ctrls->count : i;
692 return ret; 690 return ret;
693 } 691 }
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 9f81be23a81f..e02c4797b1c6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -921,8 +921,10 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
921 * In videobuf we use our internal V4l2_planes struct for 921 * In videobuf we use our internal V4l2_planes struct for
922 * single-planar buffers as well, for simplicity. 922 * single-planar buffers as well, for simplicity.
923 */ 923 */
924 if (V4L2_TYPE_IS_OUTPUT(b->type)) 924 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
925 v4l2_planes[0].bytesused = b->bytesused; 925 v4l2_planes[0].bytesused = b->bytesused;
926 v4l2_planes[0].data_offset = 0;
927 }
926 928
927 if (b->memory == V4L2_MEMORY_USERPTR) { 929 if (b->memory == V4L2_MEMORY_USERPTR) {
928 v4l2_planes[0].m.userptr = b->m.userptr; 930 v4l2_planes[0].m.userptr = b->m.userptr;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 47ad4e270877..ff553babf455 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -237,6 +237,7 @@ config MFD_TPS65910
237 depends on I2C=y && GPIOLIB 237 depends on I2C=y && GPIOLIB
238 select MFD_CORE 238 select MFD_CORE
239 select REGMAP_I2C 239 select REGMAP_I2C
240 select REGMAP_IRQ
240 select IRQ_DOMAIN 241 select IRQ_DOMAIN
241 help 242 help
242 if you say yes here you get support for the TPS65910 series of 243 if you say yes here you get support for the TPS65910 series of
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index e1650badd106..8b5d685ab980 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/core.h> 19#include <linux/mfd/core.h>
20#include <linux/mfd/abx500.h> 20#include <linux/mfd/abx500.h>
21#include <linux/mfd/abx500/ab8500.h> 21#include <linux/mfd/abx500/ab8500.h>
22#include <linux/mfd/abx500/ab8500-bm.h>
22#include <linux/mfd/dbx500-prcmu.h> 23#include <linux/mfd/dbx500-prcmu.h>
23#include <linux/regulator/ab8500.h> 24#include <linux/regulator/ab8500.h>
24#include <linux/of.h> 25#include <linux/of.h>
@@ -749,6 +750,12 @@ static struct resource ab8500_charger_resources[] = {
749 .end = AB8500_INT_CH_WD_EXP, 750 .end = AB8500_INT_CH_WD_EXP,
750 .flags = IORESOURCE_IRQ, 751 .flags = IORESOURCE_IRQ,
751 }, 752 },
753 {
754 .name = "VBUS_CH_DROP_END",
755 .start = AB8500_INT_VBUS_CH_DROP_END,
756 .end = AB8500_INT_VBUS_CH_DROP_END,
757 .flags = IORESOURCE_IRQ,
758 },
752}; 759};
753 760
754static struct resource ab8500_btemp_resources[] = { 761static struct resource ab8500_btemp_resources[] = {
@@ -1011,40 +1018,32 @@ static struct mfd_cell ab8500_bm_devs[] = {
1011 .of_compatible = "stericsson,ab8500-charger", 1018 .of_compatible = "stericsson,ab8500-charger",
1012 .num_resources = ARRAY_SIZE(ab8500_charger_resources), 1019 .num_resources = ARRAY_SIZE(ab8500_charger_resources),
1013 .resources = ab8500_charger_resources, 1020 .resources = ab8500_charger_resources,
1014#ifndef CONFIG_OF
1015 .platform_data = &ab8500_bm_data, 1021 .platform_data = &ab8500_bm_data,
1016 .pdata_size = sizeof(ab8500_bm_data), 1022 .pdata_size = sizeof(ab8500_bm_data),
1017#endif
1018 }, 1023 },
1019 { 1024 {
1020 .name = "ab8500-btemp", 1025 .name = "ab8500-btemp",
1021 .of_compatible = "stericsson,ab8500-btemp", 1026 .of_compatible = "stericsson,ab8500-btemp",
1022 .num_resources = ARRAY_SIZE(ab8500_btemp_resources), 1027 .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
1023 .resources = ab8500_btemp_resources, 1028 .resources = ab8500_btemp_resources,
1024#ifndef CONFIG_OF
1025 .platform_data = &ab8500_bm_data, 1029 .platform_data = &ab8500_bm_data,
1026 .pdata_size = sizeof(ab8500_bm_data), 1030 .pdata_size = sizeof(ab8500_bm_data),
1027#endif
1028 }, 1031 },
1029 { 1032 {
1030 .name = "ab8500-fg", 1033 .name = "ab8500-fg",
1031 .of_compatible = "stericsson,ab8500-fg", 1034 .of_compatible = "stericsson,ab8500-fg",
1032 .num_resources = ARRAY_SIZE(ab8500_fg_resources), 1035 .num_resources = ARRAY_SIZE(ab8500_fg_resources),
1033 .resources = ab8500_fg_resources, 1036 .resources = ab8500_fg_resources,
1034#ifndef CONFIG_OF
1035 .platform_data = &ab8500_bm_data, 1037 .platform_data = &ab8500_bm_data,
1036 .pdata_size = sizeof(ab8500_bm_data), 1038 .pdata_size = sizeof(ab8500_bm_data),
1037#endif
1038 }, 1039 },
1039 { 1040 {
1040 .name = "ab8500-chargalg", 1041 .name = "ab8500-chargalg",
1041 .of_compatible = "stericsson,ab8500-chargalg", 1042 .of_compatible = "stericsson,ab8500-chargalg",
1042 .num_resources = ARRAY_SIZE(ab8500_chargalg_resources), 1043 .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
1043 .resources = ab8500_chargalg_resources, 1044 .resources = ab8500_chargalg_resources,
1044#ifndef CONFIG_OF
1045 .platform_data = &ab8500_bm_data, 1045 .platform_data = &ab8500_bm_data,
1046 .pdata_size = sizeof(ab8500_bm_data), 1046 .pdata_size = sizeof(ab8500_bm_data),
1047#endif
1048 }, 1047 },
1049}; 1048};
1050 1049
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bc8a3edb6bbf..222c03a5ddc0 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)
239 return ret; 239 return ret;
240 } 240 }
241 241
242 regcache_sync(arizona->regmap); 242 ret = regcache_sync(arizona->regmap);
243 if (ret != 0) {
244 dev_err(arizona->dev, "Failed to restore register cache\n");
245 regulator_disable(arizona->dcvdd);
246 return ret;
247 }
243 248
244 return 0; 249 return 0;
245} 250}
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 74713bf5371f..2bec5f0db3ee 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)
176 aod = &wm5102_aod; 176 aod = &wm5102_aod;
177 irq = &wm5102_irq; 177 irq = &wm5102_irq;
178 178
179 switch (arizona->rev) { 179 ctrlif_error = false;
180 case 0:
181 case 1:
182 ctrlif_error = false;
183 break;
184 default:
185 break;
186 }
187 break; 180 break;
188#endif 181#endif
189#ifdef CONFIG_MFD_WM5110 182#ifdef CONFIG_MFD_WM5110
@@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)
191 aod = &wm5110_aod; 184 aod = &wm5110_aod;
192 irq = &wm5110_irq; 185 irq = &wm5110_irq;
193 186
194 switch (arizona->rev) { 187 ctrlif_error = false;
195 case 0:
196 case 1:
197 ctrlif_error = false;
198 break;
199 default:
200 break;
201 }
202 break; 188 break;
203#endif 189#endif
204 default: 190 default:
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ac74a4d1daea..885e56780358 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -27,6 +27,66 @@
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28#endif 28#endif
29 29
30/* I2C safe register check */
31static inline bool i2c_safe_reg(unsigned char reg)
32{
33 switch (reg) {
34 case DA9052_STATUS_A_REG:
35 case DA9052_STATUS_B_REG:
36 case DA9052_STATUS_C_REG:
37 case DA9052_STATUS_D_REG:
38 case DA9052_ADC_RES_L_REG:
39 case DA9052_ADC_RES_H_REG:
40 case DA9052_VDD_RES_REG:
41 case DA9052_ICHG_AV_REG:
42 case DA9052_TBAT_RES_REG:
43 case DA9052_ADCIN4_RES_REG:
44 case DA9052_ADCIN5_RES_REG:
45 case DA9052_ADCIN6_RES_REG:
46 case DA9052_TJUNC_RES_REG:
47 case DA9052_TSI_X_MSB_REG:
48 case DA9052_TSI_Y_MSB_REG:
49 case DA9052_TSI_LSB_REG:
50 case DA9052_TSI_Z_MSB_REG:
51 return true;
52 default:
53 return false;
54 }
55}
56
57/*
58 * There is an issue with DA9052 and DA9053_AA/BA/BB PMIC where the PMIC
59 * gets lockup up or fails to respond following a system reset.
60 * This fix is to follow any read or write with a dummy read to a safe
61 * register.
62 */
63int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
64{
65 int val;
66
67 switch (da9052->chip_id) {
68 case DA9052:
69 case DA9053_AA:
70 case DA9053_BA:
71 case DA9053_BB:
72 /* A dummy read to a safe register address. */
73 if (!i2c_safe_reg(reg))
74 return regmap_read(da9052->regmap,
75 DA9052_PARK_REGISTER,
76 &val);
77 break;
78 default:
79 /*
80 * For other chips parking of I2C register
81 * to a safe place is not required.
82 */
83 break;
84 }
85
86 return 0;
87}
88EXPORT_SYMBOL(da9052_i2c_fix);
89
30static int da9052_i2c_enable_multiwrite(struct da9052 *da9052) 90static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
31{ 91{
32 int reg_val, ret; 92 int reg_val, ret;
@@ -83,6 +143,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
83 143
84 da9052->dev = &client->dev; 144 da9052->dev = &client->dev;
85 da9052->chip_irq = client->irq; 145 da9052->chip_irq = client->irq;
146 da9052->fix_io = da9052_i2c_fix;
86 147
87 i2c_set_clientdata(client, da9052); 148 i2c_set_clientdata(client, da9052);
88 149
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index dc8826d8d69d..268f45d42394 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2524,7 +2524,7 @@ static bool read_mailbox_0(void)
2524 2524
2525 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) { 2525 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
2526 if (ev & prcmu_irq_bit[n]) 2526 if (ev & prcmu_irq_bit[n])
2527 generic_handle_irq(IRQ_PRCMU_BASE + n); 2527 generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
2528 } 2528 }
2529 r = true; 2529 r = true;
2530 break; 2530 break;
@@ -2737,13 +2737,14 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
2737} 2737}
2738 2738
2739static struct irq_domain_ops db8500_irq_ops = { 2739static struct irq_domain_ops db8500_irq_ops = {
2740 .map = db8500_irq_map, 2740 .map = db8500_irq_map,
2741 .xlate = irq_domain_xlate_twocell, 2741 .xlate = irq_domain_xlate_twocell,
2742}; 2742};
2743 2743
2744static int db8500_irq_init(struct device_node *np) 2744static int db8500_irq_init(struct device_node *np)
2745{ 2745{
2746 int irq_base = -1; 2746 int irq_base = 0;
2747 int i;
2747 2748
2748 /* In the device tree case, just take some IRQs */ 2749 /* In the device tree case, just take some IRQs */
2749 if (!np) 2750 if (!np)
@@ -2758,6 +2759,10 @@ static int db8500_irq_init(struct device_node *np)
2758 return -ENOSYS; 2759 return -ENOSYS;
2759 } 2760 }
2760 2761
2762 /* All wakeups will be used, so create mappings for all */
2763 for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
2764 irq_create_mapping(db8500_irq_domain, i);
2765
2761 return 0; 2766 return 0;
2762} 2767}
2763 2768
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f6878f8db57d..4d73963cd8f0 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
93 if (max77686 == NULL) 93 if (max77686 == NULL)
94 return -ENOMEM; 94 return -ENOMEM;
95 95
96 max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
97 if (IS_ERR(max77686->regmap)) {
98 ret = PTR_ERR(max77686->regmap);
99 dev_err(max77686->dev, "Failed to allocate register map: %d\n",
100 ret);
101 kfree(max77686);
102 return ret;
103 }
104
105 i2c_set_clientdata(i2c, max77686); 96 i2c_set_clientdata(i2c, max77686);
106 max77686->dev = &i2c->dev; 97 max77686->dev = &i2c->dev;
107 max77686->i2c = i2c; 98 max77686->i2c = i2c;
@@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
111 max77686->irq_gpio = pdata->irq_gpio; 102 max77686->irq_gpio = pdata->irq_gpio;
112 max77686->irq = i2c->irq; 103 max77686->irq = i2c->irq;
113 104
105 max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
106 if (IS_ERR(max77686->regmap)) {
107 ret = PTR_ERR(max77686->regmap);
108 dev_err(max77686->dev, "Failed to allocate register map: %d\n",
109 ret);
110 kfree(max77686);
111 return ret;
112 }
113
114 if (regmap_read(max77686->regmap, 114 if (regmap_read(max77686->regmap,
115 MAX77686_REG_DEVICE_ID, &data) < 0) { 115 MAX77686_REG_DEVICE_ID, &data) < 0) {
116 dev_err(max77686->dev, 116 dev_err(max77686->dev,
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cc5155e20494..9e60fed5ff82 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
114 u8 reg_data; 114 u8 reg_data;
115 int ret = 0; 115 int ret = 0;
116 116
117 if (!pdata) {
118 dev_err(&i2c->dev, "No platform data found.\n");
119 return -EINVAL;
120 }
121
117 max77693 = devm_kzalloc(&i2c->dev, 122 max77693 = devm_kzalloc(&i2c->dev,
118 sizeof(struct max77693_dev), GFP_KERNEL); 123 sizeof(struct max77693_dev), GFP_KERNEL);
119 if (max77693 == NULL) 124 if (max77693 == NULL)
120 return -ENOMEM; 125 return -ENOMEM;
121 126
122 max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
123 if (IS_ERR(max77693->regmap)) {
124 ret = PTR_ERR(max77693->regmap);
125 dev_err(max77693->dev,"failed to allocate register map: %d\n",
126 ret);
127 goto err_regmap;
128 }
129
130 i2c_set_clientdata(i2c, max77693); 127 i2c_set_clientdata(i2c, max77693);
131 max77693->dev = &i2c->dev; 128 max77693->dev = &i2c->dev;
132 max77693->i2c = i2c; 129 max77693->i2c = i2c;
133 max77693->irq = i2c->irq; 130 max77693->irq = i2c->irq;
134 max77693->type = id->driver_data; 131 max77693->type = id->driver_data;
135 132
136 if (!pdata) 133 max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
137 goto err_regmap; 134 if (IS_ERR(max77693->regmap)) {
135 ret = PTR_ERR(max77693->regmap);
136 dev_err(max77693->dev, "failed to allocate register map: %d\n",
137 ret);
138 return ret;
139 }
138 140
139 max77693->wakeup = pdata->wakeup; 141 max77693->wakeup = pdata->wakeup;
140 142
141 if (max77693_read_reg(max77693->regmap, 143 ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
142 MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) { 144 &reg_data);
145 if (ret < 0) {
143 dev_err(max77693->dev, "device not found on this channel\n"); 146 dev_err(max77693->dev, "device not found on this channel\n");
144 ret = -ENODEV; 147 return ret;
145 goto err_regmap;
146 } else 148 } else
147 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); 149 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
148 150
@@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
163 ret = PTR_ERR(max77693->regmap_muic); 165 ret = PTR_ERR(max77693->regmap_muic);
164 dev_err(max77693->dev, 166 dev_err(max77693->dev,
165 "failed to allocate register map: %d\n", ret); 167 "failed to allocate register map: %d\n", ret);
166 goto err_regmap; 168 goto err_regmap_muic;
167 } 169 }
168 170
169 ret = max77693_irq_init(max77693); 171 ret = max77693_irq_init(max77693);
@@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
184err_mfd: 186err_mfd:
185 max77693_irq_exit(max77693); 187 max77693_irq_exit(max77693);
186err_irq: 188err_irq:
189err_regmap_muic:
187 i2c_unregister_device(max77693->muic); 190 i2c_unregister_device(max77693->muic);
188 i2c_unregister_device(max77693->haptic); 191 i2c_unregister_device(max77693->haptic);
189err_regmap:
190 return ret; 192 return ret;
191} 193}
192 194
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 64803f13bcec..d11567307fbe 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -208,6 +208,8 @@ static int pcf50633_probe(struct i2c_client *client,
208 if (!pcf) 208 if (!pcf)
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 i2c_set_clientdata(client, pcf);
212 pcf->dev = &client->dev;
211 pcf->pdata = pdata; 213 pcf->pdata = pdata;
212 214
213 mutex_init(&pcf->lock); 215 mutex_init(&pcf->lock);
@@ -219,9 +221,6 @@ static int pcf50633_probe(struct i2c_client *client,
219 return ret; 221 return ret;
220 } 222 }
221 223
222 i2c_set_clientdata(client, pcf);
223 pcf->dev = &client->dev;
224
225 version = pcf50633_reg_read(pcf, 0); 224 version = pcf50633_reg_read(pcf, 0);
226 variant = pcf50633_reg_read(pcf, 1); 225 variant = pcf50633_reg_read(pcf, 1);
227 if (version < 0 || variant < 0) { 226 if (version < 0 || variant < 0) {
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 89f046ca9e41..3d3b4addf81a 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -112,6 +112,21 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
112 BPP_LDO_POWB, BPP_LDO_SUSPEND); 112 BPP_LDO_POWB, BPP_LDO_SUSPEND);
113} 113}
114 114
115static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
116{
117 u8 mask, val;
118
119 mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
120 if (voltage == OUTPUT_3V3)
121 val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
122 else if (voltage == OUTPUT_1V8)
123 val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
124 else
125 return -EINVAL;
126
127 return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
128}
129
115static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr) 130static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
116{ 131{
117 unsigned int card_exist; 132 unsigned int card_exist;
@@ -163,6 +178,18 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
163 return card_exist; 178 return card_exist;
164} 179}
165 180
181static int rtl8411_conv_clk_and_div_n(int input, int dir)
182{
183 int output;
184
185 if (dir == CLK_TO_DIV_N)
186 output = input * 4 / 5 - 2;
187 else
188 output = (input + 2) * 5 / 4;
189
190 return output;
191}
192
166static const struct pcr_ops rtl8411_pcr_ops = { 193static const struct pcr_ops rtl8411_pcr_ops = {
167 .extra_init_hw = rtl8411_extra_init_hw, 194 .extra_init_hw = rtl8411_extra_init_hw,
168 .optimize_phy = NULL, 195 .optimize_phy = NULL,
@@ -172,7 +199,9 @@ static const struct pcr_ops rtl8411_pcr_ops = {
172 .disable_auto_blink = rtl8411_disable_auto_blink, 199 .disable_auto_blink = rtl8411_disable_auto_blink,
173 .card_power_on = rtl8411_card_power_on, 200 .card_power_on = rtl8411_card_power_on,
174 .card_power_off = rtl8411_card_power_off, 201 .card_power_off = rtl8411_card_power_off,
202 .switch_output_voltage = rtl8411_switch_output_voltage,
175 .cd_deglitch = rtl8411_cd_deglitch, 203 .cd_deglitch = rtl8411_cd_deglitch,
204 .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
176}; 205};
177 206
178/* SD Pull Control Enable: 207/* SD Pull Control Enable:
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index 283a4f148084..98fe0f39463e 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -144,6 +144,25 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
144 return rtsx_pci_send_cmd(pcr, 100); 144 return rtsx_pci_send_cmd(pcr, 100);
145} 145}
146 146
147static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
148{
149 int err;
150
151 if (voltage == OUTPUT_3V3) {
152 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
153 if (err < 0)
154 return err;
155 } else if (voltage == OUTPUT_1V8) {
156 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
157 if (err < 0)
158 return err;
159 } else {
160 return -EINVAL;
161 }
162
163 return 0;
164}
165
147static const struct pcr_ops rts5209_pcr_ops = { 166static const struct pcr_ops rts5209_pcr_ops = {
148 .extra_init_hw = rts5209_extra_init_hw, 167 .extra_init_hw = rts5209_extra_init_hw,
149 .optimize_phy = rts5209_optimize_phy, 168 .optimize_phy = rts5209_optimize_phy,
@@ -153,7 +172,9 @@ static const struct pcr_ops rts5209_pcr_ops = {
153 .disable_auto_blink = rts5209_disable_auto_blink, 172 .disable_auto_blink = rts5209_disable_auto_blink,
154 .card_power_on = rts5209_card_power_on, 173 .card_power_on = rts5209_card_power_on,
155 .card_power_off = rts5209_card_power_off, 174 .card_power_off = rts5209_card_power_off,
175 .switch_output_voltage = rts5209_switch_output_voltage,
156 .cd_deglitch = NULL, 176 .cd_deglitch = NULL,
177 .conv_clk_and_div_n = NULL,
157}; 178};
158 179
159/* SD Pull Control Enable: 180/* SD Pull Control Enable:
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index b9dbab266fda..29d889cbb9c5 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -114,6 +114,25 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
114 return rtsx_pci_send_cmd(pcr, 100); 114 return rtsx_pci_send_cmd(pcr, 100);
115} 115}
116 116
117static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
118{
119 int err;
120
121 if (voltage == OUTPUT_3V3) {
122 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
123 if (err < 0)
124 return err;
125 } else if (voltage == OUTPUT_1V8) {
126 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
127 if (err < 0)
128 return err;
129 } else {
130 return -EINVAL;
131 }
132
133 return 0;
134}
135
117static const struct pcr_ops rts5229_pcr_ops = { 136static const struct pcr_ops rts5229_pcr_ops = {
118 .extra_init_hw = rts5229_extra_init_hw, 137 .extra_init_hw = rts5229_extra_init_hw,
119 .optimize_phy = rts5229_optimize_phy, 138 .optimize_phy = rts5229_optimize_phy,
@@ -123,7 +142,9 @@ static const struct pcr_ops rts5229_pcr_ops = {
123 .disable_auto_blink = rts5229_disable_auto_blink, 142 .disable_auto_blink = rts5229_disable_auto_blink,
124 .card_power_on = rts5229_card_power_on, 143 .card_power_on = rts5229_card_power_on,
125 .card_power_off = rts5229_card_power_off, 144 .card_power_off = rts5229_card_power_off,
145 .switch_output_voltage = rts5229_switch_output_voltage,
126 .cd_deglitch = NULL, 146 .cd_deglitch = NULL,
147 .conv_clk_and_div_n = NULL,
127}; 148};
128 149
129/* SD Pull Control Enable: 150/* SD Pull Control Enable:
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 7a7b0bda4618..9fc57009e228 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -630,7 +630,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
630 if (clk == pcr->cur_clock) 630 if (clk == pcr->cur_clock)
631 return 0; 631 return 0;
632 632
633 N = (u8)(clk - 2); 633 if (pcr->ops->conv_clk_and_div_n)
634 N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
635 else
636 N = (u8)(clk - 2);
634 if ((clk <= 2) || (N > max_N)) 637 if ((clk <= 2) || (N > max_N))
635 return -EINVAL; 638 return -EINVAL;
636 639
@@ -641,7 +644,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
641 /* Make sure that the SSC clock div_n is equal or greater than min_N */ 644 /* Make sure that the SSC clock div_n is equal or greater than min_N */
642 div = CLK_DIV_1; 645 div = CLK_DIV_1;
643 while ((N < min_N) && (div < max_div)) { 646 while ((N < min_N) && (div < max_div)) {
644 N = (N + 2) * 2 - 2; 647 if (pcr->ops->conv_clk_and_div_n) {
648 int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
649 DIV_N_TO_CLK) * 2;
650 N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
651 CLK_TO_DIV_N);
652 } else {
653 N = (N + 2) * 2 - 2;
654 }
645 div++; 655 div++;
646 } 656 }
647 dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div); 657 dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
@@ -703,6 +713,15 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
703} 713}
704EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); 714EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
705 715
716int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
717{
718 if (pcr->ops->switch_output_voltage)
719 return pcr->ops->switch_output_voltage(pcr, voltage);
720
721 return 0;
722}
723EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
724
706unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr) 725unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
707{ 726{
708 unsigned int val; 727 unsigned int val;
@@ -767,10 +786,10 @@ static void rtsx_pci_card_detect(struct work_struct *work)
767 786
768 spin_unlock_irqrestore(&pcr->lock, flags); 787 spin_unlock_irqrestore(&pcr->lock, flags);
769 788
770 if (card_detect & SD_EXIST) 789 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
771 pcr->slots[RTSX_SD_CARD].card_event( 790 pcr->slots[RTSX_SD_CARD].card_event(
772 pcr->slots[RTSX_SD_CARD].p_dev); 791 pcr->slots[RTSX_SD_CARD].p_dev);
773 if (card_detect & MS_EXIST) 792 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
774 pcr->slots[RTSX_MS_CARD].card_event( 793 pcr->slots[RTSX_MS_CARD].card_event(
775 pcr->slots[RTSX_MS_CARD].p_dev); 794 pcr->slots[RTSX_MS_CARD].p_dev);
776} 795}
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 49d361a618d0..77ee26ef5941 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/of_irq.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/pm_runtime.h> 22#include <linux/pm_runtime.h>
22#include <linux/mutex.h> 23#include <linux/mutex.h>
@@ -60,6 +61,15 @@ static struct mfd_cell s2mps11_devs[] = {
60 }, 61 },
61}; 62};
62 63
64#ifdef CONFIG_OF
65static struct of_device_id sec_dt_match[] = {
66 { .compatible = "samsung,s5m8767-pmic",
67 .data = (void *)S5M8767X,
68 },
69 {},
70};
71#endif
72
63int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) 73int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
64{ 74{
65 return regmap_read(sec_pmic->regmap, reg, dest); 75 return regmap_read(sec_pmic->regmap, reg, dest);
@@ -95,6 +105,57 @@ static struct regmap_config sec_regmap_config = {
95 .val_bits = 8, 105 .val_bits = 8,
96}; 106};
97 107
108
109#ifdef CONFIG_OF
110/*
111 * Only the common platform data elements for s5m8767 are parsed here from the
112 * device tree. Other sub-modules of s5m8767 such as pmic, rtc , charger and
113 * others have to parse their own platform data elements from device tree.
114 *
115 * The s5m8767 platform data structure is instantiated here and the drivers for
116 * the sub-modules need not instantiate another instance while parsing their
117 * platform data.
118 */
119static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
120 struct device *dev)
121{
122 struct sec_platform_data *pd;
123
124 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
125 if (!pd) {
126 dev_err(dev, "could not allocate memory for pdata\n");
127 return ERR_PTR(-ENOMEM);
128 }
129
130 /*
131 * ToDo: the 'wakeup' member in the platform data is more of a linux
132 * specfic information. Hence, there is no binding for that yet and
133 * not parsed here.
134 */
135
136 return pd;
137}
138#else
139static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
140 struct device *dev)
141{
142 return 0;
143}
144#endif
145
146static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
147 const struct i2c_device_id *id)
148{
149#ifdef CONFIG_OF
150 if (i2c->dev.of_node) {
151 const struct of_device_id *match;
152 match = of_match_node(sec_dt_match, i2c->dev.of_node);
153 return (int)match->data;
154 }
155#endif
156 return (int)id->driver_data;
157}
158
98static int sec_pmic_probe(struct i2c_client *i2c, 159static int sec_pmic_probe(struct i2c_client *i2c,
99 const struct i2c_device_id *id) 160 const struct i2c_device_id *id)
100{ 161{
@@ -111,13 +172,22 @@ static int sec_pmic_probe(struct i2c_client *i2c,
111 sec_pmic->dev = &i2c->dev; 172 sec_pmic->dev = &i2c->dev;
112 sec_pmic->i2c = i2c; 173 sec_pmic->i2c = i2c;
113 sec_pmic->irq = i2c->irq; 174 sec_pmic->irq = i2c->irq;
114 sec_pmic->type = id->driver_data; 175 sec_pmic->type = sec_i2c_get_driver_data(i2c, id);
115 176
177 if (sec_pmic->dev->of_node) {
178 pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
179 if (IS_ERR(pdata)) {
180 ret = PTR_ERR(pdata);
181 return ret;
182 }
183 pdata->device_type = sec_pmic->type;
184 }
116 if (pdata) { 185 if (pdata) {
117 sec_pmic->device_type = pdata->device_type; 186 sec_pmic->device_type = pdata->device_type;
118 sec_pmic->ono = pdata->ono; 187 sec_pmic->ono = pdata->ono;
119 sec_pmic->irq_base = pdata->irq_base; 188 sec_pmic->irq_base = pdata->irq_base;
120 sec_pmic->wakeup = pdata->wakeup; 189 sec_pmic->wakeup = pdata->wakeup;
190 sec_pmic->pdata = pdata;
121 } 191 }
122 192
123 sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config); 193 sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config);
@@ -192,6 +262,7 @@ static struct i2c_driver sec_pmic_driver = {
192 .driver = { 262 .driver = {
193 .name = "sec_pmic", 263 .name = "sec_pmic",
194 .owner = THIS_MODULE, 264 .owner = THIS_MODULE,
265 .of_match_table = of_match_ptr(sec_dt_match),
195 }, 266 },
196 .probe = sec_pmic_probe, 267 .probe = sec_pmic_probe,
197 .remove = sec_pmic_remove, 268 .remove = sec_pmic_remove,
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index a06d66b929b1..ecc092c7f745 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
219} 219}
220 220
221static struct irq_domain_ops tc3589x_irq_ops = { 221static struct irq_domain_ops tc3589x_irq_ops = {
222 .map = tc3589x_irq_map, 222 .map = tc3589x_irq_map,
223 .unmap = tc3589x_irq_unmap, 223 .unmap = tc3589x_irq_unmap,
224 .xlate = irq_domain_xlate_twocell, 224 .xlate = irq_domain_xlate_twocell,
225}; 225};
226 226
227static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np) 227static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
228{ 228{
229 int base = tc3589x->irq_base; 229 int base = tc3589x->irq_base;
230 230
231 if (base) { 231 tc3589x->domain = irq_domain_add_simple(
232 tc3589x->domain = irq_domain_add_legacy( 232 np, TC3589x_NR_INTERNAL_IRQS, base,
233 NULL, TC3589x_NR_INTERNAL_IRQS, base, 233 &tc3589x_irq_ops, tc3589x);
234 0, &tc3589x_irq_ops, tc3589x);
235 }
236 else {
237 tc3589x->domain = irq_domain_add_linear(
238 np, TC3589x_NR_INTERNAL_IRQS,
239 &tc3589x_irq_ops, tc3589x);
240 }
241 234
242 if (!tc3589x->domain) { 235 if (!tc3589x->domain) {
243 dev_err(tc3589x->dev, "Failed to create irqdomain\n"); 236 dev_err(tc3589x->dev, "Failed to create irqdomain\n");
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 4dae241e5017..dd362c1078e1 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -159,7 +159,7 @@ out:
159static int twl4030_write_script(u8 address, struct twl4030_ins *script, 159static int twl4030_write_script(u8 address, struct twl4030_ins *script,
160 int len) 160 int len)
161{ 161{
162 int err; 162 int err = -EINVAL;
163 163
164 for (; len; len--, address++, script++) { 164 for (; len; len--, address++, script++) {
165 if (len == 1) { 165 if (len == 1) {
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index fae15d880758..3c1723aa6225 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -67,6 +67,7 @@ struct vexpress_config_bridge *vexpress_config_bridge_register(
67 67
68 return bridge; 68 return bridge;
69} 69}
70EXPORT_SYMBOL(vexpress_config_bridge_register);
70 71
71void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge) 72void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
72{ 73{
@@ -83,6 +84,7 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
83 while (!list_empty(&__bridge.transactions)) 84 while (!list_empty(&__bridge.transactions))
84 cpu_relax(); 85 cpu_relax();
85} 86}
87EXPORT_SYMBOL(vexpress_config_bridge_unregister);
86 88
87 89
88struct vexpress_config_func { 90struct vexpress_config_func {
@@ -142,6 +144,7 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
142 144
143 return func; 145 return func;
144} 146}
147EXPORT_SYMBOL(__vexpress_config_func_get);
145 148
146void vexpress_config_func_put(struct vexpress_config_func *func) 149void vexpress_config_func_put(struct vexpress_config_func *func)
147{ 150{
@@ -149,7 +152,7 @@ void vexpress_config_func_put(struct vexpress_config_func *func)
149 of_node_put(func->bridge->node); 152 of_node_put(func->bridge->node);
150 kfree(func); 153 kfree(func);
151} 154}
152 155EXPORT_SYMBOL(vexpress_config_func_put);
153 156
154struct vexpress_config_trans { 157struct vexpress_config_trans {
155 struct vexpress_config_func *func; 158 struct vexpress_config_func *func;
@@ -229,6 +232,7 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
229 232
230 complete(&trans->completion); 233 complete(&trans->completion);
231} 234}
235EXPORT_SYMBOL(vexpress_config_complete);
232 236
233int vexpress_config_wait(struct vexpress_config_trans *trans) 237int vexpress_config_wait(struct vexpress_config_trans *trans)
234{ 238{
@@ -236,7 +240,7 @@ int vexpress_config_wait(struct vexpress_config_trans *trans)
236 240
237 return trans->status; 241 return trans->status;
238} 242}
239 243EXPORT_SYMBOL(vexpress_config_wait);
240 244
241int vexpress_config_read(struct vexpress_config_func *func, int offset, 245int vexpress_config_read(struct vexpress_config_func *func, int offset,
242 u32 *data) 246 u32 *data)
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index e5d8f63b252a..77048b18439e 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)
313} 313}
314 314
315 315
316void __init vexpress_sysreg_early_init(void __iomem *base) 316void __init vexpress_sysreg_setup(struct device_node *node)
317{ 317{
318 struct device_node *node = of_find_compatible_node(NULL, NULL, 318 if (WARN_ON(!vexpress_sysreg_base))
319 "arm,vexpress-sysreg");
320
321 if (node)
322 base = of_iomap(node, 0);
323
324 if (WARN_ON(!base))
325 return; 319 return;
326 320
327 vexpress_sysreg_base = base;
328
329 if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) 321 if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
330 vexpress_master_site = VEXPRESS_SITE_DB2; 322 vexpress_master_site = VEXPRESS_SITE_DB2;
331 else 323 else
@@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
336 WARN_ON(!vexpress_sysreg_config_bridge); 328 WARN_ON(!vexpress_sysreg_config_bridge);
337} 329}
338 330
331void __init vexpress_sysreg_early_init(void __iomem *base)
332{
333 vexpress_sysreg_base = base;
334 vexpress_sysreg_setup(NULL);
335}
336
339void __init vexpress_sysreg_of_early_init(void) 337void __init vexpress_sysreg_of_early_init(void)
340{ 338{
341 vexpress_sysreg_early_init(NULL); 339 struct device_node *node = of_find_compatible_node(NULL, NULL,
340 "arm,vexpress-sysreg");
341
342 if (node) {
343 vexpress_sysreg_base = of_iomap(node, 0);
344 vexpress_sysreg_setup(node);
345 } else {
346 pr_info("vexpress-sysreg: No Device Tree node found.");
347 }
342} 348}
343 349
344 350
@@ -426,9 +432,11 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
426 return -EBUSY; 432 return -EBUSY;
427 } 433 }
428 434
429 if (!vexpress_sysreg_base) 435 if (!vexpress_sysreg_base) {
430 vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, 436 vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
431 resource_size(res)); 437 resource_size(res));
438 vexpress_sysreg_setup(pdev->dev.of_node);
439 }
432 440
433 if (!vexpress_sysreg_base) { 441 if (!vexpress_sysreg_base) {
434 dev_err(&pdev->dev, "Failed to obtain base address!\n"); 442 dev_err(&pdev->dev, "Failed to obtain base address!\n");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 088872ab6338..f6fcb87b3504 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -96,6 +96,7 @@ const struct regmap_irq_chip wm5102_aod = {
96 .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1, 96 .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
97 .ack_base = ARIZONA_AOD_IRQ1, 97 .ack_base = ARIZONA_AOD_IRQ1,
98 .wake_base = ARIZONA_WAKE_CONTROL, 98 .wake_base = ARIZONA_WAKE_CONTROL,
99 .wake_invert = 1,
99 .num_regs = 1, 100 .num_regs = 1,
100 .irqs = wm5102_aod_irqs, 101 .irqs = wm5102_aod_irqs,
101 .num_irqs = ARRAY_SIZE(wm5102_aod_irqs), 102 .num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
@@ -1882,7 +1883,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
1882 } 1883 }
1883} 1884}
1884 1885
1885#define WM5102_MAX_REGISTER 0x1a8fff 1886#define WM5102_MAX_REGISTER 0x1a9800
1886 1887
1887const struct regmap_config wm5102_spi_regmap = { 1888const struct regmap_config wm5102_spi_regmap = {
1888 .reg_bits = 32, 1889 .reg_bits = 32,
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index adda6b10b90d..c41599815299 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -255,6 +255,7 @@ const struct regmap_irq_chip wm5110_aod = {
255 .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1, 255 .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
256 .ack_base = ARIZONA_AOD_IRQ1, 256 .ack_base = ARIZONA_AOD_IRQ1,
257 .wake_base = ARIZONA_WAKE_CONTROL, 257 .wake_base = ARIZONA_WAKE_CONTROL,
258 .wake_invert = 1,
258 .num_regs = 1, 259 .num_regs = 1,
259 .irqs = wm5110_aod_irqs, 260 .irqs = wm5110_aod_irqs,
260 .num_irqs = ARRAY_SIZE(wm5110_aod_irqs), 261 .num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 492c8cac69ac..44d273c5e19d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -517,7 +517,7 @@ static int __init gru_init(void)
517{ 517{
518 int ret; 518 int ret;
519 519
520 if (!is_uv_system()) 520 if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub()))
521 return 0; 521 return 0;
522 522
523#if defined CONFIG_IA64 523#if defined CONFIG_IA64
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 9ff942a346ed..83269f1d16e3 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
468 if (pdata->chip_enable) 468 if (pdata->chip_enable)
469 pdata->chip_enable(kim_gdata); 469 pdata->chip_enable(kim_gdata);
470 470
471 /* Configure BT nShutdown to HIGH state */
472 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
473 mdelay(5); /* FIXME: a proper toggle */
474 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
475 mdelay(100);
471 /* re-initialize the completion */ 476 /* re-initialize the completion */
472 INIT_COMPLETION(kim_gdata->ldisc_installed); 477 INIT_COMPLETION(kim_gdata->ldisc_installed);
473 /* send notification to UIM */ 478 /* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
509 * (b) upon failure to either install ldisc or download firmware. 514 * (b) upon failure to either install ldisc or download firmware.
510 * The function is responsible to (a) notify UIM about un-installation, 515 * The function is responsible to (a) notify UIM about un-installation,
511 * (b) flush UART if the ldisc was installed. 516 * (b) flush UART if the ldisc was installed.
512 * (c) invoke platform's chip disabling routine. 517 * (c) reset BT_EN - pull down nshutdown at the end.
518 * (d) invoke platform's chip disabling routine.
513 */ 519 */
514long st_kim_stop(void *kim_data) 520long st_kim_stop(void *kim_data)
515{ 521{
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
541 err = -ETIMEDOUT; 547 err = -ETIMEDOUT;
542 } 548 }
543 549
550 /* By default configure BT nShutdown to LOW state */
551 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
552 mdelay(1);
553 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
554 mdelay(1);
555 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
556
544 /* platform specific disable */ 557 /* platform specific disable */
545 if (pdata->chip_disable) 558 if (pdata->chip_disable)
546 pdata->chip_disable(kim_gdata); 559 pdata->chip_disable(kim_gdata);
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)
733 /* refer to itself */ 746 /* refer to itself */
734 kim_gdata->core_data->kim_data = kim_gdata; 747 kim_gdata->core_data->kim_data = kim_gdata;
735 748
749 /* Claim the chip enable nShutdown gpio from the system */
750 kim_gdata->nshutdown = pdata->nshutdown_gpio;
751 err = gpio_request(kim_gdata->nshutdown, "kim");
752 if (unlikely(err)) {
753 pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
754 return err;
755 }
756
757 /* Configure nShutdown GPIO as output=0 */
758 err = gpio_direction_output(kim_gdata->nshutdown, 0);
759 if (unlikely(err)) {
760 pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
761 return err;
762 }
736 /* get reference of pdev for request_firmware 763 /* get reference of pdev for request_firmware
737 */ 764 */
738 kim_gdata->kim_pdev = pdev; 765 kim_gdata->kim_pdev = pdev;
@@ -779,10 +806,18 @@ err_core_init:
779 806
780static int kim_remove(struct platform_device *pdev) 807static int kim_remove(struct platform_device *pdev)
781{ 808{
809 /* free the GPIOs requested */
810 struct ti_st_plat_data *pdata = pdev->dev.platform_data;
782 struct kim_data_s *kim_gdata; 811 struct kim_data_s *kim_gdata;
783 812
784 kim_gdata = dev_get_drvdata(&pdev->dev); 813 kim_gdata = dev_get_drvdata(&pdev->dev);
785 814
815 /* Free the Bluetooth/FM/GPIO
816 * nShutdown gpio from the system
817 */
818 gpio_free(pdata->nshutdown_gpio);
819 pr_info("nshutdown GPIO Freed");
820
786 debugfs_remove_recursive(kim_debugfs_dir); 821 debugfs_remove_recursive(kim_debugfs_dir);
787 sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); 822 sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
788 pr_info("sysfs entries removed"); 823 pr_info("sysfs entries removed");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 150772395cc6..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -20,6 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/highmem.h> 21#include <linux/highmem.h>
22#include <linux/log2.h> 22#include <linux/log2.h>
23#include <linux/mmc/pm.h>
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24#include <linux/mmc/card.h> 25#include <linux/mmc/card.h>
25#include <linux/amba/bus.h> 26#include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
59 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register 60 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
60 * @pwrreg_powerup: power up value for MMCIPOWER register 61 * @pwrreg_powerup: power up value for MMCIPOWER register
61 * @signal_direction: input/out direction of bus signals can be indicated 62 * @signal_direction: input/out direction of bus signals can be indicated
63 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
62 */ 64 */
63struct variant_data { 65struct variant_data {
64 unsigned int clkreg; 66 unsigned int clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
71 bool blksz_datactrl16; 73 bool blksz_datactrl16;
72 u32 pwrreg_powerup; 74 u32 pwrreg_powerup;
73 bool signal_direction; 75 bool signal_direction;
76 bool pwrreg_clkgate;
74}; 77};
75 78
76static struct variant_data variant_arm = { 79static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
87 .pwrreg_powerup = MCI_PWR_UP, 90 .pwrreg_powerup = MCI_PWR_UP,
88}; 91};
89 92
93static struct variant_data variant_arm_extended_fifo_hwfc = {
94 .fifosize = 128 * 4,
95 .fifohalfsize = 64 * 4,
96 .clkreg_enable = MCI_ARM_HWFCEN,
97 .datalength_bits = 16,
98 .pwrreg_powerup = MCI_PWR_UP,
99};
100
90static struct variant_data variant_u300 = { 101static struct variant_data variant_u300 = {
91 .fifosize = 16 * 4, 102 .fifosize = 16 * 4,
92 .fifohalfsize = 8 * 4, 103 .fifohalfsize = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
95 .sdio = true, 106 .sdio = true,
96 .pwrreg_powerup = MCI_PWR_ON, 107 .pwrreg_powerup = MCI_PWR_ON,
97 .signal_direction = true, 108 .signal_direction = true,
109 .pwrreg_clkgate = true,
98}; 110};
99 111
100static struct variant_data variant_nomadik = { 112static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
106 .st_clkdiv = true, 118 .st_clkdiv = true,
107 .pwrreg_powerup = MCI_PWR_ON, 119 .pwrreg_powerup = MCI_PWR_ON,
108 .signal_direction = true, 120 .signal_direction = true,
121 .pwrreg_clkgate = true,
109}; 122};
110 123
111static struct variant_data variant_ux500 = { 124static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
118 .st_clkdiv = true, 131 .st_clkdiv = true,
119 .pwrreg_powerup = MCI_PWR_ON, 132 .pwrreg_powerup = MCI_PWR_ON,
120 .signal_direction = true, 133 .signal_direction = true,
134 .pwrreg_clkgate = true,
121}; 135};
122 136
123static struct variant_data variant_ux500v2 = { 137static struct variant_data variant_ux500v2 = {
@@ -131,9 +145,28 @@ static struct variant_data variant_ux500v2 = {
131 .blksz_datactrl16 = true, 145 .blksz_datactrl16 = true,
132 .pwrreg_powerup = MCI_PWR_ON, 146 .pwrreg_powerup = MCI_PWR_ON,
133 .signal_direction = true, 147 .signal_direction = true,
148 .pwrreg_clkgate = true,
134}; 149};
135 150
136/* 151/*
152 * Validate mmc prerequisites
153 */
154static int mmci_validate_data(struct mmci_host *host,
155 struct mmc_data *data)
156{
157 if (!data)
158 return 0;
159
160 if (!is_power_of_2(data->blksz)) {
161 dev_err(mmc_dev(host->mmc),
162 "unsupported block size (%d bytes)\n", data->blksz);
163 return -EINVAL;
164 }
165
166 return 0;
167}
168
169/*
137 * This must be called with host->lock held 170 * This must be called with host->lock held
138 */ 171 */
139static void mmci_write_clkreg(struct mmci_host *host, u32 clk) 172static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
202 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 235 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
203 clk |= MCI_ST_8BIT_BUS; 236 clk |= MCI_ST_8BIT_BUS;
204 237
238 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
239 clk |= MCI_ST_UX500_NEG_EDGE;
240
205 mmci_write_clkreg(host, clk); 241 mmci_write_clkreg(host, clk);
206} 242}
207 243
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
352 host->dma_rx_channel = host->dma_tx_channel = NULL; 388 host->dma_rx_channel = host->dma_tx_channel = NULL;
353} 389}
354 390
391static void mmci_dma_data_error(struct mmci_host *host)
392{
393 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
394 dmaengine_terminate_all(host->dma_current);
395 host->dma_current = NULL;
396 host->dma_desc_current = NULL;
397 host->data->host_cookie = 0;
398}
399
355static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 400static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
356{ 401{
357 struct dma_chan *chan = host->dma_current; 402 struct dma_chan *chan;
358 enum dma_data_direction dir; 403 enum dma_data_direction dir;
404
405 if (data->flags & MMC_DATA_READ) {
406 dir = DMA_FROM_DEVICE;
407 chan = host->dma_rx_channel;
408 } else {
409 dir = DMA_TO_DEVICE;
410 chan = host->dma_tx_channel;
411 }
412
413 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
414}
415
416static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
417{
359 u32 status; 418 u32 status;
360 int i; 419 int i;
361 420
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
374 * contiguous buffers. On TX, we'll get a FIFO underrun error. 433 * contiguous buffers. On TX, we'll get a FIFO underrun error.
375 */ 434 */
376 if (status & MCI_RXDATAAVLBLMASK) { 435 if (status & MCI_RXDATAAVLBLMASK) {
377 dmaengine_terminate_all(chan); 436 mmci_dma_data_error(host);
378 if (!data->error) 437 if (!data->error)
379 data->error = -EIO; 438 data->error = -EIO;
380 } 439 }
381 440
382 if (data->flags & MMC_DATA_WRITE) {
383 dir = DMA_TO_DEVICE;
384 } else {
385 dir = DMA_FROM_DEVICE;
386 }
387
388 if (!data->host_cookie) 441 if (!data->host_cookie)
389 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 442 mmci_dma_unmap(host, data);
390 443
391 /* 444 /*
392 * Use of DMA with scatter-gather is impossible. 445 * Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
396 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); 449 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
397 mmci_dma_release(host); 450 mmci_dma_release(host);
398 } 451 }
399}
400 452
401static void mmci_dma_data_error(struct mmci_host *host) 453 host->dma_current = NULL;
402{ 454 host->dma_desc_current = NULL;
403 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
404 dmaengine_terminate_all(host->dma_current);
405} 455}
406 456
407static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, 457/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
408 struct mmci_host_next *next) 458static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
459 struct dma_chan **dma_chan,
460 struct dma_async_tx_descriptor **dma_desc)
409{ 461{
410 struct variant_data *variant = host->variant; 462 struct variant_data *variant = host->variant;
411 struct dma_slave_config conf = { 463 struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
423 enum dma_data_direction buffer_dirn; 475 enum dma_data_direction buffer_dirn;
424 int nr_sg; 476 int nr_sg;
425 477
426 /* Check if next job is already prepared */
427 if (data->host_cookie && !next &&
428 host->dma_current && host->dma_desc_current)
429 return 0;
430
431 if (!next) {
432 host->dma_current = NULL;
433 host->dma_desc_current = NULL;
434 }
435
436 if (data->flags & MMC_DATA_READ) { 478 if (data->flags & MMC_DATA_READ) {
437 conf.direction = DMA_DEV_TO_MEM; 479 conf.direction = DMA_DEV_TO_MEM;
438 buffer_dirn = DMA_FROM_DEVICE; 480 buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
462 if (!desc) 504 if (!desc)
463 goto unmap_exit; 505 goto unmap_exit;
464 506
465 if (next) { 507 *dma_chan = chan;
466 next->dma_chan = chan; 508 *dma_desc = desc;
467 next->dma_desc = desc;
468 } else {
469 host->dma_current = chan;
470 host->dma_desc_current = desc;
471 }
472 509
473 return 0; 510 return 0;
474 511
475 unmap_exit: 512 unmap_exit:
476 if (!next)
477 dmaengine_terminate_all(chan);
478 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 513 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
479 return -ENOMEM; 514 return -ENOMEM;
480} 515}
481 516
517static inline int mmci_dma_prep_data(struct mmci_host *host,
518 struct mmc_data *data)
519{
520 /* Check if next job is already prepared. */
521 if (host->dma_current && host->dma_desc_current)
522 return 0;
523
524 /* No job were prepared thus do it now. */
525 return __mmci_dma_prep_data(host, data, &host->dma_current,
526 &host->dma_desc_current);
527}
528
529static inline int mmci_dma_prep_next(struct mmci_host *host,
530 struct mmc_data *data)
531{
532 struct mmci_host_next *nd = &host->next_data;
533 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
534}
535
482static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 536static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
483{ 537{
484 int ret; 538 int ret;
485 struct mmc_data *data = host->data; 539 struct mmc_data *data = host->data;
486 540
487 ret = mmci_dma_prep_data(host, host->data, NULL); 541 ret = mmci_dma_prep_data(host, host->data);
488 if (ret) 542 if (ret)
489 return ret; 543 return ret;
490 544
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
514{ 568{
515 struct mmci_host_next *next = &host->next_data; 569 struct mmci_host_next *next = &host->next_data;
516 570
517 if (data->host_cookie && data->host_cookie != next->cookie) { 571 WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
518 pr_warning("[%s] invalid cookie: data->host_cookie %d" 572 WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
519 " host->next_data.cookie %d\n",
520 __func__, data->host_cookie, host->next_data.cookie);
521 data->host_cookie = 0;
522 }
523
524 if (!data->host_cookie)
525 return;
526 573
527 host->dma_desc_current = next->dma_desc; 574 host->dma_desc_current = next->dma_desc;
528 host->dma_current = next->dma_chan; 575 host->dma_current = next->dma_chan;
529
530 next->dma_desc = NULL; 576 next->dma_desc = NULL;
531 next->dma_chan = NULL; 577 next->dma_chan = NULL;
532} 578}
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
541 if (!data) 587 if (!data)
542 return; 588 return;
543 589
544 if (data->host_cookie) { 590 BUG_ON(data->host_cookie);
545 data->host_cookie = 0; 591
592 if (mmci_validate_data(host, data))
546 return; 593 return;
547 }
548 594
549 /* if config for dma */ 595 if (!mmci_dma_prep_next(host, data))
550 if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || 596 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
551 ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
552 if (mmci_dma_prep_data(host, data, nd))
553 data->host_cookie = 0;
554 else
555 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
556 }
557} 597}
558 598
559static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 599static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
561{ 601{
562 struct mmci_host *host = mmc_priv(mmc); 602 struct mmci_host *host = mmc_priv(mmc);
563 struct mmc_data *data = mrq->data; 603 struct mmc_data *data = mrq->data;
564 struct dma_chan *chan;
565 enum dma_data_direction dir;
566 604
567 if (!data) 605 if (!data || !data->host_cookie)
568 return; 606 return;
569 607
570 if (data->flags & MMC_DATA_READ) { 608 mmci_dma_unmap(host, data);
571 dir = DMA_FROM_DEVICE;
572 chan = host->dma_rx_channel;
573 } else {
574 dir = DMA_TO_DEVICE;
575 chan = host->dma_tx_channel;
576 }
577 609
610 if (err) {
611 struct mmci_host_next *next = &host->next_data;
612 struct dma_chan *chan;
613 if (data->flags & MMC_DATA_READ)
614 chan = host->dma_rx_channel;
615 else
616 chan = host->dma_tx_channel;
617 dmaengine_terminate_all(chan);
578 618
579 /* if config for dma */ 619 next->dma_desc = NULL;
580 if (chan) { 620 next->dma_chan = NULL;
581 if (err)
582 dmaengine_terminate_all(chan);
583 if (data->host_cookie)
584 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
585 data->sg_len, dir);
586 mrq->data->host_cookie = 0;
587 } 621 }
588} 622}
589 623
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
604{ 638{
605} 639}
606 640
641static inline void mmci_dma_finalize(struct mmci_host *host,
642 struct mmc_data *data)
643{
644}
645
607static inline void mmci_dma_data_error(struct mmci_host *host) 646static inline void mmci_dma_data_error(struct mmci_host *host)
608{ 647{
609} 648}
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
680 mmci_write_clkreg(host, clk); 719 mmci_write_clkreg(host, clk);
681 } 720 }
682 721
722 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
723 datactrl |= MCI_ST_DPSM_DDRMODE;
724
683 /* 725 /*
684 * Attempt to use DMA operation mode, if this 726 * Attempt to use DMA operation mode, if this
685 * should fail, fall back to PIO mode 727 * should fail, fall back to PIO mode
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
751 u32 remain, success; 793 u32 remain, success;
752 794
753 /* Terminate the DMA transfer */ 795 /* Terminate the DMA transfer */
754 if (dma_inprogress(host)) 796 if (dma_inprogress(host)) {
755 mmci_dma_data_error(host); 797 mmci_dma_data_error(host);
798 mmci_dma_unmap(host, data);
799 }
756 800
757 /* 801 /*
758 * Calculate how far we are into the transfer. Note that 802 * Calculate how far we are into the transfer. Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
791 835
792 if (status & MCI_DATAEND || data->error) { 836 if (status & MCI_DATAEND || data->error) {
793 if (dma_inprogress(host)) 837 if (dma_inprogress(host))
794 mmci_dma_unmap(host, data); 838 mmci_dma_finalize(host, data);
795 mmci_stop_data(host); 839 mmci_stop_data(host);
796 840
797 if (!data->error) 841 if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
828 if (!cmd->data || cmd->error) { 872 if (!cmd->data || cmd->error) {
829 if (host->data) { 873 if (host->data) {
830 /* Terminate the DMA transfer */ 874 /* Terminate the DMA transfer */
831 if (dma_inprogress(host)) 875 if (dma_inprogress(host)) {
832 mmci_dma_data_error(host); 876 mmci_dma_data_error(host);
877 mmci_dma_unmap(host, host->data);
878 }
833 mmci_stop_data(host); 879 mmci_stop_data(host);
834 } 880 }
835 mmci_request_end(host, cmd->mrq); 881 mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1055 1101
1056 WARN_ON(host->mrq != NULL); 1102 WARN_ON(host->mrq != NULL);
1057 1103
1058 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 1104 mrq->cmd->error = mmci_validate_data(host, mrq->data);
1059 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", 1105 if (mrq->cmd->error) {
1060 mrq->data->blksz);
1061 mrq->cmd->error = -EINVAL;
1062 mmc_request_done(mmc, mrq); 1106 mmc_request_done(mmc, mrq);
1063 return; 1107 return;
1064 } 1108 }
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1086 struct variant_data *variant = host->variant; 1130 struct variant_data *variant = host->variant;
1087 u32 pwr = 0; 1131 u32 pwr = 0;
1088 unsigned long flags; 1132 unsigned long flags;
1089 int ret;
1090 1133
1091 pm_runtime_get_sync(mmc_dev(mmc)); 1134 pm_runtime_get_sync(mmc_dev(mmc));
1092 1135
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1096 1139
1097 switch (ios->power_mode) { 1140 switch (ios->power_mode) {
1098 case MMC_POWER_OFF: 1141 case MMC_POWER_OFF:
1099 if (host->vcc) 1142 if (!IS_ERR(mmc->supply.vmmc))
1100 ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); 1143 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1101 break; 1144 break;
1102 case MMC_POWER_UP: 1145 case MMC_POWER_UP:
1103 if (host->vcc) { 1146 if (!IS_ERR(mmc->supply.vmmc))
1104 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); 1147 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1105 if (ret) { 1148
1106 dev_err(mmc_dev(mmc), "unable to set OCR\n");
1107 /*
1108 * The .set_ios() function in the mmc_host_ops
1109 * struct return void, and failing to set the
1110 * power should be rare so we print an error
1111 * and return here.
1112 */
1113 goto out;
1114 }
1115 }
1116 /* 1149 /*
1117 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP 1150 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
1118 * and instead uses MCI_PWR_ON so apply whatever value is 1151 * and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1154 } 1187 }
1155 } 1188 }
1156 1189
1190 /*
1191 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1192 * gating the clock, the MCI_PWR_ON bit is cleared.
1193 */
1194 if (!ios->clock && variant->pwrreg_clkgate)
1195 pwr &= ~MCI_PWR_ON;
1196
1157 spin_lock_irqsave(&host->lock, flags); 1197 spin_lock_irqsave(&host->lock, flags);
1158 1198
1159 mmci_set_clkreg(host, ios->clock); 1199 mmci_set_clkreg(host, ios->clock);
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1161 1201
1162 spin_unlock_irqrestore(&host->lock, flags); 1202 spin_unlock_irqrestore(&host->lock, flags);
1163 1203
1164 out:
1165 pm_runtime_mark_last_busy(mmc_dev(mmc)); 1204 pm_runtime_mark_last_busy(mmc_dev(mmc));
1166 pm_runtime_put_autosuspend(mmc_dev(mmc)); 1205 pm_runtime_put_autosuspend(mmc_dev(mmc));
1167} 1206}
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
1384 } else 1423 } else
1385 dev_warn(&dev->dev, "could not get default pinstate\n"); 1424 dev_warn(&dev->dev, "could not get default pinstate\n");
1386 1425
1387#ifdef CONFIG_REGULATOR 1426 /* Get regulators and the supported OCR mask */
1388 /* If we're using the regulator framework, try to fetch a regulator */ 1427 mmc_regulator_get_supply(mmc);
1389 host->vcc = regulator_get(&dev->dev, "vmmc"); 1428 if (!mmc->ocr_avail)
1390 if (IS_ERR(host->vcc))
1391 host->vcc = NULL;
1392 else {
1393 int mask = mmc_regulator_get_ocrmask(host->vcc);
1394
1395 if (mask < 0)
1396 dev_err(&dev->dev, "error getting OCR mask (%d)\n",
1397 mask);
1398 else {
1399 host->mmc->ocr_avail = (u32) mask;
1400 if (plat->ocr_mask)
1401 dev_warn(&dev->dev,
1402 "Provided ocr_mask/setpower will not be used "
1403 "(using regulator instead)\n");
1404 }
1405 }
1406#endif
1407 /* Fall back to platform data if no regulator is found */
1408 if (host->vcc == NULL)
1409 mmc->ocr_avail = plat->ocr_mask; 1429 mmc->ocr_avail = plat->ocr_mask;
1430 else if (plat->ocr_mask)
1431 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1432
1410 mmc->caps = plat->capabilities; 1433 mmc->caps = plat->capabilities;
1411 mmc->caps2 = plat->capabilities2; 1434 mmc->caps2 = plat->capabilities2;
1412 1435
1436 /* We support these PM capabilities. */
1437 mmc->pm_caps = MMC_PM_KEEP_POWER;
1438
1413 /* 1439 /*
1414 * We can do SGIO 1440 * We can do SGIO
1415 */ 1441 */
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
1585 clk_disable_unprepare(host->clk); 1611 clk_disable_unprepare(host->clk);
1586 clk_put(host->clk); 1612 clk_put(host->clk);
1587 1613
1588 if (host->vcc)
1589 mmc_regulator_set_ocr(mmc, host->vcc, 0);
1590 regulator_put(host->vcc);
1591
1592 mmc_free_host(mmc); 1614 mmc_free_host(mmc);
1593 1615
1594 amba_release_regions(dev); 1616 amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
1636} 1658}
1637#endif 1659#endif
1638 1660
1661#ifdef CONFIG_PM_RUNTIME
1662static int mmci_runtime_suspend(struct device *dev)
1663{
1664 struct amba_device *adev = to_amba_device(dev);
1665 struct mmc_host *mmc = amba_get_drvdata(adev);
1666
1667 if (mmc) {
1668 struct mmci_host *host = mmc_priv(mmc);
1669 clk_disable_unprepare(host->clk);
1670 }
1671
1672 return 0;
1673}
1674
1675static int mmci_runtime_resume(struct device *dev)
1676{
1677 struct amba_device *adev = to_amba_device(dev);
1678 struct mmc_host *mmc = amba_get_drvdata(adev);
1679
1680 if (mmc) {
1681 struct mmci_host *host = mmc_priv(mmc);
1682 clk_prepare_enable(host->clk);
1683 }
1684
1685 return 0;
1686}
1687#endif
1688
1639static const struct dev_pm_ops mmci_dev_pm_ops = { 1689static const struct dev_pm_ops mmci_dev_pm_ops = {
1640 SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) 1690 SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
1691 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
1641}; 1692};
1642 1693
1643static struct amba_id mmci_ids[] = { 1694static struct amba_id mmci_ids[] = {
@@ -1652,6 +1703,11 @@ static struct amba_id mmci_ids[] = {
1652 .data = &variant_arm_extended_fifo, 1703 .data = &variant_arm_extended_fifo,
1653 }, 1704 },
1654 { 1705 {
1706 .id = 0x02041180,
1707 .mask = 0xff0fffff,
1708 .data = &variant_arm_extended_fifo_hwfc,
1709 },
1710 {
1655 .id = 0x00041181, 1711 .id = 0x00041181,
1656 .mask = 0x000fffff, 1712 .mask = 0x000fffff,
1657 .data = &variant_arm, 1713 .data = &variant_arm,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0add8e..1f33ad5333a0 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
28#define MCI_ST_UX500_NEG_EDGE (1 << 13) 28#define MCI_ST_UX500_NEG_EDGE (1 << 13)
29#define MCI_ST_UX500_HWFCEN (1 << 14) 29#define MCI_ST_UX500_HWFCEN (1 << 14)
30#define MCI_ST_UX500_CLK_INV (1 << 15) 30#define MCI_ST_UX500_CLK_INV (1 << 15)
31/* Modified PL180 on Versatile Express platform */
32#define MCI_ARM_HWFCEN (1 << 12)
31 33
32#define MMCIARGUMENT 0x008 34#define MMCIARGUMENT 0x008
33#define MMCICOMMAND 0x00c 35#define MMCICOMMAND 0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
193 /* pio stuff */ 195 /* pio stuff */
194 struct sg_mapping_iter sg_miter; 196 struct sg_mapping_iter sg_miter;
195 unsigned int size; 197 unsigned int size;
196 struct regulator *vcc;
197 198
198 /* pinctrl handles */ 199 /* pinctrl handles */
199 struct pinctrl *pinctrl; 200 struct pinctrl *pinctrl;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index de4c20b3936c..f8dd36102949 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -50,8 +50,6 @@ struct mvsd_host {
50 struct timer_list timer; 50 struct timer_list timer;
51 struct mmc_host *mmc; 51 struct mmc_host *mmc;
52 struct device *dev; 52 struct device *dev;
53 struct resource *res;
54 int irq;
55 struct clk *clk; 53 struct clk *clk;
56 int gpio_card_detect; 54 int gpio_card_detect;
57 int gpio_write_protect; 55 int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
718 if (!r || irq < 0 || !mvsd_data) 716 if (!r || irq < 0 || !mvsd_data)
719 return -ENXIO; 717 return -ENXIO;
720 718
721 r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
722 if (!r)
723 return -EBUSY;
724
725 mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); 719 mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
726 if (!mmc) { 720 if (!mmc) {
727 ret = -ENOMEM; 721 ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
731 host = mmc_priv(mmc); 725 host = mmc_priv(mmc);
732 host->mmc = mmc; 726 host->mmc = mmc;
733 host->dev = &pdev->dev; 727 host->dev = &pdev->dev;
734 host->res = r;
735 host->base_clock = mvsd_data->clock / 2; 728 host->base_clock = mvsd_data->clock / 2;
729 host->clk = ERR_PTR(-EINVAL);
736 730
737 mmc->ops = &mvsd_ops; 731 mmc->ops = &mvsd_ops;
738 732
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
752 746
753 spin_lock_init(&host->lock); 747 spin_lock_init(&host->lock);
754 748
755 host->base = ioremap(r->start, SZ_4K); 749 host->base = devm_request_and_ioremap(&pdev->dev, r);
756 if (!host->base) { 750 if (!host->base) {
757 ret = -ENOMEM; 751 ret = -ENOMEM;
758 goto out; 752 goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
765 759
766 mvsd_power_down(host); 760 mvsd_power_down(host);
767 761
768 ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); 762 ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
769 if (ret) { 763 if (ret) {
770 pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); 764 pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
771 goto out; 765 goto out;
772 } else 766 }
773 host->irq = irq;
774 767
775 /* Not all platforms can gate the clock, so it is not 768 /* Not all platforms can gate the clock, so it is not
 776 an error if the clock does not exist. */ 769 an error if the clock does not exist. */
777 host->clk = clk_get(&pdev->dev, NULL); 770 host->clk = devm_clk_get(&pdev->dev, NULL);
778 if (!IS_ERR(host->clk)) { 771 if (!IS_ERR(host->clk))
779 clk_prepare_enable(host->clk); 772 clk_prepare_enable(host->clk);
780 }
781 773
782 if (mvsd_data->gpio_card_detect) { 774 if (mvsd_data->gpio_card_detect) {
783 ret = gpio_request(mvsd_data->gpio_card_detect, 775 ret = devm_gpio_request_one(&pdev->dev,
784 DRIVER_NAME " cd"); 776 mvsd_data->gpio_card_detect,
777 GPIOF_IN, DRIVER_NAME " cd");
785 if (ret == 0) { 778 if (ret == 0) {
786 gpio_direction_input(mvsd_data->gpio_card_detect);
787 irq = gpio_to_irq(mvsd_data->gpio_card_detect); 779 irq = gpio_to_irq(mvsd_data->gpio_card_detect);
788 ret = request_irq(irq, mvsd_card_detect_irq, 780 ret = devm_request_irq(&pdev->dev, irq,
789 IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, 781 mvsd_card_detect_irq,
790 DRIVER_NAME " cd", host); 782 IRQ_TYPE_EDGE_RISING |
783 IRQ_TYPE_EDGE_FALLING,
784 DRIVER_NAME " cd", host);
791 if (ret == 0) 785 if (ret == 0)
792 host->gpio_card_detect = 786 host->gpio_card_detect =
793 mvsd_data->gpio_card_detect; 787 mvsd_data->gpio_card_detect;
794 else 788 else
795 gpio_free(mvsd_data->gpio_card_detect); 789 devm_gpio_free(&pdev->dev,
790 mvsd_data->gpio_card_detect);
796 } 791 }
797 } 792 }
798 if (!host->gpio_card_detect) 793 if (!host->gpio_card_detect)
799 mmc->caps |= MMC_CAP_NEEDS_POLL; 794 mmc->caps |= MMC_CAP_NEEDS_POLL;
800 795
801 if (mvsd_data->gpio_write_protect) { 796 if (mvsd_data->gpio_write_protect) {
802 ret = gpio_request(mvsd_data->gpio_write_protect, 797 ret = devm_gpio_request_one(&pdev->dev,
803 DRIVER_NAME " wp"); 798 mvsd_data->gpio_write_protect,
799 GPIOF_IN, DRIVER_NAME " wp");
804 if (ret == 0) { 800 if (ret == 0) {
805 gpio_direction_input(mvsd_data->gpio_write_protect);
806 host->gpio_write_protect = 801 host->gpio_write_protect =
807 mvsd_data->gpio_write_protect; 802 mvsd_data->gpio_write_protect;
808 } 803 }
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
824 return 0; 819 return 0;
825 820
826out: 821out:
827 if (host) { 822 if (mmc) {
828 if (host->irq) 823 if (!IS_ERR(host->clk))
829 free_irq(host->irq, host);
830 if (host->gpio_card_detect) {
831 free_irq(gpio_to_irq(host->gpio_card_detect), host);
832 gpio_free(host->gpio_card_detect);
833 }
834 if (host->gpio_write_protect)
835 gpio_free(host->gpio_write_protect);
836 if (host->base)
837 iounmap(host->base);
838 }
839 if (r)
840 release_resource(r);
841 if (mmc)
842 if (!IS_ERR_OR_NULL(host->clk)) {
843 clk_disable_unprepare(host->clk); 824 clk_disable_unprepare(host->clk);
844 clk_put(host->clk);
845 }
846 mmc_free_host(mmc); 825 mmc_free_host(mmc);
826 }
847 827
848 return ret; 828 return ret;
849} 829}
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
852{ 832{
853 struct mmc_host *mmc = platform_get_drvdata(pdev); 833 struct mmc_host *mmc = platform_get_drvdata(pdev);
854 834
855 if (mmc) { 835 struct mvsd_host *host = mmc_priv(mmc);
856 struct mvsd_host *host = mmc_priv(mmc);
857 836
858 if (host->gpio_card_detect) { 837 mmc_remove_host(mmc);
859 free_irq(gpio_to_irq(host->gpio_card_detect), host); 838 del_timer_sync(&host->timer);
860 gpio_free(host->gpio_card_detect); 839 mvsd_power_down(host);
861 } 840
862 mmc_remove_host(mmc); 841 if (!IS_ERR(host->clk))
863 free_irq(host->irq, host); 842 clk_disable_unprepare(host->clk);
864 if (host->gpio_write_protect) 843 mmc_free_host(mmc);
865 gpio_free(host->gpio_write_protect);
866 del_timer_sync(&host->timer);
867 mvsd_power_down(host);
868 iounmap(host->base);
869 release_resource(host->res);
870 844
871 if (!IS_ERR(host->clk)) {
872 clk_disable_unprepare(host->clk);
873 clk_put(host->clk);
874 }
875 mmc_free_host(mmc);
876 }
877 platform_set_drvdata(pdev, NULL); 845 platform_set_drvdata(pdev, NULL);
878 return 0; 846 return 0;
879} 847}
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 571915dfb218..f74b5adca642 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1060,26 +1060,6 @@ static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
1060 return 0; 1060 return 0;
1061} 1061}
1062 1062
1063static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage)
1064{
1065 struct rtsx_pcr *pcr = host->pcr;
1066 int err;
1067
1068 if (voltage == SD_IO_3V3) {
1069 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
1070 if (err < 0)
1071 return err;
1072 } else if (voltage == SD_IO_1V8) {
1073 err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
1074 if (err < 0)
1075 return err;
1076 } else {
1077 return -EINVAL;
1078 }
1079
1080 return 0;
1081}
1082
1083static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1063static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1084{ 1064{
1085 struct realtek_pci_sdmmc *host = mmc_priv(mmc); 1065 struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -1098,11 +1078,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1098 rtsx_pci_start_run(pcr); 1078 rtsx_pci_start_run(pcr);
1099 1079
1100 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1080 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1101 voltage = SD_IO_3V3; 1081 voltage = OUTPUT_3V3;
1102 else 1082 else
1103 voltage = SD_IO_1V8; 1083 voltage = OUTPUT_1V8;
1104 1084
1105 if (voltage == SD_IO_1V8) { 1085 if (voltage == OUTPUT_1V8) {
1106 err = rtsx_pci_write_register(pcr, 1086 err = rtsx_pci_write_register(pcr,
1107 SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B); 1087 SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
1108 if (err < 0) 1088 if (err < 0)
@@ -1113,11 +1093,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1113 goto out; 1093 goto out;
1114 } 1094 }
1115 1095
1116 err = sd_change_bank_voltage(host, voltage); 1096 err = rtsx_pci_switch_output_voltage(pcr, voltage);
1117 if (err < 0) 1097 if (err < 0)
1118 goto out; 1098 goto out;
1119 1099
1120 if (voltage == SD_IO_1V8) { 1100 if (voltage == OUTPUT_1V8) {
1121 err = sd_wait_voltage_stable_2(host); 1101 err = sd_wait_voltage_stable_2(host);
1122 if (err < 0) 1102 if (err < 0)
1123 goto out; 1103 goto out;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 27f80cd8aef3..46dcb54c32ec 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -272,6 +272,7 @@ config MTD_DOCG3
272 tristate "M-Systems Disk-On-Chip G3" 272 tristate "M-Systems Disk-On-Chip G3"
273 select BCH 273 select BCH
274 select BCH_CONST_PARAMS 274 select BCH_CONST_PARAMS
275 select BITREVERSE
275 ---help--- 276 ---help---
276 This provides an MTD device driver for the M-Systems DiskOnChip 277 This provides an MTD device driver for the M-Systems DiskOnChip
277 G3 devices. 278 G3 devices.
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 67cc73c18ddd..7901d72c9242 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
170 resource_size_t res_size; 170 resource_size_t res_size;
171 struct mtd_part_parser_data ppdata; 171 struct mtd_part_parser_data ppdata;
172 bool map_indirect; 172 bool map_indirect;
173 const char *mtd_name; 173 const char *mtd_name = NULL;
174 174
175 match = of_match_device(of_flash_match, &dev->dev); 175 match = of_match_device(of_flash_match, &dev->dev);
176 if (!match) 176 if (!match)
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 86c9a79b89b3..595de4012e71 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -17,8 +17,8 @@
17#include "bcm47xxnflash.h" 17#include "bcm47xxnflash.h"
18 18
19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has 19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
 20 * shown 164 retries as maximum. */ 20 * shown ~1000 retries as maximum. */
21#define NFLASH_READY_RETRIES 1000 21#define NFLASH_READY_RETRIES 10000
22 22
23#define NFLASH_SECTOR_SIZE 512 23#define NFLASH_SECTOR_SIZE 512
24 24
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 3502606f6480..feae55c7b880 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
523static const struct of_device_id davinci_nand_of_match[] = { 523static const struct of_device_id davinci_nand_of_match[] = {
524 {.compatible = "ti,davinci-nand", }, 524 {.compatible = "ti,davinci-nand", },
525 {}, 525 {},
526} 526};
527MODULE_DEVICE_TABLE(of, davinci_nand_of_match); 527MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
528 528
529static struct davinci_nand_pdata 529static struct davinci_nand_pdata
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8323ac991ad1..3766682a0289 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2857 int i; 2857 int i;
2858 int val; 2858 int val;
2859 2859
2860 /* ONFI need to be probed in 8 bits mode */ 2860 /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
2861 WARN_ON(chip->options & NAND_BUSWIDTH_16); 2861 if (chip->options & NAND_BUSWIDTH_16) {
2862 pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
2863 return 0;
2864 }
2862 /* Try ONFI for unknown chip or LP */ 2865 /* Try ONFI for unknown chip or LP */
2863 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2866 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2864 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2867 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
1053 pr_info("%s: Setting primary slave to None.\n", 1053 pr_info("%s: Setting primary slave to None.\n",
1054 bond->dev->name); 1054 bond->dev->name);
1055 bond->primary_slave = NULL; 1055 bond->primary_slave = NULL;
1056 memset(bond->params.primary, 0, sizeof(bond->params.primary));
1056 bond_select_active_slave(bond); 1057 bond_select_active_slave(bond);
1057 goto out; 1058 goto out;
1058 } 1059 }
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5233b8f58d77..2282b1ae9765 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
488 488
489 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 489 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
490 IFX_WRITE_LOW_16BIT(mask)); 490 IFX_WRITE_LOW_16BIT(mask));
491
492 /* According to C_CAN documentation, the reserved bit
493 * in IFx_MASK2 register is fixed 1
494 */
491 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), 495 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
492 IFX_WRITE_HIGH_16BIT(mask)); 496 IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
493 497
494 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 498 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
495 IFX_WRITE_LOW_16BIT(id)); 499 IFX_WRITE_LOW_16BIT(id));
@@ -960,7 +964,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
960 break; 964 break;
961 case LEC_ACK_ERROR: 965 case LEC_ACK_ERROR:
962 netdev_dbg(dev, "ack error\n"); 966 netdev_dbg(dev, "ack error\n");
963 cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | 967 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
964 CAN_ERR_PROT_LOC_ACK_DEL); 968 CAN_ERR_PROT_LOC_ACK_DEL);
965 break; 969 break;
966 case LEC_BIT1_ERROR: 970 case LEC_BIT1_ERROR:
@@ -973,7 +977,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
973 break; 977 break;
974 case LEC_CRC_ERROR: 978 case LEC_CRC_ERROR:
975 netdev_dbg(dev, "CRC error\n"); 979 netdev_dbg(dev, "CRC error\n");
976 cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 980 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
977 CAN_ERR_PROT_LOC_CRC_DEL); 981 CAN_ERR_PROT_LOC_CRC_DEL);
978 break; 982 break;
979 default: 983 default:
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 7d1748575b1f..5c314a961970 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
560 stats->rx_errors++; 560 stats->rx_errors++;
561 break; 561 break;
562 case PCH_CRC_ERR: 562 case PCH_CRC_ERR:
563 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 563 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
564 CAN_ERR_PROT_LOC_CRC_DEL; 564 CAN_ERR_PROT_LOC_CRC_DEL;
565 priv->can.can_stats.bus_error++; 565 priv->can.can_stats.bus_error++;
566 stats->rx_errors++; 566 stats->rx_errors++;
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index d84888f03d92..600ac7226e5c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
339 */ 339 */
340static void peak_pciec_start_led_work(struct peak_pciec_card *card) 340static void peak_pciec_start_led_work(struct peak_pciec_card *card)
341{ 341{
342 if (!delayed_work_pending(&card->led_work)) 342 schedule_delayed_work(&card->led_work, HZ);
343 schedule_delayed_work(&card->led_work, HZ);
344} 343}
345 344
346/* 345/*
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f898c6363729..300581b24ff3 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
746 } 746 }
747 if (err_status & HECC_CANES_CRCE) { 747 if (err_status & HECC_CANES_CRCE) {
748 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); 748 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
749 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 749 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
750 CAN_ERR_PROT_LOC_CRC_DEL; 750 CAN_ERR_PROT_LOC_CRC_DEL;
751 } 751 }
752 if (err_status & HECC_CANES_ACKE) { 752 if (err_status & HECC_CANES_ACKE) {
753 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); 753 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
754 cf->data[2] |= CAN_ERR_PROT_LOC_ACK | 754 cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
755 CAN_ERR_PROT_LOC_ACK_DEL; 755 CAN_ERR_PROT_LOC_ACK_DEL;
756 } 756 }
757 } 757 }
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 66df93638085..ffd8de28a76a 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)
432 netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", 432 netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
433 cardname, dev->base_addr, dev->irq, dev->dev_addr); 433 cardname, dev->base_addr, dev->irq, dev->dev_addr);
434 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", 434 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
435 8 << config & Ram_size, 435 8 << (config & Ram_size),
436 ram_split[(config & Ram_split) >> Ram_split_shift], 436 ram_split[(config & Ram_split) >> Ram_split_shift],
437 config & Autoselect ? "autoselect " : ""); 437 config & Autoselect ? "autoselect " : "");
438 438
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 56d3f697e0c7..0035c01660b6 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
21 21
22#include "atl1c.h" 22#include "atl1c.h"
23 23
24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI" 24#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27 27
@@ -1652,6 +1652,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1652 u16 num_alloc = 0; 1652 u16 num_alloc = 0;
1653 u16 rfd_next_to_use, next_next; 1653 u16 rfd_next_to_use, next_next;
1654 struct atl1c_rx_free_desc *rfd_desc; 1654 struct atl1c_rx_free_desc *rfd_desc;
1655 dma_addr_t mapping;
1655 1656
1656 next_next = rfd_next_to_use = rfd_ring->next_to_use; 1657 next_next = rfd_next_to_use = rfd_ring->next_to_use;
1657 if (++next_next == rfd_ring->count) 1658 if (++next_next == rfd_ring->count)
@@ -1678,9 +1679,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1678 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); 1679 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1679 buffer_info->skb = skb; 1680 buffer_info->skb = skb;
1680 buffer_info->length = adapter->rx_buffer_len; 1681 buffer_info->length = adapter->rx_buffer_len;
1681 buffer_info->dma = pci_map_single(pdev, vir_addr, 1682 mapping = pci_map_single(pdev, vir_addr,
1682 buffer_info->length, 1683 buffer_info->length,
1683 PCI_DMA_FROMDEVICE); 1684 PCI_DMA_FROMDEVICE);
1685 if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
1686 dev_kfree_skb(skb);
1687 buffer_info->skb = NULL;
1688 buffer_info->length = 0;
1689 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
1690 netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
1691 break;
1692 }
1693 buffer_info->dma = mapping;
1684 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, 1694 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
1685 ATL1C_PCIMAP_FROMDEVICE); 1695 ATL1C_PCIMAP_FROMDEVICE);
1686 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 1696 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2015,7 +2025,29 @@ check_sum:
2015 return 0; 2025 return 0;
2016} 2026}
2017 2027
2018static void atl1c_tx_map(struct atl1c_adapter *adapter, 2028static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
2029 struct atl1c_tpd_desc *first_tpd,
2030 enum atl1c_trans_queue type)
2031{
2032 struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
2033 struct atl1c_buffer *buffer_info;
2034 struct atl1c_tpd_desc *tpd;
2035 u16 first_index, index;
2036
2037 first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
2038 index = first_index;
2039 while (index != tpd_ring->next_to_use) {
2040 tpd = ATL1C_TPD_DESC(tpd_ring, index);
2041 buffer_info = &tpd_ring->buffer_info[index];
2042 atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
2043 memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
2044 if (++index == tpd_ring->count)
2045 index = 0;
2046 }
2047 tpd_ring->next_to_use = first_index;
2048}
2049
2050static int atl1c_tx_map(struct atl1c_adapter *adapter,
2019 struct sk_buff *skb, struct atl1c_tpd_desc *tpd, 2051 struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
2020 enum atl1c_trans_queue type) 2052 enum atl1c_trans_queue type)
2021{ 2053{
@@ -2040,7 +2072,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2040 buffer_info->length = map_len; 2072 buffer_info->length = map_len;
2041 buffer_info->dma = pci_map_single(adapter->pdev, 2073 buffer_info->dma = pci_map_single(adapter->pdev,
2042 skb->data, hdr_len, PCI_DMA_TODEVICE); 2074 skb->data, hdr_len, PCI_DMA_TODEVICE);
2043 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); 2075 if (unlikely(pci_dma_mapping_error(adapter->pdev,
2076 buffer_info->dma)))
2077 goto err_dma;
2078
2044 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, 2079 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2045 ATL1C_PCIMAP_TODEVICE); 2080 ATL1C_PCIMAP_TODEVICE);
2046 mapped_len += map_len; 2081 mapped_len += map_len;
@@ -2062,6 +2097,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2062 buffer_info->dma = 2097 buffer_info->dma =
2063 pci_map_single(adapter->pdev, skb->data + mapped_len, 2098 pci_map_single(adapter->pdev, skb->data + mapped_len,
2064 buffer_info->length, PCI_DMA_TODEVICE); 2099 buffer_info->length, PCI_DMA_TODEVICE);
2100 if (unlikely(pci_dma_mapping_error(adapter->pdev,
2101 buffer_info->dma)))
2102 goto err_dma;
2103
2065 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); 2104 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2066 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, 2105 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2067 ATL1C_PCIMAP_TODEVICE); 2106 ATL1C_PCIMAP_TODEVICE);
@@ -2083,6 +2122,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2083 frag, 0, 2122 frag, 0,
2084 buffer_info->length, 2123 buffer_info->length,
2085 DMA_TO_DEVICE); 2124 DMA_TO_DEVICE);
2125 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
2126 goto err_dma;
2127
2086 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); 2128 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2087 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, 2129 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
2088 ATL1C_PCIMAP_TODEVICE); 2130 ATL1C_PCIMAP_TODEVICE);
@@ -2095,6 +2137,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2095 /* The last buffer info contain the skb address, 2137 /* The last buffer info contain the skb address,
2096 so it will be free after unmap */ 2138 so it will be free after unmap */
2097 buffer_info->skb = skb; 2139 buffer_info->skb = skb;
2140
2141 return 0;
2142
2143err_dma:
2144 buffer_info->dma = 0;
2145 buffer_info->length = 0;
2146 return -1;
2098} 2147}
2099 2148
2100static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, 2149static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2157,10 +2206,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2157 if (skb_network_offset(skb) != ETH_HLEN) 2206 if (skb_network_offset(skb) != ETH_HLEN)
2158 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ 2207 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2159 2208
2160 atl1c_tx_map(adapter, skb, tpd, type); 2209 if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
2161 atl1c_tx_queue(adapter, skb, tpd, type); 2210 netif_info(adapter, tx_done, adapter->netdev,
 2211 "tx-skb dropped due to dma error\n");
2212 /* roll back tpd/buffer */
2213 atl1c_tx_rollback(adapter, tpd, type);
2214 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2215 dev_kfree_skb(skb);
2216 } else {
2217 atl1c_tx_queue(adapter, skb, tpd, type);
2218 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2219 }
2162 2220
2163 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2164 return NETDEV_TX_OK; 2221 return NETDEV_TX_OK;
2165} 2222}
2166 2223
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f771ddfba646..a5edac8df67b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -504,13 +504,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
504 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, 504 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
505 tpa_info->parsing_flags, len_on_bd); 505 tpa_info->parsing_flags, len_on_bd);
506 506
507 /* set for GRO */ 507 skb_shinfo(skb)->gso_type =
508 if (fp->mode == TPA_MODE_GRO) 508 (GET_FLAG(tpa_info->parsing_flags,
509 skb_shinfo(skb)->gso_type = 509 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
510 (GET_FLAG(tpa_info->parsing_flags, 510 PRS_FLAG_OVERETH_IPV6) ?
511 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == 511 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
512 PRS_FLAG_OVERETH_IPV6) ?
513 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
514 } 512 }
515 513
516 514
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 78ea90c40e19..bdb086934cd9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1283,14 +1283,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1283 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); 1283 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1284} 1284}
1285 1285
1286#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ 1286static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1287 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ 1287{
1288 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ 1288 u32 val;
1289 MII_TG3_AUXCTL_ACTL_TX_6DB) 1289 int err;
1290 1290
1291#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ 1291 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1292 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ 1292
1293 MII_TG3_AUXCTL_ACTL_TX_6DB); 1293 if (err)
1294 return err;
1295 if (enable)
1296
1297 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1298 else
1299 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1300
1301 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1302 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1303
1304 return err;
1305}
1294 1306
1295static int tg3_bmcr_reset(struct tg3 *tp) 1307static int tg3_bmcr_reset(struct tg3 *tp)
1296{ 1308{
@@ -2223,7 +2235,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
2223 2235
2224 otp = tp->phy_otp; 2236 otp = tp->phy_otp;
2225 2237
2226 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) 2238 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2227 return; 2239 return;
2228 2240
2229 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2241 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2248,7 +2260,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
2248 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2260 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2261 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2250 2262
2251 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2263 tg3_phy_toggle_auxctl_smdsp(tp, false);
2252} 2264}
2253 2265
2254static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) 2266static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2284,9 +2296,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2284 2296
2285 if (!tp->setlpicnt) { 2297 if (!tp->setlpicnt) {
2286 if (current_link_up == 1 && 2298 if (current_link_up == 1 &&
2287 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2299 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2288 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2301 tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 } 2302 }
2291 2303
2292 val = tr32(TG3_CPMU_EEE_MODE); 2304 val = tr32(TG3_CPMU_EEE_MODE);
@@ -2302,11 +2314,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
2302 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 2315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304 tg3_flag(tp, 57765_CLASS)) && 2316 tg3_flag(tp, 57765_CLASS)) &&
2305 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2317 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2306 val = MII_TG3_DSP_TAP26_ALNOKO | 2318 val = MII_TG3_DSP_TAP26_ALNOKO |
2307 MII_TG3_DSP_TAP26_RMRXSTO; 2319 MII_TG3_DSP_TAP26_RMRXSTO;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2320 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2321 tg3_phy_toggle_auxctl_smdsp(tp, false);
2310 } 2322 }
2311 2323
2312 val = tr32(TG3_CPMU_EEE_MODE); 2324 val = tr32(TG3_CPMU_EEE_MODE);
@@ -2450,7 +2462,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2450 tg3_writephy(tp, MII_CTRL1000, 2462 tg3_writephy(tp, MII_CTRL1000,
2451 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2463 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2452 2464
2453 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); 2465 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2454 if (err) 2466 if (err)
2455 return err; 2467 return err;
2456 2468
@@ -2471,7 +2483,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2483 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2484 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2473 2485
2474 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2486 tg3_phy_toggle_auxctl_smdsp(tp, false);
2475 2487
2476 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2488 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2477 2489
@@ -2572,10 +2584,10 @@ static int tg3_phy_reset(struct tg3 *tp)
2572 2584
2573out: 2585out:
2574 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2586 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2587 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2576 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2588 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577 tg3_phydsp_write(tp, 0x000a, 0x0323); 2589 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2590 tg3_phy_toggle_auxctl_smdsp(tp, false);
2579 } 2591 }
2580 2592
2581 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2593 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2584,14 +2596,14 @@ out:
2584 } 2596 }
2585 2597
2586 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2598 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2599 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2588 tg3_phydsp_write(tp, 0x000a, 0x310b); 2600 tg3_phydsp_write(tp, 0x000a, 0x310b);
2589 tg3_phydsp_write(tp, 0x201f, 0x9506); 2601 tg3_phydsp_write(tp, 0x201f, 0x9506);
2590 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2602 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2603 tg3_phy_toggle_auxctl_smdsp(tp, false);
2592 } 2604 }
2593 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2605 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2606 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2607 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2608 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2609 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2600,7 +2612,7 @@ out:
2600 } else 2612 } else
2601 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2613 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2602 2614
2603 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2615 tg3_phy_toggle_auxctl_smdsp(tp, false);
2604 } 2616 }
2605 } 2617 }
2606 2618
@@ -4009,7 +4021,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4009 tw32(TG3_CPMU_EEE_MODE, 4021 tw32(TG3_CPMU_EEE_MODE,
4010 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4022 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4011 4023
4012 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); 4024 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4013 if (!err) { 4025 if (!err) {
4014 u32 err2; 4026 u32 err2;
4015 4027
@@ -4042,7 +4054,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4042 MII_TG3_DSP_CH34TP2_HIBW01); 4054 MII_TG3_DSP_CH34TP2_HIBW01);
4043 } 4055 }
4044 4056
4045 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 4057 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4046 if (!err) 4058 if (!err)
4047 err = err2; 4059 err = err2;
4048 } 4060 }
@@ -6950,6 +6962,9 @@ static void tg3_poll_controller(struct net_device *dev)
6950 int i; 6962 int i;
6951 struct tg3 *tp = netdev_priv(dev); 6963 struct tg3 *tp = netdev_priv(dev);
6952 6964
6965 if (tg3_irq_sync(tp))
6966 return;
6967
6953 for (i = 0; i < tp->irq_cnt; i++) 6968 for (i = 0; i < tp->irq_cnt; i++)
6954 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 6969 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6955} 6970}
@@ -16367,6 +16382,7 @@ static int tg3_init_one(struct pci_dev *pdev,
16367 tp->pm_cap = pm_cap; 16382 tp->pm_cap = pm_cap;
16368 tp->rx_mode = TG3_DEF_RX_MODE; 16383 tp->rx_mode = TG3_DEF_RX_MODE;
16369 tp->tx_mode = TG3_DEF_TX_MODE; 16384 tp->tx_mode = TG3_DEF_TX_MODE;
16385 tp->irq_sync = 1;
16370 16386
16371 if (tg3_debug > 0) 16387 if (tg3_debug > 0)
16372 tp->msg_enable = tg3_debug; 16388 tp->msg_enable = tg3_debug;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a9b0830fb39d..b9d4bb9530e5 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
693 * get notified when new packets arrive. 693 * get notified when new packets arrive.
694 */ 694 */
695 macb_writel(bp, IER, MACB_RX_INT_FLAGS); 695 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
696
697 /* Packets received while interrupts were disabled */
698 status = macb_readl(bp, RSR);
699 if (unlikely(status))
700 napi_reschedule(napi);
696 } 701 }
697 702
698 /* TODO: Handle errors */ 703 /* TODO: Handle errors */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b407043ce9b0..f7f02900f650 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -548,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
548 return -1; 548 return -1;
549 } 549 }
550 550
551 /* All frames should fit into a single buffer */
552 if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
553 return -1;
554
551 /* Check if packet has checksum already */ 555 /* Check if packet has checksum already */
552 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && 556 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
553 !(ext_status & RXDESC_IP_PAYLOAD_MASK)) 557 !(ext_status & RXDESC_IP_PAYLOAD_MASK))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f0718e1a8369..c306df7d4568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1994,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1994{ 1994{
1995 const struct port_info *pi = netdev_priv(dev); 1995 const struct port_info *pi = netdev_priv(dev);
1996 struct adapter *adap = pi->adapter; 1996 struct adapter *adap = pi->adapter;
1997 1997 struct sge_rspq *q;
1998 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, 1998 int i;
1999 c->rx_coalesce_usecs, c->rx_max_coalesced_frames); 1999 int r = 0;
2000
2001 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2002 q = &adap->sge.ethrxq[i].rspq;
2003 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2004 c->rx_max_coalesced_frames);
2005 if (r) {
2006 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2007 break;
2008 }
2009 }
2010 return r;
2000} 2011}
2001 2012
2002static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2013static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4eba17b83ba8..f1b3df167ff2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -36,13 +36,13 @@
36 36
37#define DRV_VER "4.4.161.0u" 37#define DRV_VER "4.4.161.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 39#define BE_NAME "Emulex BladeEngine2"
40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 40#define BE3_NAME "Emulex BladeEngine3"
41#define OC_NAME "Emulex OneConnect 10Gbps NIC" 41#define OC_NAME "Emulex OneConnect"
42#define OC_NAME_BE OC_NAME "(be3)" 42#define OC_NAME_BE OC_NAME "(be3)"
43#define OC_NAME_LANCER OC_NAME "(Lancer)" 43#define OC_NAME_LANCER OC_NAME "(Lancer)"
44#define OC_NAME_SH OC_NAME "(Skyhawk)" 44#define OC_NAME_SH OC_NAME "(Skyhawk)"
45#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 45#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
46 46
47#define BE_VENDOR_ID 0x19a2 47#define BE_VENDOR_ID 0x19a2
48#define EMULEX_VENDOR_ID 0x10df 48#define EMULEX_VENDOR_ID 0x10df
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5c995700e534..4d6f3c54427a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
25MODULE_VERSION(DRV_VER); 25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids); 26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); 27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation"); 28MODULE_AUTHOR("Emulex Corporation");
29MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30 30
31static unsigned int num_vfs; 31static unsigned int num_vfs;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 02a12b69555f..4dab6fc265a2 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -232,6 +232,7 @@
232#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 232#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
233#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ 233#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
234#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ 234#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
235#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
235#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 236#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
236#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 237#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
237#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 238#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -389,6 +390,12 @@
389 390
390#define E1000_PBS_16K E1000_PBA_16K 391#define E1000_PBS_16K E1000_PBA_16K
391 392
393/* Uncorrectable/correctable ECC Error counts and enable bits */
394#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
395#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
396#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
397#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
398
392#define IFS_MAX 80 399#define IFS_MAX 80
393#define IFS_MIN 40 400#define IFS_MIN 40
394#define IFS_RATIO 4 401#define IFS_RATIO 4
@@ -408,6 +415,7 @@
408#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ 415#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
409#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 416#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
410#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 417#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
418#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
411#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 419#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
412#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ 420#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
413#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ 421#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -443,6 +451,7 @@
443#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 451#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
444#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 452#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
445#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 453#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
454#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
446#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ 455#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
447#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ 456#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
448#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ 457#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6782a2eea1bc..7e95f221d60b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,8 @@ struct e1000_adapter {
309 309
310 struct napi_struct napi; 310 struct napi_struct napi;
311 311
312 unsigned int uncorr_errors; /* uncorrectable ECC errors */
313 unsigned int corr_errors; /* correctable ECC errors */
312 unsigned int restart_queue; 314 unsigned int restart_queue;
313 u32 txd_cmd; 315 u32 txd_cmd;
314 316
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f95bc6ee1c22..fd4772a2691c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
108 E1000_STAT("dropped_smbus", stats.mgpdc), 108 E1000_STAT("dropped_smbus", stats.mgpdc),
109 E1000_STAT("rx_dma_failed", rx_dma_failed), 109 E1000_STAT("rx_dma_failed", rx_dma_failed),
110 E1000_STAT("tx_dma_failed", tx_dma_failed), 110 E1000_STAT("tx_dma_failed", tx_dma_failed),
111 E1000_STAT("uncorr_ecc_errors", uncorr_errors),
112 E1000_STAT("corr_ecc_errors", corr_errors),
111}; 113};
112 114
113#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) 115#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index cf217777586c..b88676ff3d86 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -77,6 +77,7 @@ enum e1e_registers {
77#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ 77#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
78 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 78 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
79 E1000_PBS = 0x01008, /* Packet Buffer Size */ 79 E1000_PBS = 0x01008, /* Packet Buffer Size */
80 E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
80 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ 81 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
81 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ 82 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
82 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ 83 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 976336547607..24d9f61956f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3624 if (hw->mac.type == e1000_ich8lan) 3624 if (hw->mac.type == e1000_ich8lan)
3625 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 3625 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3626 ew32(RFCTL, reg); 3626 ew32(RFCTL, reg);
3627
3628 /* Enable ECC on Lynxpoint */
3629 if (hw->mac.type == e1000_pch_lpt) {
3630 reg = er32(PBECCSTS);
3631 reg |= E1000_PBECCSTS_ECC_ENABLE;
3632 ew32(PBECCSTS, reg);
3633
3634 reg = er32(CTRL);
3635 reg |= E1000_CTRL_MEHE;
3636 ew32(CTRL, reg);
3637 }
3627} 3638}
3628 3639
3629/** 3640/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fbf75fdca994..643c883dd795 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1678 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1678 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1679 } 1679 }
1680 1680
1681 /* Reset on uncorrectable ECC error */
1682 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1683 u32 pbeccsts = er32(PBECCSTS);
1684
1685 adapter->corr_errors +=
1686 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1687 adapter->uncorr_errors +=
1688 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1689 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1690
1691 /* Do the reset outside of interrupt context */
1692 schedule_work(&adapter->reset_task);
1693
1694 /* return immediately since reset is imminent */
1695 return IRQ_HANDLED;
1696 }
1697
1681 if (napi_schedule_prep(&adapter->napi)) { 1698 if (napi_schedule_prep(&adapter->napi)) {
1682 adapter->total_tx_bytes = 0; 1699 adapter->total_tx_bytes = 0;
1683 adapter->total_tx_packets = 0; 1700 adapter->total_tx_packets = 0;
@@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
1741 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1758 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1742 } 1759 }
1743 1760
1761 /* Reset on uncorrectable ECC error */
1762 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1763 u32 pbeccsts = er32(PBECCSTS);
1764
1765 adapter->corr_errors +=
1766 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1767 adapter->uncorr_errors +=
1768 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1769 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1770
1771 /* Do the reset outside of interrupt context */
1772 schedule_work(&adapter->reset_task);
1773
1774 /* return immediately since reset is imminent */
1775 return IRQ_HANDLED;
1776 }
1777
1744 if (napi_schedule_prep(&adapter->napi)) { 1778 if (napi_schedule_prep(&adapter->napi)) {
1745 adapter->total_tx_bytes = 0; 1779 adapter->total_tx_bytes = 0;
1746 adapter->total_tx_packets = 0; 1780 adapter->total_tx_packets = 0;
@@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
2104 if (adapter->msix_entries) { 2138 if (adapter->msix_entries) {
2105 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2139 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2106 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2140 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2141 } else if (hw->mac.type == e1000_pch_lpt) {
2142 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2107 } else { 2143 } else {
2108 ew32(IMS, IMS_ENABLE_MASK); 2144 ew32(IMS, IMS_ENABLE_MASK);
2109 } 2145 }
@@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4251 adapter->stats.mgptc += er32(MGTPTC); 4287 adapter->stats.mgptc += er32(MGTPTC);
4252 adapter->stats.mgprc += er32(MGTPRC); 4288 adapter->stats.mgprc += er32(MGTPRC);
4253 adapter->stats.mgpdc += er32(MGTPDC); 4289 adapter->stats.mgpdc += er32(MGTPDC);
4290
4291 /* Correctable ECC Errors */
4292 if (hw->mac.type == e1000_pch_lpt) {
4293 u32 pbeccsts = er32(PBECCSTS);
4294 adapter->corr_errors +=
4295 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4296 adapter->uncorr_errors +=
4297 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4298 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4299 }
4254} 4300}
4255 4301
4256/** 4302/**
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index f3a632bf8d96..687c83d1bdab 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,7 +32,7 @@
32 32
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
38 38
@@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o 42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
43ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
43ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50aa546b8c7a..3504686d3af5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -24,9 +24,6 @@
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 25
26*******************************************************************************/ 26*******************************************************************************/
27
28#ifdef CONFIG_DEBUG_FS
29
30#include <linux/debugfs.h> 27#include <linux/debugfs.h>
31#include <linux/module.h> 28#include <linux/module.h>
32 29
@@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)
277{ 274{
278 debugfs_remove_recursive(ixgbe_dbg_root); 275 debugfs_remove_recursive(ixgbe_dbg_root);
279} 276}
280
281#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 20a5af6d87d0..b3e3294cfe53 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1401,6 +1401,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1401 /* set gso_size to avoid messing up TCP MSS */ 1401 /* set gso_size to avoid messing up TCP MSS */
1402 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), 1402 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1403 IXGBE_CB(skb)->append_cnt); 1403 IXGBE_CB(skb)->append_cnt);
1404 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1404} 1405}
1405 1406
1406static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, 1407static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 1a751c9d09c4..bb9256a1b0a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -660,11 +660,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
660 break; 660 break;
661 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 661 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
662 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 662 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
663 tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; 663 tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
664 break; 664 break;
665 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 665 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
666 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 666 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
667 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 667 tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
668 break; 668 break;
669 case HWTSTAMP_FILTER_PTP_V2_EVENT: 669 case HWTSTAMP_FILTER_PTP_V2_EVENT:
670 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 670 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2b799f4f1c37..6771b69f40d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -630,10 +630,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
630 ring->tx_csum++; 630 ring->tx_csum++;
631 } 631 }
632 632
633 /* Copy dst mac address to wqe */ 633 if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
634 ethh = (struct ethhdr *)skb->data; 634 /* Copy dst mac address to wqe. This allows loopback in eSwitch,
635 tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); 635 * so that VFs and PF can communicate with each other
636 tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); 636 */
637 ethh = (struct ethhdr *)skb->data;
638 tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
639 tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
640 }
641
637 /* Handle LSO (TSO) packets */ 642 /* Handle LSO (TSO) packets */
638 if (lso_header_size) { 643 if (lso_header_size) {
639 /* Mark opcode as LSO */ 644 /* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e1bafffbc3b1..5163af314990 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
380 } 380 }
381 } 381 }
382 382
383 if ((dev_cap->flags & 383 if ((dev->caps.flags &
384 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 384 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
385 mlx4_is_master(dev)) 385 mlx4_is_master(dev))
386 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 386 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@@ -1790,15 +1790,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1790 int i; 1790 int i;
1791 1791
1792 if (msi_x) { 1792 if (msi_x) {
1793 /* In multifunction mode each function gets 2 msi-X vectors 1793 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
1794 * one for data path completions anf the other for asynch events 1794 nreq);
1795 * or command completions */
1796 if (mlx4_is_mfunc(dev)) {
1797 nreq = 2;
1798 } else {
1799 nreq = min_t(int, dev->caps.num_eqs -
1800 dev->caps.reserved_eqs, nreq);
1801 }
1802 1795
1803 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1796 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
1804 if (!entries) 1797 if (!entries)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4d0f65..695667d471a1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
144 buffrag->length, PCI_DMA_TODEVICE); 144 buffrag->length, PCI_DMA_TODEVICE);
145 buffrag->dma = 0ULL; 145 buffrag->dma = 0ULL;
146 } 146 }
147 for (j = 0; j < cmd_buf->frag_count; j++) { 147 for (j = 1; j < cmd_buf->frag_count; j++) {
148 buffrag++; 148 buffrag++;
149 if (buffrag->dma) { 149 if (buffrag->dma) {
150 pci_unmap_page(adapter->pdev, buffrag->dma, 150 pci_unmap_page(adapter->pdev, buffrag->dma,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6098fd4adfeb..69e321a65077 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1963,10 +1963,12 @@ unwind:
1963 while (--i >= 0) { 1963 while (--i >= 0) {
1964 nf = &pbuf->frag_array[i+1]; 1964 nf = &pbuf->frag_array[i+1];
1965 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); 1965 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1966 nf->dma = 0ULL;
1966 } 1967 }
1967 1968
1968 nf = &pbuf->frag_array[0]; 1969 nf = &pbuf->frag_array[0];
1969 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); 1970 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1971 nf->dma = 0ULL;
1970 1972
1971out_err: 1973out_err:
1972 return -ENOMEM; 1974 return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6f82812d0fab..09aa310b6194 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -986,8 +986,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
986 th->seq = htonl(seq_number); 986 th->seq = htonl(seq_number);
987 length = skb->len; 987 length = skb->len;
988 988
989 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) 989 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
990 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); 990 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
991 if (skb->protocol == htons(ETH_P_IPV6))
992 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
993 else
994 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
995 }
991 996
992 if (vid != 0xffff) 997 if (vid != 0xffff)
993 __vlan_hwaccel_put_tag(skb, vid); 998 __vlan_hwaccel_put_tag(skb, vid);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed96f309bca8..998974f78742 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -450,7 +450,6 @@ enum rtl8168_registers {
450#define PWM_EN (1 << 22) 450#define PWM_EN (1 << 22)
451#define RXDV_GATED_EN (1 << 19) 451#define RXDV_GATED_EN (1 << 19)
452#define EARLY_TALLY_EN (1 << 16) 452#define EARLY_TALLY_EN (1 << 16)
453#define FORCE_CLK (1 << 15) /* force clock request */
454}; 453};
455 454
456enum rtl_register_content { 455enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
514 PMEnable = (1 << 0), /* Power Management Enable */ 513 PMEnable = (1 << 0), /* Power Management Enable */
515 514
516 /* Config2 register p. 25 */ 515 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ 516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
519 PCI_Clock_66MHz = 0x01, 517 PCI_Clock_66MHz = 0x01,
520 PCI_Clock_33MHz = 0x00, 518 PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
535 Spi_en = (1 << 3), 533 Spi_en = (1 << 3),
536 LanWake = (1 << 1), /* LanWake enable/disable */ 534 LanWake = (1 << 1), /* LanWake enable/disable */
537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
539 536
540 /* TBICSR p.28 */ 537 /* TBICSR p.28 */
541 TBIReset = 0x80000000, 538 TBIReset = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
684 RTL_FEATURE_WOL = (1 << 0), 681 RTL_FEATURE_WOL = (1 << 0),
685 RTL_FEATURE_MSI = (1 << 1), 682 RTL_FEATURE_MSI = (1 << 1),
686 RTL_FEATURE_GMII = (1 << 2), 683 RTL_FEATURE_GMII = (1 << 2),
687 RTL_FEATURE_FW_LOADED = (1 << 3),
688}; 684};
689 685
690struct rtl8169_counters { 686struct rtl8169_counters {
@@ -1826,8 +1822,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1826 1822
1827 if (opts2 & RxVlanTag) 1823 if (opts2 & RxVlanTag)
1828 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); 1824 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1829
1830 desc->opts2 = 0;
1831} 1825}
1832 1826
1833static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 1827static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2391,10 +2385,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
2391 struct rtl_fw *rtl_fw = tp->rtl_fw; 2385 struct rtl_fw *rtl_fw = tp->rtl_fw;
2392 2386
2393 /* TODO: release firmware once rtl_phy_write_fw signals failures. */ 2387 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2394 if (!IS_ERR_OR_NULL(rtl_fw)) { 2388 if (!IS_ERR_OR_NULL(rtl_fw))
2395 rtl_phy_write_fw(tp, rtl_fw); 2389 rtl_phy_write_fw(tp, rtl_fw);
2396 tp->features |= RTL_FEATURE_FW_LOADED;
2397 }
2398} 2390}
2399 2391
2400static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) 2392static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2405,31 +2397,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2405 rtl_apply_firmware(tp); 2397 rtl_apply_firmware(tp);
2406} 2398}
2407 2399
2408static void r810x_aldps_disable(struct rtl8169_private *tp)
2409{
2410 rtl_writephy(tp, 0x1f, 0x0000);
2411 rtl_writephy(tp, 0x18, 0x0310);
2412 msleep(100);
2413}
2414
2415static void r810x_aldps_enable(struct rtl8169_private *tp)
2416{
2417 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2418 return;
2419
2420 rtl_writephy(tp, 0x1f, 0x0000);
2421 rtl_writephy(tp, 0x18, 0x8310);
2422}
2423
2424static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2425{
2426 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2427 return;
2428
2429 rtl_writephy(tp, 0x1f, 0x0000);
2430 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
2431}
2432
2433static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) 2400static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2434{ 2401{
2435 static const struct phy_reg phy_reg_init[] = { 2402 static const struct phy_reg phy_reg_init[] = {
@@ -3220,8 +3187,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3220 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3187 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3221 rtl_writephy(tp, 0x1f, 0x0000); 3188 rtl_writephy(tp, 0x1f, 0x0000);
3222 3189
3223 r8168_aldps_enable_1(tp);
3224
3225 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ 3190 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3226 rtl_rar_exgmac_set(tp, tp->dev->dev_addr); 3191 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
3227} 3192}
@@ -3296,8 +3261,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3296 rtl_writephy(tp, 0x05, 0x8b85); 3261 rtl_writephy(tp, 0x05, 0x8b85);
3297 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3262 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3298 rtl_writephy(tp, 0x1f, 0x0000); 3263 rtl_writephy(tp, 0x1f, 0x0000);
3299
3300 r8168_aldps_enable_1(tp);
3301} 3264}
3302 3265
3303static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) 3266static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3305,8 +3268,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3305 rtl_apply_firmware(tp); 3268 rtl_apply_firmware(tp);
3306 3269
3307 rtl8168f_hw_phy_config(tp); 3270 rtl8168f_hw_phy_config(tp);
3308
3309 r8168_aldps_enable_1(tp);
3310} 3271}
3311 3272
3312static void rtl8411_hw_phy_config(struct rtl8169_private *tp) 3273static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3404,8 +3365,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3404 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3365 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3405 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3366 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3406 rtl_writephy(tp, 0x1f, 0x0000); 3367 rtl_writephy(tp, 0x1f, 0x0000);
3407
3408 r8168_aldps_enable_1(tp);
3409} 3368}
3410 3369
3411static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) 3370static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3491,19 +3450,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3491 }; 3450 };
3492 3451
3493 /* Disable ALDPS before ram code */ 3452 /* Disable ALDPS before ram code */
3494 r810x_aldps_disable(tp); 3453 rtl_writephy(tp, 0x1f, 0x0000);
3454 rtl_writephy(tp, 0x18, 0x0310);
3455 msleep(100);
3495 3456
3496 rtl_apply_firmware(tp); 3457 rtl_apply_firmware(tp);
3497 3458
3498 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3459 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3499
3500 r810x_aldps_enable(tp);
3501} 3460}
3502 3461
3503static void rtl8402_hw_phy_config(struct rtl8169_private *tp) 3462static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3504{ 3463{
3505 /* Disable ALDPS before setting firmware */ 3464 /* Disable ALDPS before setting firmware */
3506 r810x_aldps_disable(tp); 3465 rtl_writephy(tp, 0x1f, 0x0000);
3466 rtl_writephy(tp, 0x18, 0x0310);
3467 msleep(20);
3507 3468
3508 rtl_apply_firmware(tp); 3469 rtl_apply_firmware(tp);
3509 3470
@@ -3513,8 +3474,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3513 rtl_writephy(tp, 0x10, 0x401f); 3474 rtl_writephy(tp, 0x10, 0x401f);
3514 rtl_writephy(tp, 0x19, 0x7030); 3475 rtl_writephy(tp, 0x19, 0x7030);
3515 rtl_writephy(tp, 0x1f, 0x0000); 3476 rtl_writephy(tp, 0x1f, 0x0000);
3516
3517 r810x_aldps_enable(tp);
3518} 3477}
3519 3478
3520static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) 3479static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3527,7 +3486,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3527 }; 3486 };
3528 3487
3529 /* Disable ALDPS before ram code */ 3488 /* Disable ALDPS before ram code */
3530 r810x_aldps_disable(tp); 3489 rtl_writephy(tp, 0x1f, 0x0000);
3490 rtl_writephy(tp, 0x18, 0x0310);
3491 msleep(100);
3531 3492
3532 rtl_apply_firmware(tp); 3493 rtl_apply_firmware(tp);
3533 3494
@@ -3535,8 +3496,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3535 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3496 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3536 3497
3537 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 3498 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3538
3539 r810x_aldps_enable(tp);
3540} 3499}
3541 3500
3542static void rtl_hw_phy_config(struct net_device *dev) 3501static void rtl_hw_phy_config(struct net_device *dev)
@@ -5053,6 +5012,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5053 5012
5054 RTL_W8(MaxTxPacketSize, EarlySize); 5013 RTL_W8(MaxTxPacketSize, EarlySize);
5055 5014
5015 rtl_disable_clock_request(pdev);
5016
5056 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5017 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5057 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5018 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5058 5019
@@ -5061,8 +5022,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5061 5022
5062 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5023 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5063 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 5024 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5064 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); 5025 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
5065 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5066} 5026}
5067 5027
5068static void rtl_hw_start_8168f(struct rtl8169_private *tp) 5028static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5087,12 +5047,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5087 5047
5088 RTL_W8(MaxTxPacketSize, EarlySize); 5048 RTL_W8(MaxTxPacketSize, EarlySize);
5089 5049
5050 rtl_disable_clock_request(pdev);
5051
5090 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5052 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5091 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5053 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5092 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5054 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5093 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK); 5055 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5094 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); 5056 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
5095 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5096} 5057}
5097 5058
5098static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) 5059static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5149,10 +5110,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5149 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5110 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5150 5111
5151 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5112 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5152 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN); 5113 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5153 RTL_W8(MaxTxPacketSize, EarlySize); 5114 RTL_W8(MaxTxPacketSize, EarlySize);
5154 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5155 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5156 5115
5157 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5116 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5158 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5117 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5368,9 +5327,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5368 5327
5369 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5328 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5370 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5329 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5371 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5372 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5373 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5374 5330
5375 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5331 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5376} 5332}
@@ -5396,9 +5352,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5396 5352
5397 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5353 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5398 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5354 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5399 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5400 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5401 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5402 5355
5403 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); 5356 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5404 5357
@@ -5420,10 +5373,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5420 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5373 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5421 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); 5374 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5422 5375
5423 RTL_W32(MISC, 5376 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5424 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5425 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5426 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5427 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5377 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5428 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5378 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5429} 5379}
@@ -6064,8 +6014,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
6064 !(status & (RxRWT | RxFOVF)) && 6014 !(status & (RxRWT | RxFOVF)) &&
6065 (dev->features & NETIF_F_RXALL)) 6015 (dev->features & NETIF_F_RXALL))
6066 goto process_pkt; 6016 goto process_pkt;
6067
6068 rtl8169_mark_to_asic(desc, rx_buf_sz);
6069 } else { 6017 } else {
6070 struct sk_buff *skb; 6018 struct sk_buff *skb;
6071 dma_addr_t addr; 6019 dma_addr_t addr;
@@ -6086,16 +6034,14 @@ process_pkt:
6086 if (unlikely(rtl8169_fragmented_frame(status))) { 6034 if (unlikely(rtl8169_fragmented_frame(status))) {
6087 dev->stats.rx_dropped++; 6035 dev->stats.rx_dropped++;
6088 dev->stats.rx_length_errors++; 6036 dev->stats.rx_length_errors++;
6089 rtl8169_mark_to_asic(desc, rx_buf_sz); 6037 goto release_descriptor;
6090 continue;
6091 } 6038 }
6092 6039
6093 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], 6040 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6094 tp, pkt_size, addr); 6041 tp, pkt_size, addr);
6095 rtl8169_mark_to_asic(desc, rx_buf_sz);
6096 if (!skb) { 6042 if (!skb) {
6097 dev->stats.rx_dropped++; 6043 dev->stats.rx_dropped++;
6098 continue; 6044 goto release_descriptor;
6099 } 6045 }
6100 6046
6101 rtl8169_rx_csum(skb, status); 6047 rtl8169_rx_csum(skb, status);
@@ -6111,13 +6057,10 @@ process_pkt:
6111 tp->rx_stats.bytes += pkt_size; 6057 tp->rx_stats.bytes += pkt_size;
6112 u64_stats_update_end(&tp->rx_stats.syncp); 6058 u64_stats_update_end(&tp->rx_stats.syncp);
6113 } 6059 }
6114 6060release_descriptor:
6115 /* Work around for AMD plateform. */ 6061 desc->opts2 = 0;
6116 if ((desc->opts2 & cpu_to_le32(0xfffe000)) && 6062 wmb();
6117 (tp->mac_version == RTL_GIGA_MAC_VER_05)) { 6063 rtl8169_mark_to_asic(desc, rx_buf_sz);
6118 desc->opts2 = 0;
6119 cur_rx++;
6120 }
6121 } 6064 }
6122 6065
6123 count = cur_rx - tp->cur_rx; 6066 count = cur_rx - tp->cur_rx;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f07c0612abf6..b75f4b286895 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -69,7 +69,7 @@
69 69
70#undef STMMAC_XMIT_DEBUG 70#undef STMMAC_XMIT_DEBUG
71/*#define STMMAC_XMIT_DEBUG*/ 71/*#define STMMAC_XMIT_DEBUG*/
72#ifdef STMMAC_TX_DEBUG 72#ifdef STMMAC_XMIT_DEBUG
73#define TX_DBG(fmt, args...) printk(fmt, ## args) 73#define TX_DBG(fmt, args...) printk(fmt, ## args)
74#else 74#else
75#define TX_DBG(fmt, args...) do { } while (0) 75#define TX_DBG(fmt, args...) do { } while (0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0376a5e6b2bf..0b9829fe3eea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
188 goto bus_register_fail; 188 goto bus_register_fail;
189 } 189 }
190 190
191 priv->mii = new_bus;
192
193 found = 0; 191 found = 0;
194 for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 192 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
195 struct phy_device *phydev = new_bus->phy_map[addr]; 193 struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
237 } 235 }
238 } 236 }
239 237
240 if (!found) 238 if (!found) {
241 pr_warning("%s: No PHY found\n", ndev->name); 239 pr_warning("%s: No PHY found\n", ndev->name);
240 mdiobus_unregister(new_bus);
241 mdiobus_free(new_bus);
242 return -ENODEV;
243 }
244
245 priv->mii = new_bus;
242 246
243 return 0; 247 return 0;
244 248
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 7992b3e05d3d..78ace59efd29 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
1801 rp->tx_skbuff[entry]->len, 1801 rp->tx_skbuff[entry]->len,
1802 PCI_DMA_TODEVICE); 1802 PCI_DMA_TODEVICE);
1803 } 1803 }
1804 dev_kfree_skb_irq(rp->tx_skbuff[entry]); 1804 dev_kfree_skb(rp->tx_skbuff[entry]);
1805 rp->tx_skbuff[entry] = NULL; 1805 rp->tx_skbuff[entry] = NULL;
1806 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1806 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1807 } 1807 }
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
2010 if (intr_status & IntrPCIErr) 2010 if (intr_status & IntrPCIErr)
2011 netif_warn(rp, hw, dev, "PCI error\n"); 2011 netif_warn(rp, hw, dev, "PCI error\n");
2012 2012
2013 napi_disable(&rp->napi); 2013 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2014 rhine_irq_disable(rp);
2015 /* Slow and safe. Consider __napi_schedule as a replacement ? */
2016 napi_enable(&rp->napi);
2017 napi_schedule(&rp->napi);
2018 2014
2019out_unlock: 2015out_unlock:
2020 mutex_unlock(&rp->task_lock); 2016 mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5fd6f4674326..e6fe0d80d612 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -84,7 +84,7 @@ struct hv_netvsc_packet {
84}; 84};
85 85
86struct netvsc_device_info { 86struct netvsc_device_info {
87 unsigned char mac_adr[6]; 87 unsigned char mac_adr[ETH_ALEN];
88 bool link_state; /* 0 - link up, 1 - link down */ 88 bool link_state; /* 0 - link up, 1 - link down */
89 int ring_size; 89 int ring_size;
90}; 90};
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f825a629a699..8264f0ef7692 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
349 struct net_device_context *ndevctx = netdev_priv(ndev); 349 struct net_device_context *ndevctx = netdev_priv(ndev);
350 struct hv_device *hdev = ndevctx->device_ctx; 350 struct hv_device *hdev = ndevctx->device_ctx;
351 struct sockaddr *addr = p; 351 struct sockaddr *addr = p;
352 char save_adr[14]; 352 char save_adr[ETH_ALEN];
353 unsigned char save_aatype; 353 unsigned char save_aatype;
354 int err; 354 int err;
355 355
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 81f8f9e31db5..fcbf680c3e62 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
77 77
78 skb_orphan(skb); 78 skb_orphan(skb);
79 79
80 /* Before queueing this packet to netif_rx(),
81 * make sure dst is refcounted.
82 */
83 skb_dst_force(skb);
84
80 skb->protocol = eth_type_trans(skb, dev); 85 skb->protocol = eth_type_trans(skb, dev);
81 86
82 /* it's OK to use per_cpu_ptr() because BHs are off */ 87 /* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 68a43fe602e7..d3fb97d97cbc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,
822 822
823static size_t macvlan_get_size(const struct net_device *dev) 823static size_t macvlan_get_size(const struct net_device *dev)
824{ 824{
825 return nla_total_size(4); 825 return (0
826 + nla_total_size(4) /* IFLA_MACVLAN_MODE */
827 + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
828 );
826} 829}
827 830
828static int macvlan_fill_info(struct sk_buff *skb, 831static int macvlan_fill_info(struct sk_buff *skb,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d5199cb4caec..b5ddd5077a80 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");
36 36
37/* IP101A/G - IP1001 */ 37/* IP101A/G - IP1001 */
38#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */ 38#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
39#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
40#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
39#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */ 41#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
40#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
41#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 42#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
42#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 43#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
43#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ 44#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
@@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)
138 if (c < 0) 139 if (c < 0)
139 return c; 140 return c;
140 141
141 /* INTR pin used: speed/link/duplex will cause an interrupt */ 142 if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
142 c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); 143 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
143 if (c < 0) 144 (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
144 return c; 145 (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
145 146
146 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
147 /* Additional delay (2ns) used to adjust RX clock phase
148 * at RGMII interface */
149 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); 147 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
150 if (c < 0) 148 if (c < 0)
151 return c; 149 return c;
152 150
153 c |= IP1001_PHASE_SEL_MASK; 151 c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
152
153 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
154 c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
155 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
156 c |= IP1001_RXPHASE_SEL;
157 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
158 c |= IP1001_TXPHASE_SEL;
159
154 c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); 160 c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
155 if (c < 0) 161 if (c < 0)
156 return c; 162 return c;
@@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)
167 if (c < 0) 173 if (c < 0)
168 return c; 174 return c;
169 175
176 /* INTR pin used: speed/link/duplex will cause an interrupt */
177 c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
178 if (c < 0)
179 return c;
180
170 /* Enable Auto Power Saving mode */ 181 /* Enable Auto Power Saving mode */
171 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); 182 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
172 c |= IP101A_G_APS_ON; 183 c |= IP101A_G_APS_ON;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5d2a3f215887..22dec9c7ef05 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
353 int err; 353 int err;
354 int temp; 354 int temp;
355 355
356 /* Enable Fiber/Copper auto selection */
357 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
358 temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
359 phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
360
361 temp = phy_read(phydev, MII_BMCR);
362 temp |= BMCR_RESET;
363 phy_write(phydev, MII_BMCR, temp);
364
365 if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || 356 if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
366 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || 357 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
367 (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || 358 (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 0c9accb1c14f..e91d7d736ae2 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -53,7 +53,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
53{ 53{
54 enum of_gpio_flags f; 54 enum of_gpio_flags f;
55 struct mdio_mux_gpio_state *s; 55 struct mdio_mux_gpio_state *s;
56 unsigned int num_gpios; 56 int num_gpios;
57 unsigned int n; 57 unsigned int n;
58 int r; 58 int r;
59 59
@@ -61,7 +61,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
61 return -ENODEV; 61 return -ENODEV;
62 62
63 num_gpios = of_gpio_count(pdev->dev.of_node); 63 num_gpios = of_gpio_count(pdev->dev.of_node);
64 if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) 64 if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
65 return -ENODEV; 65 return -ENODEV;
66 66
67 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); 67 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index af372d0957fe..2917a86f4c43 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,11 @@ struct tap_filter {
109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
110}; 110};
111 111
112/* 1024 is probably a high enough limit: modern hypervisors seem to support on 112/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
113 * the order of 100-200 CPUs so this leaves us some breathing space if we want 113 * the netdevice to be fit in one page. So we can make sure the success of
114 * to match a queue per guest CPU. 114 * memory allocation. TODO: increase the limit. */
115 */ 115#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
116#define MAX_TAP_QUEUES 1024 116#define MAX_TAP_FLOWS 4096
117 117
118#define TUN_FLOW_EXPIRE (3 * HZ) 118#define TUN_FLOW_EXPIRE (3 * HZ)
119 119
@@ -185,6 +185,8 @@ struct tun_struct {
185 unsigned long ageing_time; 185 unsigned long ageing_time;
186 unsigned int numdisabled; 186 unsigned int numdisabled;
187 struct list_head disabled; 187 struct list_head disabled;
188 void *security;
189 u32 flow_count;
188}; 190};
189 191
190static inline u32 tun_hashfn(u32 rxhash) 192static inline u32 tun_hashfn(u32 rxhash)
@@ -218,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
218 e->queue_index = queue_index; 220 e->queue_index = queue_index;
219 e->tun = tun; 221 e->tun = tun;
220 hlist_add_head_rcu(&e->hash_link, head); 222 hlist_add_head_rcu(&e->hash_link, head);
223 ++tun->flow_count;
221 } 224 }
222 return e; 225 return e;
223} 226}
@@ -228,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
228 e->rxhash, e->queue_index); 231 e->rxhash, e->queue_index);
229 hlist_del_rcu(&e->hash_link); 232 hlist_del_rcu(&e->hash_link);
230 kfree_rcu(e, rcu); 233 kfree_rcu(e, rcu);
234 --tun->flow_count;
231} 235}
232 236
233static void tun_flow_flush(struct tun_struct *tun) 237static void tun_flow_flush(struct tun_struct *tun)
@@ -294,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
294} 298}
295 299
296static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 300static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
297 u16 queue_index) 301 struct tun_file *tfile)
298{ 302{
299 struct hlist_head *head; 303 struct hlist_head *head;
300 struct tun_flow_entry *e; 304 struct tun_flow_entry *e;
301 unsigned long delay = tun->ageing_time; 305 unsigned long delay = tun->ageing_time;
306 u16 queue_index = tfile->queue_index;
302 307
303 if (!rxhash) 308 if (!rxhash)
304 return; 309 return;
@@ -307,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
307 312
308 rcu_read_lock(); 313 rcu_read_lock();
309 314
310 if (tun->numqueues == 1) 315 /* We may get a very small possibility of OOO during switching, not
316 * worth to optimize.*/
317 if (tun->numqueues == 1 || tfile->detached)
311 goto unlock; 318 goto unlock;
312 319
313 e = tun_flow_find(head, rxhash); 320 e = tun_flow_find(head, rxhash);
@@ -317,7 +324,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
317 e->updated = jiffies; 324 e->updated = jiffies;
318 } else { 325 } else {
319 spin_lock_bh(&tun->lock); 326 spin_lock_bh(&tun->lock);
320 if (!tun_flow_find(head, rxhash)) 327 if (!tun_flow_find(head, rxhash) &&
328 tun->flow_count < MAX_TAP_FLOWS)
321 tun_flow_create(tun, head, rxhash, queue_index); 329 tun_flow_create(tun, head, rxhash, queue_index);
322 330
323 if (!timer_pending(&tun->flow_gc_timer)) 331 if (!timer_pending(&tun->flow_gc_timer))
@@ -406,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
406 414
407 tun = rtnl_dereference(tfile->tun); 415 tun = rtnl_dereference(tfile->tun);
408 416
409 if (tun) { 417 if (tun && !tfile->detached) {
410 u16 index = tfile->queue_index; 418 u16 index = tfile->queue_index;
411 BUG_ON(index >= tun->numqueues); 419 BUG_ON(index >= tun->numqueues);
412 dev = tun->dev; 420 dev = tun->dev;
413 421
414 rcu_assign_pointer(tun->tfiles[index], 422 rcu_assign_pointer(tun->tfiles[index],
415 tun->tfiles[tun->numqueues - 1]); 423 tun->tfiles[tun->numqueues - 1]);
416 rcu_assign_pointer(tfile->tun, NULL);
417 ntfile = rtnl_dereference(tun->tfiles[index]); 424 ntfile = rtnl_dereference(tun->tfiles[index]);
418 ntfile->queue_index = index; 425 ntfile->queue_index = index;
419 426
420 --tun->numqueues; 427 --tun->numqueues;
421 if (clean) 428 if (clean) {
429 rcu_assign_pointer(tfile->tun, NULL);
422 sock_put(&tfile->sk); 430 sock_put(&tfile->sk);
423 else 431 } else
424 tun_disable_queue(tun, tfile); 432 tun_disable_queue(tun, tfile);
425 433
426 synchronize_net(); 434 synchronize_net();
@@ -434,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
434 } 442 }
435 443
436 if (clean) { 444 if (clean) {
437 if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && 445 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
438 !(tun->flags & TUN_PERSIST)) 446 netif_carrier_off(tun->dev);
439 if (tun->dev->reg_state == NETREG_REGISTERED) 447
448 if (!(tun->flags & TUN_PERSIST) &&
449 tun->dev->reg_state == NETREG_REGISTERED)
440 unregister_netdevice(tun->dev); 450 unregister_netdevice(tun->dev);
451 }
441 452
442 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, 453 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
443 &tfile->socket.flags)); 454 &tfile->socket.flags));
@@ -465,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
465 rcu_assign_pointer(tfile->tun, NULL); 476 rcu_assign_pointer(tfile->tun, NULL);
466 --tun->numqueues; 477 --tun->numqueues;
467 } 478 }
479 list_for_each_entry(tfile, &tun->disabled, next) {
480 wake_up_all(&tfile->wq.wait);
481 rcu_assign_pointer(tfile->tun, NULL);
482 }
468 BUG_ON(tun->numqueues != 0); 483 BUG_ON(tun->numqueues != 0);
469 484
470 synchronize_net(); 485 synchronize_net();
@@ -490,8 +505,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
490 struct tun_file *tfile = file->private_data; 505 struct tun_file *tfile = file->private_data;
491 int err; 506 int err;
492 507
508 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
509 if (err < 0)
510 goto out;
511
493 err = -EINVAL; 512 err = -EINVAL;
494 if (rtnl_dereference(tfile->tun)) 513 if (rtnl_dereference(tfile->tun) && !tfile->detached)
495 goto out; 514 goto out;
496 515
497 err = -EBUSY; 516 err = -EBUSY;
@@ -1190,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1190 tun->dev->stats.rx_packets++; 1209 tun->dev->stats.rx_packets++;
1191 tun->dev->stats.rx_bytes += len; 1210 tun->dev->stats.rx_bytes += len;
1192 1211
1193 tun_flow_update(tun, rxhash, tfile->queue_index); 1212 tun_flow_update(tun, rxhash, tfile);
1194 return total_len; 1213 return total_len;
1195} 1214}
1196 1215
@@ -1373,6 +1392,7 @@ static void tun_free_netdev(struct net_device *dev)
1373 1392
1374 BUG_ON(!(list_empty(&tun->disabled))); 1393 BUG_ON(!(list_empty(&tun->disabled)));
1375 tun_flow_uninit(tun); 1394 tun_flow_uninit(tun);
1395 security_tun_dev_free_security(tun->security);
1376 free_netdev(dev); 1396 free_netdev(dev);
1377} 1397}
1378 1398
@@ -1562,7 +1582,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1562 1582
1563 if (tun_not_capable(tun)) 1583 if (tun_not_capable(tun))
1564 return -EPERM; 1584 return -EPERM;
1565 err = security_tun_dev_attach(tfile->socket.sk); 1585 err = security_tun_dev_open(tun->security);
1566 if (err < 0) 1586 if (err < 0)
1567 return err; 1587 return err;
1568 1588
@@ -1577,6 +1597,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1577 else { 1597 else {
1578 char *name; 1598 char *name;
1579 unsigned long flags = 0; 1599 unsigned long flags = 0;
1600 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1601 MAX_TAP_QUEUES : 1;
1580 1602
1581 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1603 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1582 return -EPERM; 1604 return -EPERM;
@@ -1600,8 +1622,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1600 name = ifr->ifr_name; 1622 name = ifr->ifr_name;
1601 1623
1602 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 1624 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1603 tun_setup, 1625 tun_setup, queues, queues);
1604 MAX_TAP_QUEUES, MAX_TAP_QUEUES); 1626
1605 if (!dev) 1627 if (!dev)
1606 return -ENOMEM; 1628 return -ENOMEM;
1607 1629
@@ -1619,7 +1641,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1619 1641
1620 spin_lock_init(&tun->lock); 1642 spin_lock_init(&tun->lock);
1621 1643
1622 security_tun_dev_post_create(&tfile->sk); 1644 err = security_tun_dev_alloc_security(&tun->security);
1645 if (err < 0)
1646 goto err_free_dev;
1623 1647
1624 tun_net_init(dev); 1648 tun_net_init(dev);
1625 1649
@@ -1644,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1644 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1668 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1645 device_create_file(&tun->dev->dev, &dev_attr_group)) 1669 device_create_file(&tun->dev->dev, &dev_attr_group))
1646 pr_err("Failed to create tun sysfs files\n"); 1670 pr_err("Failed to create tun sysfs files\n");
1647
1648 netif_carrier_on(tun->dev);
1649 } 1671 }
1650 1672
1673 netif_carrier_on(tun->dev);
1674
1651 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 1675 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1652 1676
1653 if (ifr->ifr_flags & IFF_NO_PI) 1677 if (ifr->ifr_flags & IFF_NO_PI)
@@ -1789,19 +1813,24 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1789 1813
1790 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 1814 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1791 tun = tfile->detached; 1815 tun = tfile->detached;
1792 if (!tun) 1816 if (!tun) {
1793 ret = -EINVAL; 1817 ret = -EINVAL;
1794 else 1818 goto unlock;
1795 ret = tun_attach(tun, file); 1819 }
1820 ret = security_tun_dev_attach_queue(tun->security);
1821 if (ret < 0)
1822 goto unlock;
1823 ret = tun_attach(tun, file);
1796 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1824 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1797 tun = rtnl_dereference(tfile->tun); 1825 tun = rtnl_dereference(tfile->tun);
1798 if (!tun || !(tun->flags & TUN_TAP_MQ)) 1826 if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
1799 ret = -EINVAL; 1827 ret = -EINVAL;
1800 else 1828 else
1801 __tun_detach(tfile, false); 1829 __tun_detach(tfile, false);
1802 } else 1830 } else
1803 ret = -EINVAL; 1831 ret = -EINVAL;
1804 1832
1833unlock:
1805 rtnl_unlock(); 1834 rtnl_unlock();
1806 return ret; 1835 return ret;
1807} 1836}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 42f51c71ec1f..248d2dc765a5 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -374,6 +374,21 @@ static const struct driver_info cdc_mbim_info = {
374 .tx_fixup = cdc_mbim_tx_fixup, 374 .tx_fixup = cdc_mbim_tx_fixup,
375}; 375};
376 376
377/* MBIM and NCM devices should not need a ZLP after NTBs with
378 * dwNtbOutMaxSize length. This driver_info is for the exceptional
379 * devices requiring it anyway, allowing them to be supported without
380 * forcing the performance penalty on all the sane devices.
381 */
382static const struct driver_info cdc_mbim_info_zlp = {
383 .description = "CDC MBIM",
384 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
385 .bind = cdc_mbim_bind,
386 .unbind = cdc_mbim_unbind,
387 .manage_power = cdc_mbim_manage_power,
388 .rx_fixup = cdc_mbim_rx_fixup,
389 .tx_fixup = cdc_mbim_tx_fixup,
390};
391
377static const struct usb_device_id mbim_devs[] = { 392static const struct usb_device_id mbim_devs[] = {
378 /* This duplicate NCM entry is intentional. MBIM devices can 393 /* This duplicate NCM entry is intentional. MBIM devices can
379 * be disguised as NCM by default, and this is necessary to 394 * be disguised as NCM by default, and this is necessary to
@@ -385,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
385 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 400 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
386 .driver_info = (unsigned long)&cdc_mbim_info, 401 .driver_info = (unsigned long)&cdc_mbim_info,
387 }, 402 },
403 /* Sierra Wireless MC7710 need ZLPs */
404 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
405 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
406 },
388 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 407 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
389 .driver_info = (unsigned long)&cdc_mbim_info, 408 .driver_info = (unsigned long)&cdc_mbim_info,
390 }, 409 },
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 71b6e92b8e9b..00d3b2d37828 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -435,6 +435,13 @@ advance:
435 len -= temp; 435 len -= temp;
436 } 436 }
437 437
438 /* some buggy devices have an IAD but no CDC Union */
439 if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
440 ctx->control = intf;
441 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
442 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
443 }
444
438 /* check if we got everything */ 445 /* check if we got everything */
439 if ((ctx->control == NULL) || (ctx->data == NULL) || 446 if ((ctx->control == NULL) || (ctx->data == NULL) ||
440 ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf)))) 447 ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
@@ -497,7 +504,8 @@ advance:
497error2: 504error2:
498 usb_set_intfdata(ctx->control, NULL); 505 usb_set_intfdata(ctx->control, NULL);
499 usb_set_intfdata(ctx->data, NULL); 506 usb_set_intfdata(ctx->data, NULL);
500 usb_driver_release_interface(driver, ctx->data); 507 if (ctx->data != ctx->control)
508 usb_driver_release_interface(driver, ctx->data);
501error: 509error:
502 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]); 510 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
503 dev->data[0] = 0; 511 dev->data[0] = 0;
@@ -1155,6 +1163,20 @@ static const struct driver_info wwan_info = {
1155 .tx_fixup = cdc_ncm_tx_fixup, 1163 .tx_fixup = cdc_ncm_tx_fixup,
1156}; 1164};
1157 1165
1166/* Same as wwan_info, but with FLAG_NOARP */
1167static const struct driver_info wwan_noarp_info = {
1168 .description = "Mobile Broadband Network Device (NO ARP)",
1169 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1170 | FLAG_WWAN | FLAG_NOARP,
1171 .bind = cdc_ncm_bind,
1172 .unbind = cdc_ncm_unbind,
1173 .check_connect = cdc_ncm_check_connect,
1174 .manage_power = usbnet_manage_power,
1175 .status = cdc_ncm_status,
1176 .rx_fixup = cdc_ncm_rx_fixup,
1177 .tx_fixup = cdc_ncm_tx_fixup,
1178};
1179
1158static const struct usb_device_id cdc_devs[] = { 1180static const struct usb_device_id cdc_devs[] = {
1159 /* Ericsson MBM devices like F5521gw */ 1181 /* Ericsson MBM devices like F5521gw */
1160 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 1182 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -1193,6 +1215,16 @@ static const struct usb_device_id cdc_devs[] = {
1193 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), 1215 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
1194 .driver_info = (unsigned long)&wwan_info, 1216 .driver_info = (unsigned long)&wwan_info,
1195 }, 1217 },
1218 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
1219 .driver_info = (unsigned long)&wwan_info,
1220 },
1221
1222 /* Infineon(now Intel) HSPA Modem platform */
1223 { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
1224 USB_CLASS_COMM,
1225 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1226 .driver_info = (unsigned long)&wwan_noarp_info,
1227 },
1196 1228
1197 /* Generic CDC-NCM devices */ 1229 /* Generic CDC-NCM devices */
1198 { USB_INTERFACE_INFO(USB_CLASS_COMM, 1230 { USB_INTERFACE_INFO(USB_CLASS_COMM,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3f554c1149f3..d7e99445518e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -45,6 +45,12 @@
45#define DM_MCAST_ADDR 0x16 /* 8 bytes */ 45#define DM_MCAST_ADDR 0x16 /* 8 bytes */
46#define DM_GPR_CTRL 0x1e 46#define DM_GPR_CTRL 0x1e
47#define DM_GPR_DATA 0x1f 47#define DM_GPR_DATA 0x1f
48#define DM_CHIP_ID 0x2c
49#define DM_MODE_CTRL 0x91 /* only on dm9620 */
50
51/* chip id values */
52#define ID_DM9601 0
53#define ID_DM9620 1
48 54
49#define DM_MAX_MCAST 64 55#define DM_MAX_MCAST 64
50#define DM_MCAST_SIZE 8 56#define DM_MCAST_SIZE 8
@@ -53,7 +59,6 @@
53#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */ 59#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
54#define DM_TIMEOUT 1000 60#define DM_TIMEOUT 1000
55 61
56
57static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) 62static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
58{ 63{
59 int err; 64 int err;
@@ -84,32 +89,23 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
84 89
85static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 90static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
86{ 91{
87 return usbnet_write_cmd(dev, DM_WRITE_REGS, 92 return usbnet_write_cmd(dev, DM_WRITE_REG,
88 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 93 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
89 value, reg, NULL, 0); 94 value, reg, NULL, 0);
90} 95}
91 96
92static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value, 97static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
93 u16 length, void *data)
94{ 98{
95 usbnet_write_cmd_async(dev, DM_WRITE_REGS, 99 usbnet_write_cmd_async(dev, DM_WRITE_REGS,
96 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 100 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
97 value, reg, data, length); 101 0, reg, data, length);
98}
99
100static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
101{
102 netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
103
104 dm_write_async_helper(dev, reg, 0, length, data);
105} 102}
106 103
107static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value) 104static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
108{ 105{
109 netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n", 106 usbnet_write_cmd_async(dev, DM_WRITE_REG,
110 reg, value); 107 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
111 108 value, reg, NULL, 0);
112 dm_write_async_helper(dev, reg, value, 0, NULL);
113} 109}
114 110
115static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value) 111static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@ -358,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
358static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) 354static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
359{ 355{
360 int ret; 356 int ret;
361 u8 mac[ETH_ALEN]; 357 u8 mac[ETH_ALEN], id;
362 358
363 ret = usbnet_get_endpoints(dev, intf); 359 ret = usbnet_get_endpoints(dev, intf);
364 if (ret) 360 if (ret)
@@ -399,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
399 __dm9601_set_mac_address(dev); 395 __dm9601_set_mac_address(dev);
400 } 396 }
401 397
398 if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
399 netdev_err(dev->net, "Error reading chip ID\n");
400 ret = -ENODEV;
401 goto out;
402 }
403
404 /* put dm9620 devices in dm9601 mode */
405 if (id == ID_DM9620) {
406 u8 mode;
407
408 if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
409 netdev_err(dev->net, "Error reading MODE_CTRL\n");
410 ret = -ENODEV;
411 goto out;
412 }
413 dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
414 }
415
402 /* power up phy */ 416 /* power up phy */
403 dm_write_reg(dev, DM_GPR_CTRL, 1); 417 dm_write_reg(dev, DM_GPR_CTRL, 1);
404 dm_write_reg(dev, DM_GPR_DATA, 0); 418 dm_write_reg(dev, DM_GPR_DATA, 0);
@@ -581,6 +595,10 @@ static const struct usb_device_id products[] = {
581 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ 595 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
582 .driver_info = (unsigned long)&dm9601_info, 596 .driver_info = (unsigned long)&dm9601_info,
583 }, 597 },
598 {
599 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
600 .driver_info = (unsigned long)&dm9601_info,
601 },
584 {}, // END 602 {}, // END
585}; 603};
586 604
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6a1ca500e612..19d903598b0d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
351 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), 351 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
352 .driver_info = (unsigned long)&qmi_wwan_info, 352 .driver_info = (unsigned long)&qmi_wwan_info,
353 }, 353 },
354 { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
355 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
356 .driver_info = (unsigned long)&qmi_wwan_info,
357 },
354 358
355 /* 2. Combined interface devices matching on class+protocol */ 359 /* 2. Combined interface devices matching on class+protocol */
356 { /* Huawei E367 and possibly others in "Windows mode" */ 360 { /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
361 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), 365 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
362 .driver_info = (unsigned long)&qmi_wwan_info, 366 .driver_info = (unsigned long)&qmi_wwan_info,
363 }, 367 },
368 { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
369 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
370 .driver_info = (unsigned long)&qmi_wwan_info,
371 },
372 { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
373 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
374 .driver_info = (unsigned long)&qmi_wwan_info,
375 },
364 { /* Pantech UML290, P4200 and more */ 376 { /* Pantech UML290, P4200 and more */
365 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), 377 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
366 .driver_info = (unsigned long)&qmi_wwan_info, 378 .driver_info = (unsigned long)&qmi_wwan_info,
@@ -399,6 +411,7 @@ static const struct usb_device_id products[] = {
399 }, 411 },
400 412
401 /* 3. Combined interface devices matching on interface number */ 413 /* 3. Combined interface devices matching on interface number */
414 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
402 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 415 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
403 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 416 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
404 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 417 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -433,6 +446,7 @@ static const struct usb_device_id products[] = {
433 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ 446 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
434 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, 447 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
435 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */ 448 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
449 {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
436 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ 450 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
437 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ 451 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
438 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ 452 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
@@ -459,6 +473,8 @@ static const struct usb_device_id products[] = {
459 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 473 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
460 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 474 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
461 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 475 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
476 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
477 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
462 478
463 /* 4. Gobi 1000 devices */ 479 /* 4. Gobi 1000 devices */
464 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 480 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3d4bf01641b4..5e33606c1366 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
380 unsigned long lockflags; 380 unsigned long lockflags;
381 size_t size = dev->rx_urb_size; 381 size_t size = dev->rx_urb_size;
382 382
383 /* prevent rx skb allocation when error ratio is high */
384 if (test_bit(EVENT_RX_KILL, &dev->flags)) {
385 usb_free_urb(urb);
386 return -ENOLINK;
387 }
388
383 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 389 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
384 if (!skb) { 390 if (!skb) {
385 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); 391 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
539 break; 545 break;
540 } 546 }
541 547
548 /* stop rx if packet error rate is high */
549 if (++dev->pkt_cnt > 30) {
550 dev->pkt_cnt = 0;
551 dev->pkt_err = 0;
552 } else {
553 if (state == rx_cleanup)
554 dev->pkt_err++;
555 if (dev->pkt_err > 20)
556 set_bit(EVENT_RX_KILL, &dev->flags);
557 }
558
542 state = defer_bh(dev, skb, &dev->rxq, state); 559 state = defer_bh(dev, skb, &dev->rxq, state);
543 560
544 if (urb) { 561 if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
791 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" : 808 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
792 "simple"); 809 "simple");
793 810
811 /* reset rx error state */
812 dev->pkt_cnt = 0;
813 dev->pkt_err = 0;
814 clear_bit(EVENT_RX_KILL, &dev->flags);
815
794 // delay posting reads until we're fully open 816 // delay posting reads until we're fully open
795 tasklet_schedule (&dev->bh); 817 tasklet_schedule (&dev->bh);
796 if (info->manage_power) { 818 if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1103 if (info->tx_fixup) { 1125 if (info->tx_fixup) {
1104 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1126 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1105 if (!skb) { 1127 if (!skb) {
1106 if (netif_msg_tx_err(dev)) { 1128 /* packet collected; minidriver waiting for more */
1107 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1129 if (info->flags & FLAG_MULTI_PACKET)
1108 goto drop;
1109 } else {
1110 /* cdc_ncm collected packet; waits for more */
1111 goto not_drop; 1130 goto not_drop;
1112 } 1131 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1132 goto drop;
1113 } 1133 }
1114 } 1134 }
1115 length = skb->len; 1135 length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
1254 } 1274 }
1255 } 1275 }
1256 1276
1277 /* restart RX again after disabling due to high error rate */
1278 clear_bit(EVENT_RX_KILL, &dev->flags);
1279
1257 // waiting for all pending urbs to complete? 1280 // waiting for all pending urbs to complete?
1258 if (dev->wait) { 1281 if (dev->wait) {
1259 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { 1282 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -1448,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1448 if ((dev->driver_info->flags & FLAG_WWAN) != 0) 1471 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1449 strcpy(net->name, "wwan%d"); 1472 strcpy(net->name, "wwan%d");
1450 1473
1474 /* devices that cannot do ARP */
1475 if ((dev->driver_info->flags & FLAG_NOARP) != 0)
1476 net->flags |= IFF_NOARP;
1477
1451 /* maybe the remote can't receive an Ethernet MTU */ 1478 /* maybe the remote can't receive an Ethernet MTU */
1452 if (net->mtu > (dev->hard_mtu - net->hard_header_len)) 1479 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1453 net->mtu = dev->hard_mtu - net->hard_header_len; 1480 net->mtu = dev->hard_mtu - net->hard_header_len;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a6fcf15adc4f..35c00c5ea02a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
26#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/cpu.h>
29 30
30static int napi_weight = 128; 31static int napi_weight = 128;
31module_param(napi_weight, int, 0444); 32module_param(napi_weight, int, 0444);
@@ -123,6 +124,12 @@ struct virtnet_info {
123 124
124 /* Does the affinity hint is set for virtqueues? */ 125 /* Does the affinity hint is set for virtqueues? */
125 bool affinity_hint_set; 126 bool affinity_hint_set;
127
128 /* Per-cpu variable to show the mapping from CPU to virtqueue */
129 int __percpu *vq_index;
130
131 /* CPU hot plug notifier */
132 struct notifier_block nb;
126}; 133};
127 134
128struct skb_vnet_hdr { 135struct skb_vnet_hdr {
@@ -1013,32 +1020,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
1013 return 0; 1020 return 0;
1014} 1021}
1015 1022
1016static void virtnet_set_affinity(struct virtnet_info *vi, bool set) 1023static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1017{ 1024{
1018 int i; 1025 int i;
1026 int cpu;
1027
1028 if (vi->affinity_hint_set) {
1029 for (i = 0; i < vi->max_queue_pairs; i++) {
1030 virtqueue_set_affinity(vi->rq[i].vq, -1);
1031 virtqueue_set_affinity(vi->sq[i].vq, -1);
1032 }
1033
1034 vi->affinity_hint_set = false;
1035 }
1036
1037 i = 0;
1038 for_each_online_cpu(cpu) {
1039 if (cpu == hcpu) {
1040 *per_cpu_ptr(vi->vq_index, cpu) = -1;
1041 } else {
1042 *per_cpu_ptr(vi->vq_index, cpu) =
1043 ++i % vi->curr_queue_pairs;
1044 }
1045 }
1046}
1047
1048static void virtnet_set_affinity(struct virtnet_info *vi)
1049{
1050 int i;
1051 int cpu;
1019 1052
1020 /* In multiqueue mode, when the number of cpu is equal to the number of 1053 /* In multiqueue mode, when the number of cpu is equal to the number of
1021 * queue pairs, we let the queue pairs to be private to one cpu by 1054 * queue pairs, we let the queue pairs to be private to one cpu by
1022 * setting the affinity hint to eliminate the contention. 1055 * setting the affinity hint to eliminate the contention.
1023 */ 1056 */
1024 if ((vi->curr_queue_pairs == 1 || 1057 if (vi->curr_queue_pairs == 1 ||
1025 vi->max_queue_pairs != num_online_cpus()) && set) { 1058 vi->max_queue_pairs != num_online_cpus()) {
1026 if (vi->affinity_hint_set) 1059 virtnet_clean_affinity(vi, -1);
1027 set = false; 1060 return;
1028 else
1029 return;
1030 } 1061 }
1031 1062
1032 for (i = 0; i < vi->max_queue_pairs; i++) { 1063 i = 0;
1033 int cpu = set ? i : -1; 1064 for_each_online_cpu(cpu) {
1034 virtqueue_set_affinity(vi->rq[i].vq, cpu); 1065 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1035 virtqueue_set_affinity(vi->sq[i].vq, cpu); 1066 virtqueue_set_affinity(vi->sq[i].vq, cpu);
1067 *per_cpu_ptr(vi->vq_index, cpu) = i;
1068 i++;
1036 } 1069 }
1037 1070
1038 if (set) 1071 vi->affinity_hint_set = true;
1039 vi->affinity_hint_set = true; 1072}
1040 else 1073
1041 vi->affinity_hint_set = false; 1074static int virtnet_cpu_callback(struct notifier_block *nfb,
1075 unsigned long action, void *hcpu)
1076{
1077 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1078
1079 switch(action & ~CPU_TASKS_FROZEN) {
1080 case CPU_ONLINE:
1081 case CPU_DOWN_FAILED:
1082 case CPU_DEAD:
1083 virtnet_set_affinity(vi);
1084 break;
1085 case CPU_DOWN_PREPARE:
1086 virtnet_clean_affinity(vi, (long)hcpu);
1087 break;
1088 default:
1089 break;
1090 }
1091 return NOTIFY_OK;
1042} 1092}
1043 1093
1044static void virtnet_get_ringparam(struct net_device *dev, 1094static void virtnet_get_ringparam(struct net_device *dev,
@@ -1082,13 +1132,15 @@ static int virtnet_set_channels(struct net_device *dev,
1082 if (queue_pairs > vi->max_queue_pairs) 1132 if (queue_pairs > vi->max_queue_pairs)
1083 return -EINVAL; 1133 return -EINVAL;
1084 1134
1135 get_online_cpus();
1085 err = virtnet_set_queues(vi, queue_pairs); 1136 err = virtnet_set_queues(vi, queue_pairs);
1086 if (!err) { 1137 if (!err) {
1087 netif_set_real_num_tx_queues(dev, queue_pairs); 1138 netif_set_real_num_tx_queues(dev, queue_pairs);
1088 netif_set_real_num_rx_queues(dev, queue_pairs); 1139 netif_set_real_num_rx_queues(dev, queue_pairs);
1089 1140
1090 virtnet_set_affinity(vi, true); 1141 virtnet_set_affinity(vi);
1091 } 1142 }
1143 put_online_cpus();
1092 1144
1093 return err; 1145 return err;
1094} 1146}
@@ -1127,12 +1179,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
1127 1179
1128/* To avoid contending a lock hold by a vcpu who would exit to host, select the 1180/* To avoid contending a lock hold by a vcpu who would exit to host, select the
1129 * txq based on the processor id. 1181 * txq based on the processor id.
1130 * TODO: handle cpu hotplug.
1131 */ 1182 */
1132static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) 1183static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
1133{ 1184{
1134 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 1185 int txq;
1135 smp_processor_id(); 1186 struct virtnet_info *vi = netdev_priv(dev);
1187
1188 if (skb_rx_queue_recorded(skb)) {
1189 txq = skb_get_rx_queue(skb);
1190 } else {
1191 txq = *__this_cpu_ptr(vi->vq_index);
1192 if (txq == -1)
1193 txq = 0;
1194 }
1136 1195
1137 while (unlikely(txq >= dev->real_num_tx_queues)) 1196 while (unlikely(txq >= dev->real_num_tx_queues))
1138 txq -= dev->real_num_tx_queues; 1197 txq -= dev->real_num_tx_queues;
@@ -1248,7 +1307,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
1248{ 1307{
1249 struct virtio_device *vdev = vi->vdev; 1308 struct virtio_device *vdev = vi->vdev;
1250 1309
1251 virtnet_set_affinity(vi, false); 1310 virtnet_clean_affinity(vi, -1);
1252 1311
1253 vdev->config->del_vqs(vdev); 1312 vdev->config->del_vqs(vdev);
1254 1313
@@ -1371,7 +1430,10 @@ static int init_vqs(struct virtnet_info *vi)
1371 if (ret) 1430 if (ret)
1372 goto err_free; 1431 goto err_free;
1373 1432
1374 virtnet_set_affinity(vi, true); 1433 get_online_cpus();
1434 virtnet_set_affinity(vi);
1435 put_online_cpus();
1436
1375 return 0; 1437 return 0;
1376 1438
1377err_free: 1439err_free:
@@ -1453,6 +1515,10 @@ static int virtnet_probe(struct virtio_device *vdev)
1453 if (vi->stats == NULL) 1515 if (vi->stats == NULL)
1454 goto free; 1516 goto free;
1455 1517
1518 vi->vq_index = alloc_percpu(int);
1519 if (vi->vq_index == NULL)
1520 goto free_stats;
1521
1456 mutex_init(&vi->config_lock); 1522 mutex_init(&vi->config_lock);
1457 vi->config_enable = true; 1523 vi->config_enable = true;
1458 INIT_WORK(&vi->config_work, virtnet_config_changed_work); 1524 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1476,7 +1542,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1476 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ 1542 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1477 err = init_vqs(vi); 1543 err = init_vqs(vi);
1478 if (err) 1544 if (err)
1479 goto free_stats; 1545 goto free_index;
1480 1546
1481 netif_set_real_num_tx_queues(dev, 1); 1547 netif_set_real_num_tx_queues(dev, 1);
1482 netif_set_real_num_rx_queues(dev, 1); 1548 netif_set_real_num_rx_queues(dev, 1);
@@ -1499,6 +1565,13 @@ static int virtnet_probe(struct virtio_device *vdev)
1499 } 1565 }
1500 } 1566 }
1501 1567
1568 vi->nb.notifier_call = &virtnet_cpu_callback;
1569 err = register_hotcpu_notifier(&vi->nb);
1570 if (err) {
1571 pr_debug("virtio_net: registering cpu notifier failed\n");
1572 goto free_recv_bufs;
1573 }
1574
1502 /* Assume link up if device can't report link status, 1575 /* Assume link up if device can't report link status,
1503 otherwise get link status from config. */ 1576 otherwise get link status from config. */
1504 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1577 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1520,6 +1593,8 @@ free_recv_bufs:
1520free_vqs: 1593free_vqs:
1521 cancel_delayed_work_sync(&vi->refill); 1594 cancel_delayed_work_sync(&vi->refill);
1522 virtnet_del_vqs(vi); 1595 virtnet_del_vqs(vi);
1596free_index:
1597 free_percpu(vi->vq_index);
1523free_stats: 1598free_stats:
1524 free_percpu(vi->stats); 1599 free_percpu(vi->stats);
1525free: 1600free:
@@ -1543,6 +1618,8 @@ static void virtnet_remove(struct virtio_device *vdev)
1543{ 1618{
1544 struct virtnet_info *vi = vdev->priv; 1619 struct virtnet_info *vi = vdev->priv;
1545 1620
1621 unregister_hotcpu_notifier(&vi->nb);
1622
1546 /* Prevent config work handler from accessing the device. */ 1623 /* Prevent config work handler from accessing the device. */
1547 mutex_lock(&vi->config_lock); 1624 mutex_lock(&vi->config_lock);
1548 vi->config_enable = false; 1625 vi->config_enable = false;
@@ -1554,6 +1631,7 @@ static void virtnet_remove(struct virtio_device *vdev)
1554 1631
1555 flush_work(&vi->config_work); 1632 flush_work(&vi->config_work);
1556 1633
1634 free_percpu(vi->vq_index);
1557 free_percpu(vi->stats); 1635 free_percpu(vi->stats);
1558 free_netdev(vi->dev); 1636 free_netdev(vi->dev);
1559} 1637}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index dc8913c6238c..12c6440d1649 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
154 if (ret & 1) { /* Link is up. */ 154 if (ret & 1) { /* Link is up. */
155 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 155 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
156 adapter->netdev->name, adapter->link_speed); 156 adapter->netdev->name, adapter->link_speed);
157 if (!netif_carrier_ok(adapter->netdev)) 157 netif_carrier_on(adapter->netdev);
158 netif_carrier_on(adapter->netdev);
159 158
160 if (affectTxQueue) { 159 if (affectTxQueue) {
161 for (i = 0; i < adapter->num_tx_queues; i++) 160 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
165 } else { 164 } else {
166 printk(KERN_INFO "%s: NIC Link is Down\n", 165 printk(KERN_INFO "%s: NIC Link is Down\n",
167 adapter->netdev->name); 166 adapter->netdev->name);
168 if (netif_carrier_ok(adapter->netdev)) 167 netif_carrier_off(adapter->netdev);
169 netif_carrier_off(adapter->netdev);
170 168
171 if (affectTxQueue) { 169 if (affectTxQueue) {
172 for (i = 0; i < adapter->num_tx_queues; i++) 170 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3061 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3059 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3062 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3060 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3063 3061
3062 netif_carrier_off(netdev);
3064 err = register_netdev(netdev); 3063 err = register_netdev(netdev);
3065 3064
3066 if (err) { 3065 if (err) {
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 1d76ae855f07..530581ca0191 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
156 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); 156 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
157 struct net_device *net_dev = i2400m->wimax_dev.net_dev; 157 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
158 struct device *dev = i2400m_dev(i2400m); 158 struct device *dev = i2400m_dev(i2400m);
159 struct sk_buff *skb = i2400m->wake_tx_skb; 159 struct sk_buff *skb;
160 unsigned long flags; 160 unsigned long flags;
161 161
162 spin_lock_irqsave(&i2400m->tx_lock, flags); 162 spin_lock_irqsave(&i2400m->tx_lock, flags);
@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
236void i2400m_net_wake_stop(struct i2400m *i2400m) 236void i2400m_net_wake_stop(struct i2400m *i2400m)
237{ 237{
238 struct device *dev = i2400m_dev(i2400m); 238 struct device *dev = i2400m_dev(i2400m);
239 struct sk_buff *wake_tx_skb;
240 unsigned long flags;
239 241
240 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 242 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
241 /* See i2400m_hard_start_xmit(), references are taken there 243 /*
242 * and here we release them if the work was still 244 * See i2400m_hard_start_xmit(), references are taken there and
243 * pending. Note we can't differentiate work not pending vs 245 * here we release them if the packet was still pending.
244 * never scheduled, so the NULL check does that. */ 246 */
245 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0 247 cancel_work_sync(&i2400m->wake_tx_ws);
246 && i2400m->wake_tx_skb != NULL) { 248
247 unsigned long flags; 249 spin_lock_irqsave(&i2400m->tx_lock, flags);
248 struct sk_buff *wake_tx_skb; 250 wake_tx_skb = i2400m->wake_tx_skb;
249 spin_lock_irqsave(&i2400m->tx_lock, flags); 251 i2400m->wake_tx_skb = NULL;
250 wake_tx_skb = i2400m->wake_tx_skb; /* compat help */ 252 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
251 i2400m->wake_tx_skb = NULL; /* compat help */ 253
252 spin_unlock_irqrestore(&i2400m->tx_lock, flags); 254 if (wake_tx_skb) {
253 i2400m_put(i2400m); 255 i2400m_put(i2400m);
254 kfree_skb(wake_tx_skb); 256 kfree_skb(wake_tx_skb);
255 } 257 }
258
256 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 259 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
257} 260}
258 261
@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
288 * and if pending, release those resources. */ 291 * and if pending, release those resources. */
289 result = 0; 292 result = 0;
290 spin_lock_irqsave(&i2400m->tx_lock, flags); 293 spin_lock_irqsave(&i2400m->tx_lock, flags);
291 if (!work_pending(&i2400m->wake_tx_ws)) { 294 if (!i2400m->wake_tx_skb) {
292 netif_stop_queue(net_dev); 295 netif_stop_queue(net_dev);
293 i2400m_get(i2400m); 296 i2400m_get(i2400m);
294 i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */ 297 i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 8b0d8dcd7625..56317b0fb6b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -976,6 +976,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
976 AR_PHY_CL_TAB_1, 976 AR_PHY_CL_TAB_1,
977 AR_PHY_CL_TAB_2 }; 977 AR_PHY_CL_TAB_2 };
978 978
979 ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
980
979 if (rtt) { 981 if (rtt) {
980 if (!ar9003_hw_rtt_restore(ah, chan)) 982 if (!ar9003_hw_rtt_restore(ah, chan))
981 run_rtt_cal = true; 983 run_rtt_cal = true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ce19c09fa8e8..3afc24bde6d6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -586,32 +586,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
586 ath9k_hw_synth_delay(ah, chan, synthDelay); 586 ath9k_hw_synth_delay(ah, chan, synthDelay);
587} 587}
588 588
589static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) 589void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
590{ 590{
591 switch (rx) { 591 if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
592 case 0x5:
593 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 592 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
594 AR_PHY_SWAP_ALT_CHAIN); 593 AR_PHY_SWAP_ALT_CHAIN);
595 case 0x3: 594
596 case 0x1: 595 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
597 case 0x2: 596 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
598 case 0x7:
599 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
600 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
601 break;
602 default:
603 break;
604 }
605 597
606 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) 598 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
607 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); 599 tx = 3;
608 else
609 REG_WRITE(ah, AR_SELFGEN_MASK, tx);
610 600
611 if (tx == 0x5) { 601 REG_WRITE(ah, AR_SELFGEN_MASK, tx);
612 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
613 AR_PHY_SWAP_ALT_CHAIN);
614 }
615} 602}
616 603
617/* 604/*
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 86e26a19efda..42794c546a40 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -317,7 +317,6 @@ struct ath_rx {
317 u32 *rxlink; 317 u32 *rxlink;
318 u32 num_pkts; 318 u32 num_pkts;
319 unsigned int rxfilter; 319 unsigned int rxfilter;
320 spinlock_t rxbuflock;
321 struct list_head rxbuf; 320 struct list_head rxbuf;
322 struct ath_descdma rxdma; 321 struct ath_descdma rxdma;
323 struct ath_buf *rx_bufptr; 322 struct ath_buf *rx_bufptr;
@@ -328,7 +327,6 @@ struct ath_rx {
328 327
329int ath_startrecv(struct ath_softc *sc); 328int ath_startrecv(struct ath_softc *sc);
330bool ath_stoprecv(struct ath_softc *sc); 329bool ath_stoprecv(struct ath_softc *sc);
331void ath_flushrecv(struct ath_softc *sc);
332u32 ath_calcrxfilter(struct ath_softc *sc); 330u32 ath_calcrxfilter(struct ath_softc *sc);
333int ath_rx_init(struct ath_softc *sc, int nbufs); 331int ath_rx_init(struct ath_softc *sc, int nbufs);
334void ath_rx_cleanup(struct ath_softc *sc); 332void ath_rx_cleanup(struct ath_softc *sc);
@@ -646,7 +644,6 @@ void ath_ant_comb_update(struct ath_softc *sc);
646enum sc_op_flags { 644enum sc_op_flags {
647 SC_OP_INVALID, 645 SC_OP_INVALID,
648 SC_OP_BEACONS, 646 SC_OP_BEACONS,
649 SC_OP_RXFLUSH,
650 SC_OP_ANI_RUN, 647 SC_OP_ANI_RUN,
651 SC_OP_PRIM_STA_VIF, 648 SC_OP_PRIM_STA_VIF,
652 SC_OP_HW_RESET, 649 SC_OP_HW_RESET,
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 531fffd801a3..2ca355e94da6 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
147 skb->len, DMA_TO_DEVICE); 147 skb->len, DMA_TO_DEVICE);
148 dev_kfree_skb_any(skb); 148 dev_kfree_skb_any(skb);
149 bf->bf_buf_addr = 0; 149 bf->bf_buf_addr = 0;
150 bf->bf_mpdu = NULL;
150 } 151 }
151 152
152 skb = ieee80211_beacon_get(hw, vif); 153 skb = ieee80211_beacon_get(hw, vif);
@@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)
359 return; 360 return;
360 361
361 bf = ath9k_beacon_generate(sc->hw, vif); 362 bf = ath9k_beacon_generate(sc->hw, vif);
362 WARN_ON(!bf);
363 363
364 if (sc->beacon.bmisscnt != 0) { 364 if (sc->beacon.bmisscnt != 0) {
365 ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n", 365 ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 13ff9edc2401..e585fc827c50 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -861,7 +861,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
861 RXS_ERR("RX-LENGTH-ERR", rx_len_err); 861 RXS_ERR("RX-LENGTH-ERR", rx_len_err);
862 RXS_ERR("RX-OOM-ERR", rx_oom_err); 862 RXS_ERR("RX-OOM-ERR", rx_oom_err);
863 RXS_ERR("RX-RATE-ERR", rx_rate_err); 863 RXS_ERR("RX-RATE-ERR", rx_rate_err);
864 RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
865 RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err); 864 RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
866 865
867 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); 866 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 375c3b46411e..6df2ab62dcb7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -216,7 +216,6 @@ struct ath_tx_stats {
216 * @rx_oom_err: No. of frames dropped due to OOM issues. 216 * @rx_oom_err: No. of frames dropped due to OOM issues.
217 * @rx_rate_err: No. of frames dropped due to rate errors. 217 * @rx_rate_err: No. of frames dropped due to rate errors.
218 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received. 218 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
219 * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
220 * @rx_beacons: No. of beacons received. 219 * @rx_beacons: No. of beacons received.
221 * @rx_frags: No. of rx-fragements received. 220 * @rx_frags: No. of rx-fragements received.
222 */ 221 */
@@ -235,7 +234,6 @@ struct ath_rx_stats {
235 u32 rx_oom_err; 234 u32 rx_oom_err;
236 u32 rx_rate_err; 235 u32 rx_rate_err;
237 u32 rx_too_many_frags_err; 236 u32 rx_too_many_frags_err;
238 u32 rx_drop_rxflush;
239 u32 rx_beacons; 237 u32 rx_beacons;
240 u32 rx_frags; 238 u32 rx_frags;
241}; 239};
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 4a9570dfba72..aac4a406a513 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
344 endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, 344 endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
345 skb, htc_hdr->endpoint_id, 345 skb, htc_hdr->endpoint_id,
346 txok); 346 txok);
347 } else {
348 kfree_skb(skb);
347 } 349 }
348 } 350 }
349 351
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 7f1a8e91c908..9d26fc56ca56 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1066,6 +1066,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
1066int ar9003_paprd_init_table(struct ath_hw *ah); 1066int ar9003_paprd_init_table(struct ath_hw *ah);
1067bool ar9003_paprd_is_done(struct ath_hw *ah); 1067bool ar9003_paprd_is_done(struct ath_hw *ah);
1068bool ar9003_is_paprd_enabled(struct ath_hw *ah); 1068bool ar9003_is_paprd_enabled(struct ath_hw *ah);
1069void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
1069 1070
1070/* Hardware family op attach helpers */ 1071/* Hardware family op attach helpers */
1071void ar5008_hw_attach_phy_ops(struct ath_hw *ah); 1072void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index be30a9af1528..dd91f8fdc01c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)
182 ath_start_ani(sc); 182 ath_start_ani(sc);
183} 183}
184 184
185static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) 185static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx)
186{ 186{
187 struct ath_hw *ah = sc->sc_ah; 187 struct ath_hw *ah = sc->sc_ah;
188 bool ret = true; 188 bool ret = true;
@@ -202,14 +202,6 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
202 if (!ath_drain_all_txq(sc, retry_tx)) 202 if (!ath_drain_all_txq(sc, retry_tx))
203 ret = false; 203 ret = false;
204 204
205 if (!flush) {
206 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
207 ath_rx_tasklet(sc, 1, true);
208 ath_rx_tasklet(sc, 1, false);
209 } else {
210 ath_flushrecv(sc);
211 }
212
213 return ret; 205 return ret;
214} 206}
215 207
@@ -262,11 +254,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
262 struct ath_common *common = ath9k_hw_common(ah); 254 struct ath_common *common = ath9k_hw_common(ah);
263 struct ath9k_hw_cal_data *caldata = NULL; 255 struct ath9k_hw_cal_data *caldata = NULL;
264 bool fastcc = true; 256 bool fastcc = true;
265 bool flush = false;
266 int r; 257 int r;
267 258
268 __ath_cancel_work(sc); 259 __ath_cancel_work(sc);
269 260
261 tasklet_disable(&sc->intr_tq);
270 spin_lock_bh(&sc->sc_pcu_lock); 262 spin_lock_bh(&sc->sc_pcu_lock);
271 263
272 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) { 264 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
@@ -276,11 +268,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
276 268
277 if (!hchan) { 269 if (!hchan) {
278 fastcc = false; 270 fastcc = false;
279 flush = true;
280 hchan = ah->curchan; 271 hchan = ah->curchan;
281 } 272 }
282 273
283 if (!ath_prepare_reset(sc, retry_tx, flush)) 274 if (!ath_prepare_reset(sc, retry_tx))
284 fastcc = false; 275 fastcc = false;
285 276
286 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", 277 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
@@ -302,6 +293,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
302 293
303out: 294out:
304 spin_unlock_bh(&sc->sc_pcu_lock); 295 spin_unlock_bh(&sc->sc_pcu_lock);
296 tasklet_enable(&sc->intr_tq);
297
305 return r; 298 return r;
306} 299}
307 300
@@ -804,7 +797,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
804 ath9k_hw_cfg_gpio_input(ah, ah->led_pin); 797 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
805 } 798 }
806 799
807 ath_prepare_reset(sc, false, true); 800 ath_prepare_reset(sc, false);
808 801
809 if (sc->rx.frag) { 802 if (sc->rx.frag) {
810 dev_kfree_skb_any(sc->rx.frag); 803 dev_kfree_skb_any(sc->rx.frag);
@@ -1833,6 +1826,9 @@ static u32 fill_chainmask(u32 cap, u32 new)
1833 1826
1834static bool validate_antenna_mask(struct ath_hw *ah, u32 val) 1827static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
1835{ 1828{
1829 if (AR_SREV_9300_20_OR_LATER(ah))
1830 return true;
1831
1836 switch (val & 0x7) { 1832 switch (val & 0x7) {
1837 case 0x1: 1833 case 0x1:
1838 case 0x3: 1834 case 0x3:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d4df98a938bf..90752f246970 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -254,8 +254,6 @@ rx_init_fail:
254 254
255static void ath_edma_start_recv(struct ath_softc *sc) 255static void ath_edma_start_recv(struct ath_softc *sc)
256{ 256{
257 spin_lock_bh(&sc->rx.rxbuflock);
258
259 ath9k_hw_rxena(sc->sc_ah); 257 ath9k_hw_rxena(sc->sc_ah);
260 258
261 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 259 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
@@ -267,8 +265,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)
267 ath_opmode_init(sc); 265 ath_opmode_init(sc);
268 266
269 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 267 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
270
271 spin_unlock_bh(&sc->rx.rxbuflock);
272} 268}
273 269
274static void ath_edma_stop_recv(struct ath_softc *sc) 270static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -285,8 +281,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
285 int error = 0; 281 int error = 0;
286 282
287 spin_lock_init(&sc->sc_pcu_lock); 283 spin_lock_init(&sc->sc_pcu_lock);
288 spin_lock_init(&sc->rx.rxbuflock);
289 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
290 284
291 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 285 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
292 sc->sc_ah->caps.rx_status_len; 286 sc->sc_ah->caps.rx_status_len;
@@ -447,7 +441,6 @@ int ath_startrecv(struct ath_softc *sc)
447 return 0; 441 return 0;
448 } 442 }
449 443
450 spin_lock_bh(&sc->rx.rxbuflock);
451 if (list_empty(&sc->rx.rxbuf)) 444 if (list_empty(&sc->rx.rxbuf))
452 goto start_recv; 445 goto start_recv;
453 446
@@ -468,26 +461,31 @@ start_recv:
468 ath_opmode_init(sc); 461 ath_opmode_init(sc);
469 ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 462 ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
470 463
471 spin_unlock_bh(&sc->rx.rxbuflock);
472
473 return 0; 464 return 0;
474} 465}
475 466
467static void ath_flushrecv(struct ath_softc *sc)
468{
469 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
470 ath_rx_tasklet(sc, 1, true);
471 ath_rx_tasklet(sc, 1, false);
472}
473
476bool ath_stoprecv(struct ath_softc *sc) 474bool ath_stoprecv(struct ath_softc *sc)
477{ 475{
478 struct ath_hw *ah = sc->sc_ah; 476 struct ath_hw *ah = sc->sc_ah;
479 bool stopped, reset = false; 477 bool stopped, reset = false;
480 478
481 spin_lock_bh(&sc->rx.rxbuflock);
482 ath9k_hw_abortpcurecv(ah); 479 ath9k_hw_abortpcurecv(ah);
483 ath9k_hw_setrxfilter(ah, 0); 480 ath9k_hw_setrxfilter(ah, 0);
484 stopped = ath9k_hw_stopdmarecv(ah, &reset); 481 stopped = ath9k_hw_stopdmarecv(ah, &reset);
485 482
483 ath_flushrecv(sc);
484
486 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 485 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
487 ath_edma_stop_recv(sc); 486 ath_edma_stop_recv(sc);
488 else 487 else
489 sc->rx.rxlink = NULL; 488 sc->rx.rxlink = NULL;
490 spin_unlock_bh(&sc->rx.rxbuflock);
491 489
492 if (!(ah->ah_flags & AH_UNPLUGGED) && 490 if (!(ah->ah_flags & AH_UNPLUGGED) &&
493 unlikely(!stopped)) { 491 unlikely(!stopped)) {
@@ -499,15 +497,6 @@ bool ath_stoprecv(struct ath_softc *sc)
499 return stopped && !reset; 497 return stopped && !reset;
500} 498}
501 499
502void ath_flushrecv(struct ath_softc *sc)
503{
504 set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
505 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
506 ath_rx_tasklet(sc, 1, true);
507 ath_rx_tasklet(sc, 1, false);
508 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
509}
510
511static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 500static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
512{ 501{
513 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 502 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
@@ -744,6 +733,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
744 return NULL; 733 return NULL;
745 } 734 }
746 735
736 list_del(&bf->list);
747 if (!bf->bf_mpdu) 737 if (!bf->bf_mpdu)
748 return bf; 738 return bf;
749 739
@@ -1059,16 +1049,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1059 dma_type = DMA_FROM_DEVICE; 1049 dma_type = DMA_FROM_DEVICE;
1060 1050
1061 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1051 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1062 spin_lock_bh(&sc->rx.rxbuflock);
1063 1052
1064 tsf = ath9k_hw_gettsf64(ah); 1053 tsf = ath9k_hw_gettsf64(ah);
1065 tsf_lower = tsf & 0xffffffff; 1054 tsf_lower = tsf & 0xffffffff;
1066 1055
1067 do { 1056 do {
1068 bool decrypt_error = false; 1057 bool decrypt_error = false;
1069 /* If handling rx interrupt and flush is in progress => exit */
1070 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
1071 break;
1072 1058
1073 memset(&rs, 0, sizeof(rs)); 1059 memset(&rs, 0, sizeof(rs));
1074 if (edma) 1060 if (edma)
@@ -1111,15 +1097,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1111 1097
1112 ath_debug_stat_rx(sc, &rs); 1098 ath_debug_stat_rx(sc, &rs);
1113 1099
1114 /*
1115 * If we're asked to flush receive queue, directly
1116 * chain it back at the queue without processing it.
1117 */
1118 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
1119 RX_STAT_INC(rx_drop_rxflush);
1120 goto requeue_drop_frag;
1121 }
1122
1123 memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1100 memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1124 1101
1125 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1102 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
@@ -1254,19 +1231,18 @@ requeue_drop_frag:
1254 sc->rx.frag = NULL; 1231 sc->rx.frag = NULL;
1255 } 1232 }
1256requeue: 1233requeue:
1234 list_add_tail(&bf->list, &sc->rx.rxbuf);
1235 if (flush)
1236 continue;
1237
1257 if (edma) { 1238 if (edma) {
1258 list_add_tail(&bf->list, &sc->rx.rxbuf);
1259 ath_rx_edma_buf_link(sc, qtype); 1239 ath_rx_edma_buf_link(sc, qtype);
1260 } else { 1240 } else {
1261 list_move_tail(&bf->list, &sc->rx.rxbuf);
1262 ath_rx_buf_link(sc, bf); 1241 ath_rx_buf_link(sc, bf);
1263 if (!flush) 1242 ath9k_hw_rxena(ah);
1264 ath9k_hw_rxena(ah);
1265 } 1243 }
1266 } while (1); 1244 } while (1);
1267 1245
1268 spin_unlock_bh(&sc->rx.rxbuflock);
1269
1270 if (!(ah->imask & ATH9K_INT_RXEOL)) { 1246 if (!(ah->imask & ATH9K_INT_RXEOL)) {
1271 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 1247 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
1272 ath9k_hw_set_interrupts(ah); 1248 ath9k_hw_set_interrupts(ah);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 1fbd8ecbe2ea..e5fd20994bec 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -36,6 +36,7 @@
36#include "debug.h" 36#include "debug.h"
37 37
38#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 38#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
39#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
39 40
40/* Flags we support */ 41/* Flags we support */
41#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ 42#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
708 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); 709 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
709} 710}
710 711
712static bool brcms_tx_flush_completed(struct brcms_info *wl)
713{
714 bool result;
715
716 spin_lock_bh(&wl->lock);
717 result = brcms_c_tx_flush_completed(wl->wlc);
718 spin_unlock_bh(&wl->lock);
719 return result;
720}
721
711static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) 722static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
712{ 723{
713 struct brcms_info *wl = hw->priv; 724 struct brcms_info *wl = hw->priv;
725 int ret;
714 726
715 no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false"); 727 no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
716 728
717 /* wait for packet queue and dma fifos to run empty */ 729 ret = wait_event_timeout(wl->tx_flush_wq,
718 spin_lock_bh(&wl->lock); 730 brcms_tx_flush_completed(wl),
719 brcms_c_wait_for_tx_completion(wl->wlc, drop); 731 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
720 spin_unlock_bh(&wl->lock); 732
733 brcms_dbg_mac80211(wl->wlc->hw->d11core,
734 "ret=%d\n", jiffies_to_msecs(ret));
721} 735}
722 736
723static const struct ieee80211_ops brcms_ops = { 737static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
772 786
773 done: 787 done:
774 spin_unlock_bh(&wl->lock); 788 spin_unlock_bh(&wl->lock);
789 wake_up(&wl->tx_flush_wq);
775} 790}
776 791
777/* 792/*
@@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1020 1035
1021 atomic_set(&wl->callbacks, 0); 1036 atomic_set(&wl->callbacks, 0);
1022 1037
1038 init_waitqueue_head(&wl->tx_flush_wq);
1039
1023 /* setup the bottom half handler */ 1040 /* setup the bottom half handler */
1024 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); 1041 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
1025 1042
@@ -1407,9 +1424,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
1407#endif 1424#endif
1408 t->ms = ms; 1425 t->ms = ms;
1409 t->periodic = (bool) periodic; 1426 t->periodic = (bool) periodic;
1410 t->set = true; 1427 if (!t->set) {
1411 1428 t->set = true;
1412 atomic_inc(&t->wl->callbacks); 1429 atomic_inc(&t->wl->callbacks);
1430 }
1413 1431
1414 ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms)); 1432 ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
1415} 1433}
@@ -1608,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
1608 spin_lock_bh(&wl->lock); 1626 spin_lock_bh(&wl->lock);
1609 return blocked; 1627 return blocked;
1610} 1628}
1611
1612/*
1613 * precondition: perimeter lock has been acquired
1614 */
1615void brcms_msleep(struct brcms_info *wl, uint ms)
1616{
1617 spin_unlock_bh(&wl->lock);
1618 msleep(ms);
1619 spin_lock_bh(&wl->lock);
1620}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd35..947ccacf43e6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
68 spinlock_t lock; /* per-device perimeter lock */ 68 spinlock_t lock; /* per-device perimeter lock */
69 spinlock_t isr_lock; /* per-device ISR synchronization lock */ 69 spinlock_t isr_lock; /* per-device ISR synchronization lock */
70 70
71 /* tx flush */
72 wait_queue_head_t tx_flush_wq;
71 73
72 /* timer related fields */ 74 /* timer related fields */
73 atomic_t callbacks; /* # outstanding callback functions */ 75 atomic_t callbacks; /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
100extern void brcms_free_timer(struct brcms_timer *timer); 102extern void brcms_free_timer(struct brcms_timer *timer);
101extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); 103extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
102extern bool brcms_del_timer(struct brcms_timer *timer); 104extern bool brcms_del_timer(struct brcms_timer *timer);
103extern void brcms_msleep(struct brcms_info *wl, uint ms);
104extern void brcms_dpc(unsigned long data); 105extern void brcms_dpc(unsigned long data);
105extern void brcms_timer(struct brcms_timer *t); 106extern void brcms_timer(struct brcms_timer *t);
106extern void brcms_fatal_error(struct brcms_info *wl); 107extern void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 17594de4199e..8b5839008af3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
1027static bool 1027static bool
1028brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) 1028brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1029{ 1029{
1030 bool morepending = false;
1031 struct bcma_device *core; 1030 struct bcma_device *core;
1032 struct tx_status txstatus, *txs; 1031 struct tx_status txstatus, *txs;
1033 u32 s1, s2; 1032 u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1041 txs = &txstatus; 1040 txs = &txstatus;
1042 core = wlc_hw->d11core; 1041 core = wlc_hw->d11core;
1043 *fatal = false; 1042 *fatal = false;
1044 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1045 while (!(*fatal)
1046 && (s1 & TXS_V)) {
1047 /* !give others some time to run! */
1048 if (n >= max_tx_num) {
1049 morepending = true;
1050 break;
1051 }
1052 1043
1044 while (n < max_tx_num) {
1045 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1053 if (s1 == 0xffffffff) { 1046 if (s1 == 0xffffffff) {
1054 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, 1047 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
1055 __func__); 1048 __func__);
1056 *fatal = true; 1049 *fatal = true;
1057 return false; 1050 return false;
1058 } 1051 }
1059 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1052 /* only process when valid */
1053 if (!(s1 & TXS_V))
1054 break;
1060 1055
1056 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
1061 txs->status = s1 & TXS_STATUS_MASK; 1057 txs->status = s1 & TXS_STATUS_MASK;
1062 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; 1058 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
1063 txs->sequence = s2 & TXS_SEQ_MASK; 1059 txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1065 txs->lasttxtime = 0; 1061 txs->lasttxtime = 0;
1066 1062
1067 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); 1063 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
1068 1064 if (*fatal == true)
1069 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1065 return false;
1070 n++; 1066 n++;
1071 } 1067 }
1072 1068
1073 if (*fatal) 1069 return n >= max_tx_num;
1074 return false;
1075
1076 return morepending;
1077} 1070}
1078 1071
1079static void brcms_c_tbtt(struct brcms_c_info *wlc) 1072static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
7518 return wlc->band->bandunit; 7511 return wlc->band->bandunit;
7519} 7512}
7520 7513
7521void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7514bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
7522{ 7515{
7523 int timeout = 20;
7524 int i; 7516 int i;
7525 7517
7526 /* Kick DMA to send any pending AMPDU */ 7518 /* Kick DMA to send any pending AMPDU */
7527 for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++) 7519 for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
7528 if (wlc->hw->di[i]) 7520 if (wlc->hw->di[i])
7529 dma_txflush(wlc->hw->di[i]); 7521 dma_kick_tx(wlc->hw->di[i]);
7530
7531 /* wait for queue and DMA fifos to run dry */
7532 while (brcms_txpktpendtot(wlc) > 0) {
7533 brcms_msleep(wlc->wl, 1);
7534
7535 if (--timeout == 0)
7536 break;
7537 }
7538 7522
7539 WARN_ON_ONCE(timeout == 0); 7523 return !brcms_txpktpendtot(wlc);
7540} 7524}
7541 7525
7542void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) 7526void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 4fb2834f4e64..b0f14b7b8616 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
314extern void brcms_c_scan_start(struct brcms_c_info *wlc); 314extern void brcms_c_scan_start(struct brcms_c_info *wlc);
315extern void brcms_c_scan_stop(struct brcms_c_info *wlc); 315extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
316extern int brcms_c_get_curband(struct brcms_c_info *wlc); 316extern int brcms_c_get_curband(struct brcms_c_info *wlc);
317extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
318 bool drop);
319extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); 317extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
320extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); 318extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
321extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, 319extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
332extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); 330extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
333extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); 331extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
334extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); 332extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
333extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
335 334
336#endif /* _BRCM_PUB_H_ */ 335#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index d92b21a8e597..b3ab7b7becae 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2181 mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ)); 2181 mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
2182} 2182}
2183 2183
2184static void send_scan_event(void *data) 2184static void ipw2100_scan_event(struct work_struct *work)
2185{ 2185{
2186 struct ipw2100_priv *priv = data; 2186 struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
2187 scan_event.work);
2187 union iwreq_data wrqu; 2188 union iwreq_data wrqu;
2188 2189
2189 wrqu.data.length = 0; 2190 wrqu.data.length = 0;
@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data)
2191 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 2192 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
2192} 2193}
2193 2194
2194static void ipw2100_scan_event_later(struct work_struct *work)
2195{
2196 send_scan_event(container_of(work, struct ipw2100_priv,
2197 scan_event_later.work));
2198}
2199
2200static void ipw2100_scan_event_now(struct work_struct *work)
2201{
2202 send_scan_event(container_of(work, struct ipw2100_priv,
2203 scan_event_now));
2204}
2205
2206static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) 2195static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2207{ 2196{
2208 IPW_DEBUG_SCAN("scan complete\n"); 2197 IPW_DEBUG_SCAN("scan complete\n");
@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2212 2201
2213 /* Only userspace-requested scan completion events go out immediately */ 2202 /* Only userspace-requested scan completion events go out immediately */
2214 if (!priv->user_requested_scan) { 2203 if (!priv->user_requested_scan) {
2215 if (!delayed_work_pending(&priv->scan_event_later)) 2204 schedule_delayed_work(&priv->scan_event,
2216 schedule_delayed_work(&priv->scan_event_later, 2205 round_jiffies_relative(msecs_to_jiffies(4000)));
2217 round_jiffies_relative(msecs_to_jiffies(4000)));
2218 } else { 2206 } else {
2219 priv->user_requested_scan = 0; 2207 priv->user_requested_scan = 0;
2220 cancel_delayed_work(&priv->scan_event_later); 2208 mod_delayed_work(system_wq, &priv->scan_event, 0);
2221 schedule_work(&priv->scan_event_now);
2222 } 2209 }
2223} 2210}
2224 2211
@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
4459 cancel_delayed_work_sync(&priv->wx_event_work); 4446 cancel_delayed_work_sync(&priv->wx_event_work);
4460 cancel_delayed_work_sync(&priv->hang_check); 4447 cancel_delayed_work_sync(&priv->hang_check);
4461 cancel_delayed_work_sync(&priv->rf_kill); 4448 cancel_delayed_work_sync(&priv->rf_kill);
4462 cancel_work_sync(&priv->scan_event_now); 4449 cancel_delayed_work_sync(&priv->scan_event);
4463 cancel_delayed_work_sync(&priv->scan_event_later);
4464} 4450}
4465 4451
4466static int ipw2100_tx_allocate(struct ipw2100_priv *priv) 4452static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6195,8 +6181,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6195 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); 6181 INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
6196 INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); 6182 INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
6197 INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); 6183 INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
6198 INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now); 6184 INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
6199 INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later);
6200 6185
6201 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 6186 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6202 ipw2100_irq_tasklet, (unsigned long)priv); 6187 ipw2100_irq_tasklet, (unsigned long)priv);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 5fe17cbab1f3..c6d78790cb0d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -577,8 +577,7 @@ struct ipw2100_priv {
577 struct delayed_work wx_event_work; 577 struct delayed_work wx_event_work;
578 struct delayed_work hang_check; 578 struct delayed_work hang_check;
579 struct delayed_work rf_kill; 579 struct delayed_work rf_kill;
580 struct work_struct scan_event_now; 580 struct delayed_work scan_event;
581 struct delayed_work scan_event_later;
582 581
583 int user_requested_scan; 582 int user_requested_scan;
584 583
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 844f201b7b70..2c2d6db0536c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv)
4480{ 4480{
4481 /* Only userspace-requested scan completion events go out immediately */ 4481 /* Only userspace-requested scan completion events go out immediately */
4482 if (!priv->user_requested_scan) { 4482 if (!priv->user_requested_scan) {
4483 if (!delayed_work_pending(&priv->scan_event)) 4483 schedule_delayed_work(&priv->scan_event,
4484 schedule_delayed_work(&priv->scan_event, 4484 round_jiffies_relative(msecs_to_jiffies(4000)));
4485 round_jiffies_relative(msecs_to_jiffies(4000)));
4486 } else { 4485 } else {
4487 union iwreq_data wrqu;
4488
4489 priv->user_requested_scan = 0; 4486 priv->user_requested_scan = 0;
4490 cancel_delayed_work(&priv->scan_event); 4487 mod_delayed_work(system_wq, &priv->scan_event, 0);
4491
4492 wrqu.data.length = 0;
4493 wrqu.data.flags = 0;
4494 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4495 } 4488 }
4496} 4489}
4497 4490
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 7e16d10a7f14..90b8970eadf0 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -3958,17 +3958,21 @@ il_connection_init_rx_config(struct il_priv *il)
3958 3958
3959 memset(&il->staging, 0, sizeof(il->staging)); 3959 memset(&il->staging, 0, sizeof(il->staging));
3960 3960
3961 if (!il->vif) { 3961 switch (il->iw_mode) {
3962 case NL80211_IFTYPE_UNSPECIFIED:
3962 il->staging.dev_type = RXON_DEV_TYPE_ESS; 3963 il->staging.dev_type = RXON_DEV_TYPE_ESS;
3963 } else if (il->vif->type == NL80211_IFTYPE_STATION) { 3964 break;
3965 case NL80211_IFTYPE_STATION:
3964 il->staging.dev_type = RXON_DEV_TYPE_ESS; 3966 il->staging.dev_type = RXON_DEV_TYPE_ESS;
3965 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 3967 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
3966 } else if (il->vif->type == NL80211_IFTYPE_ADHOC) { 3968 break;
3969 case NL80211_IFTYPE_ADHOC:
3967 il->staging.dev_type = RXON_DEV_TYPE_IBSS; 3970 il->staging.dev_type = RXON_DEV_TYPE_IBSS;
3968 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 3971 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
3969 il->staging.filter_flags = 3972 il->staging.filter_flags =
3970 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 3973 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
3971 } else { 3974 break;
3975 default:
3972 IL_ERR("Unsupported interface type %d\n", il->vif->type); 3976 IL_ERR("Unsupported interface type %d\n", il->vif->type);
3973 return; 3977 return;
3974 } 3978 }
@@ -4550,8 +4554,7 @@ out:
4550EXPORT_SYMBOL(il_mac_add_interface); 4554EXPORT_SYMBOL(il_mac_add_interface);
4551 4555
4552static void 4556static void
4553il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, 4557il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
4554 bool mode_change)
4555{ 4558{
4556 lockdep_assert_held(&il->mutex); 4559 lockdep_assert_held(&il->mutex);
4557 4560
@@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
4560 il_force_scan_end(il); 4563 il_force_scan_end(il);
4561 } 4564 }
4562 4565
4563 if (!mode_change) 4566 il_set_mode(il);
4564 il_set_mode(il);
4565
4566} 4567}
4567 4568
4568void 4569void
@@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4575 4576
4576 WARN_ON(il->vif != vif); 4577 WARN_ON(il->vif != vif);
4577 il->vif = NULL; 4578 il->vif = NULL;
4578 4579 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
4579 il_teardown_interface(il, vif, false); 4580 il_teardown_interface(il, vif);
4580 memset(il->bssid, 0, ETH_ALEN); 4581 memset(il->bssid, 0, ETH_ALEN);
4581 4582
4582 D_MAC80211("leave\n"); 4583 D_MAC80211("leave\n");
@@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4685 } 4686 }
4686 4687
4687 /* success */ 4688 /* success */
4688 il_teardown_interface(il, vif, true);
4689 vif->type = newtype; 4689 vif->type = newtype;
4690 vif->p2p = false; 4690 vif->p2p = false;
4691 err = il_set_mode(il); 4691 il->iw_mode = newtype;
4692 WARN_ON(err); 4692 il_teardown_interface(il, vif);
4693 /*
4694 * We've switched internally, but submitting to the
4695 * device may have failed for some reason. Mask this
4696 * error, because otherwise mac80211 will not switch
4697 * (and set the interface type back) and we'll be
4698 * out of sync with it.
4699 */
4700 err = 0; 4693 err = 0;
4701 4694
4702out: 4695out:
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a790599fe2c2..279796419ea0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1079,6 +1079,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
1079{ 1079{
1080 u16 status = le16_to_cpu(tx_resp->status.status); 1080 u16 status = le16_to_cpu(tx_resp->status.status);
1081 1081
1082 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1083
1082 info->status.rates[0].count = tx_resp->failure_frame + 1; 1084 info->status.rates[0].count = tx_resp->failure_frame + 1;
1083 info->flags |= iwl_tx_status_to_mac80211(status); 1085 info->flags |= iwl_tx_status_to_mac80211(status);
1084 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), 1086 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
@@ -1151,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1151 next_reclaimed = ssn; 1153 next_reclaimed = ssn;
1152 } 1154 }
1153 1155
1156 if (tid != IWL_TID_NON_QOS) {
1157 priv->tid_data[sta_id][tid].next_reclaimed =
1158 next_reclaimed;
1159 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1160 next_reclaimed);
1161 }
1162
1154 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1163 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1155 1164
1156 iwlagn_check_ratid_empty(priv, sta_id, tid); 1165 iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1201,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1201 if (!is_agg) 1210 if (!is_agg)
1202 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1211 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1203 1212
1204 /*
1205 * W/A for FW bug - the seq_ctl isn't updated when the
1206 * queues are flushed. Fetch it from the packet itself
1207 */
1208 if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
1209 next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
1210 next_reclaimed =
1211 SEQ_TO_SN(next_reclaimed + 0x10);
1212 }
1213
1214 is_offchannel_skb = 1213 is_offchannel_skb =
1215 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); 1214 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
1216 freed++; 1215 freed++;
1217 } 1216 }
1218 1217
1219 if (tid != IWL_TID_NON_QOS) {
1220 priv->tid_data[sta_id][tid].next_reclaimed =
1221 next_reclaimed;
1222 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1223 next_reclaimed);
1224 }
1225
1226 WARN_ON(!is_agg && freed != 1); 1218 WARN_ON(!is_agg && freed != 1);
1227 1219
1228 /* 1220 /*
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index efe525be27dd..cdb11b3964e2 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1459,7 +1459,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1459 struct cfg80211_ssid req_ssid; 1459 struct cfg80211_ssid req_ssid;
1460 int ret, auth_type = 0; 1460 int ret, auth_type = 0;
1461 struct cfg80211_bss *bss = NULL; 1461 struct cfg80211_bss *bss = NULL;
1462 u8 is_scanning_required = 0, config_bands = 0; 1462 u8 is_scanning_required = 0;
1463 1463
1464 memset(&req_ssid, 0, sizeof(struct cfg80211_ssid)); 1464 memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
1465 1465
@@ -1478,19 +1478,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1478 /* disconnect before try to associate */ 1478 /* disconnect before try to associate */
1479 mwifiex_deauthenticate(priv, NULL); 1479 mwifiex_deauthenticate(priv, NULL);
1480 1480
1481 if (channel) {
1482 if (mode == NL80211_IFTYPE_STATION) {
1483 if (channel->band == IEEE80211_BAND_2GHZ)
1484 config_bands = BAND_B | BAND_G | BAND_GN;
1485 else
1486 config_bands = BAND_A | BAND_AN;
1487
1488 if (!((config_bands | priv->adapter->fw_bands) &
1489 ~priv->adapter->fw_bands))
1490 priv->adapter->config_bands = config_bands;
1491 }
1492 }
1493
1494 /* As this is new association, clear locally stored 1481 /* As this is new association, clear locally stored
1495 * keys and security related flags */ 1482 * keys and security related flags */
1496 priv->sec_info.wpa_enabled = false; 1483 priv->sec_info.wpa_enabled = false;
@@ -1707,7 +1694,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
1707 1694
1708 if (cfg80211_get_chandef_type(&params->chandef) != 1695 if (cfg80211_get_chandef_type(&params->chandef) !=
1709 NL80211_CHAN_NO_HT) 1696 NL80211_CHAN_NO_HT)
1710 config_bands |= BAND_GN; 1697 config_bands |= BAND_G | BAND_GN;
1711 } else { 1698 } else {
1712 if (cfg80211_get_chandef_type(&params->chandef) == 1699 if (cfg80211_get_chandef_type(&params->chandef) ==
1713 NL80211_CHAN_NO_HT) 1700 NL80211_CHAN_NO_HT)
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 13fbc4eb1595..b879e1338a54 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -161,7 +161,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
161 161
162 if (pdev) { 162 if (pdev) {
163 card = (struct pcie_service_card *) pci_get_drvdata(pdev); 163 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
164 if (!card || card->adapter) { 164 if (!card || !card->adapter) {
165 pr_err("Card or adapter structure is not valid\n"); 165 pr_err("Card or adapter structure is not valid\n");
166 return 0; 166 return 0;
167 } 167 }
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9189a32b7844..973a9d90e9ea 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1563 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", 1563 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
1564 scan_rsp->number_of_sets); 1564 scan_rsp->number_of_sets);
1565 ret = -1; 1565 ret = -1;
1566 goto done; 1566 goto check_next_scan;
1567 } 1567 }
1568 1568
1569 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); 1569 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1634 if (!beacon_size || beacon_size > bytes_left) { 1634 if (!beacon_size || beacon_size > bytes_left) {
1635 bss_info += bytes_left; 1635 bss_info += bytes_left;
1636 bytes_left = 0; 1636 bytes_left = 0;
1637 return -1; 1637 ret = -1;
1638 goto check_next_scan;
1638 } 1639 }
1639 1640
1640 /* Initialize the current working beacon pointer for this BSS 1641 /* Initialize the current working beacon pointer for this BSS
@@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1690 dev_err(priv->adapter->dev, 1691 dev_err(priv->adapter->dev,
1691 "%s: bytes left < IE length\n", 1692 "%s: bytes left < IE length\n",
1692 __func__); 1693 __func__);
1693 goto done; 1694 goto check_next_scan;
1694 } 1695 }
1695 if (element_id == WLAN_EID_DS_PARAMS) { 1696 if (element_id == WLAN_EID_DS_PARAMS) {
1696 channel = *(current_ptr + sizeof(struct ieee_types_header)); 1697 channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1753 } 1754 }
1754 } 1755 }
1755 1756
1757check_next_scan:
1756 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 1758 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
1757 if (list_empty(&adapter->scan_pending_q)) { 1759 if (list_empty(&adapter->scan_pending_q)) {
1758 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); 1760 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1813 } 1815 }
1814 } 1816 }
1815 1817
1816done:
1817 return ret; 1818 return ret;
1818} 1819}
1819 1820
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5a1c1d0e5599..f2874c3392b4 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1752,6 +1752,8 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1752static struct mmc_host *reset_host; 1752static struct mmc_host *reset_host;
1753static void sdio_card_reset_worker(struct work_struct *work) 1753static void sdio_card_reset_worker(struct work_struct *work)
1754{ 1754{
1755 struct mmc_host *target = reset_host;
1756
1755 /* The actual reset operation must be run outside of driver thread. 1757 /* The actual reset operation must be run outside of driver thread.
1756 * This is because mmc_remove_host() will cause the device to be 1758 * This is because mmc_remove_host() will cause the device to be
1757 * instantly destroyed, and the driver then needs to end its thread, 1759 * instantly destroyed, and the driver then needs to end its thread,
@@ -1761,10 +1763,10 @@ static void sdio_card_reset_worker(struct work_struct *work)
1761 */ 1763 */
1762 1764
1763 pr_err("Resetting card...\n"); 1765 pr_err("Resetting card...\n");
1764 mmc_remove_host(reset_host); 1766 mmc_remove_host(target);
1765 /* 20ms delay is based on experiment with sdhci controller */ 1767 /* 20ms delay is based on experiment with sdhci controller */
1766 mdelay(20); 1768 mdelay(20);
1767 mmc_add_host(reset_host); 1769 mmc_add_host(target);
1768} 1770}
1769static DECLARE_WORK(card_reset_work, sdio_card_reset_worker); 1771static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
1770 1772
@@ -1773,9 +1775,6 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
1773{ 1775{
1774 struct sdio_mmc_card *card = adapter->card; 1776 struct sdio_mmc_card *card = adapter->card;
1775 1777
1776 if (work_pending(&card_reset_work))
1777 return;
1778
1779 reset_host = card->func->card->host; 1778 reset_host = card->func->card->host;
1780 schedule_work(&card_reset_work); 1779 schedule_work(&card_reset_work);
1781} 1780}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 60e88b58039d..f542bb8ccbc8 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -283,6 +283,20 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
283 if (ret) 283 if (ret)
284 goto done; 284 goto done;
285 285
286 if (bss_desc) {
287 u8 config_bands = 0;
288
289 if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band)
290 == HostCmd_SCAN_RADIO_TYPE_BG)
291 config_bands = BAND_B | BAND_G | BAND_GN;
292 else
293 config_bands = BAND_A | BAND_AN;
294
295 if (!((config_bands | adapter->fw_bands) &
296 ~adapter->fw_bands))
297 adapter->config_bands = config_bands;
298 }
299
286 ret = mwifiex_check_network_compatibility(priv, bss_desc); 300 ret = mwifiex_check_network_compatibility(priv, bss_desc);
287 if (ret) 301 if (ret)
288 goto done; 302 goto done;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 83564d36e801..a00a03ea4ec9 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -318,20 +318,20 @@ struct mwl8k_sta {
318#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) 318#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
319 319
320static const struct ieee80211_channel mwl8k_channels_24[] = { 320static const struct ieee80211_channel mwl8k_channels_24[] = {
321 { .center_freq = 2412, .hw_value = 1, }, 321 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
322 { .center_freq = 2417, .hw_value = 2, }, 322 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
323 { .center_freq = 2422, .hw_value = 3, }, 323 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
324 { .center_freq = 2427, .hw_value = 4, }, 324 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
325 { .center_freq = 2432, .hw_value = 5, }, 325 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
326 { .center_freq = 2437, .hw_value = 6, }, 326 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
327 { .center_freq = 2442, .hw_value = 7, }, 327 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
328 { .center_freq = 2447, .hw_value = 8, }, 328 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
329 { .center_freq = 2452, .hw_value = 9, }, 329 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
330 { .center_freq = 2457, .hw_value = 10, }, 330 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
331 { .center_freq = 2462, .hw_value = 11, }, 331 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
332 { .center_freq = 2467, .hw_value = 12, }, 332 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
333 { .center_freq = 2472, .hw_value = 13, }, 333 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
334 { .center_freq = 2484, .hw_value = 14, }, 334 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
335}; 335};
336 336
337static const struct ieee80211_rate mwl8k_rates_24[] = { 337static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -352,10 +352,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
352}; 352};
353 353
354static const struct ieee80211_channel mwl8k_channels_50[] = { 354static const struct ieee80211_channel mwl8k_channels_50[] = {
355 { .center_freq = 5180, .hw_value = 36, }, 355 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
356 { .center_freq = 5200, .hw_value = 40, }, 356 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
357 { .center_freq = 5220, .hw_value = 44, }, 357 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
358 { .center_freq = 5240, .hw_value = 48, }, 358 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
359}; 359};
360 360
361static const struct ieee80211_rate mwl8k_rates_50[] = { 361static const struct ieee80211_rate mwl8k_rates_50[] = {
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 21b1bbb93a7e..b80bc4612581 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -57,12 +57,12 @@ config RTL8192CU
57 57
58config RTLWIFI 58config RTLWIFI
59 tristate 59 tristate
60 depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE 60 depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
61 default m 61 default m
62 62
63config RTLWIFI_DEBUG 63config RTLWIFI_DEBUG
64 bool "Additional debugging output" 64 bool "Additional debugging output"
65 depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE 65 depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
66 default y 66 default y
67 67
68config RTL8192C_COMMON 68config RTL8192C_COMMON
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 4494d130b37c..0f8b05185eda 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1004 is_tx ? "Tx" : "Rx"); 1004 is_tx ? "Tx" : "Rx");
1005 1005
1006 if (is_tx) { 1006 if (is_tx) {
1007 rtl_lps_leave(hw); 1007 schedule_work(&rtlpriv->
1008 works.lps_leave_work);
1008 ppsc->last_delaylps_stamp_jiffies = 1009 ppsc->last_delaylps_stamp_jiffies =
1009 jiffies; 1010 jiffies;
1010 } 1011 }
@@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1014 } 1015 }
1015 } else if (ETH_P_ARP == ether_type) { 1016 } else if (ETH_P_ARP == ether_type) {
1016 if (is_tx) { 1017 if (is_tx) {
1017 rtl_lps_leave(hw); 1018 schedule_work(&rtlpriv->works.lps_leave_work);
1018 ppsc->last_delaylps_stamp_jiffies = jiffies; 1019 ppsc->last_delaylps_stamp_jiffies = jiffies;
1019 } 1020 }
1020 1021
@@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1024 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); 1025 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
1025 1026
1026 if (is_tx) { 1027 if (is_tx) {
1027 rtl_lps_leave(hw); 1028 schedule_work(&rtlpriv->works.lps_leave_work);
1028 ppsc->last_delaylps_stamp_jiffies = jiffies; 1029 ppsc->last_delaylps_stamp_jiffies = jiffies;
1029 } 1030 }
1030 1031
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f2ecdeb3a90d..1535efda3d52 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
542 WARN_ON(skb_queue_empty(&rx_queue)); 542 WARN_ON(skb_queue_empty(&rx_queue));
543 while (!skb_queue_empty(&rx_queue)) { 543 while (!skb_queue_empty(&rx_queue)) {
544 _skb = skb_dequeue(&rx_queue); 544 _skb = skb_dequeue(&rx_queue);
545 _rtl_usb_rx_process_agg(hw, skb); 545 _rtl_usb_rx_process_agg(hw, _skb);
546 ieee80211_rx_irqsafe(hw, skb); 546 ieee80211_rx_irqsafe(hw, _skb);
547 } 547 }
548} 548}
549 549
diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index db719f7d2692..b9e27b98bbc9 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -68,8 +68,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
68 unsigned long timeout, start; 68 unsigned long timeout, start;
69 u32 elp_reg; 69 u32 elp_reg;
70 70
71 if (delayed_work_pending(&wl->elp_work)) 71 cancel_delayed_work(&wl->elp_work);
72 cancel_delayed_work(&wl->elp_work);
73 72
74 if (!wl->elp) 73 if (!wl->elp)
75 return 0; 74 return 0;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3338c4..9d7f1723dd8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
151/* Notify xenvif that ring now has space to send an skb to the frontend */ 151/* Notify xenvif that ring now has space to send an skb to the frontend */
152void xenvif_notify_tx_completion(struct xenvif *vif); 152void xenvif_notify_tx_completion(struct xenvif *vif);
153 153
154/* Prevent the device from generating any further traffic. */
155void xenvif_carrier_off(struct xenvif *vif);
156
154/* Returns number of ring slots required to send an skb to the frontend */ 157/* Returns number of ring slots required to send an skb to the frontend */
155unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); 158unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
156 159
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8c338a..b8c5193bd420 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
343 return err; 343 return err;
344} 344}
345 345
346void xenvif_disconnect(struct xenvif *vif) 346void xenvif_carrier_off(struct xenvif *vif)
347{ 347{
348 struct net_device *dev = vif->dev; 348 struct net_device *dev = vif->dev;
349 if (netif_carrier_ok(dev)) { 349
350 rtnl_lock(); 350 rtnl_lock();
351 netif_carrier_off(dev); /* discard queued packets */ 351 netif_carrier_off(dev); /* discard queued packets */
352 if (netif_running(dev)) 352 if (netif_running(dev))
353 xenvif_down(vif); 353 xenvif_down(vif);
354 rtnl_unlock(); 354 rtnl_unlock();
355 xenvif_put(vif); 355 xenvif_put(vif);
356 } 356}
357
358void xenvif_disconnect(struct xenvif *vif)
359{
360 if (netif_carrier_ok(vif->dev))
361 xenvif_carrier_off(vif);
357 362
358 atomic_dec(&vif->refcnt); 363 atomic_dec(&vif->refcnt);
359 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); 364 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78d901d..2b9520c46e97 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
147 atomic_dec(&netbk->netfront_count); 147 atomic_dec(&netbk->netfront_count);
148} 148}
149 149
150static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); 150static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
151 u8 status);
151static void make_tx_response(struct xenvif *vif, 152static void make_tx_response(struct xenvif *vif,
152 struct xen_netif_tx_request *txp, 153 struct xen_netif_tx_request *txp,
153 s8 st); 154 s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
879 880
880 do { 881 do {
881 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 882 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
882 if (cons >= end) 883 if (cons == end)
883 break; 884 break;
884 txp = RING_GET_REQUEST(&vif->tx, cons++); 885 txp = RING_GET_REQUEST(&vif->tx, cons++);
885 } while (1); 886 } while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
888 xenvif_put(vif); 889 xenvif_put(vif);
889} 890}
890 891
892static void netbk_fatal_tx_err(struct xenvif *vif)
893{
894 netdev_err(vif->dev, "fatal error; disabling device\n");
895 xenvif_carrier_off(vif);
896 xenvif_put(vif);
897}
898
891static int netbk_count_requests(struct xenvif *vif, 899static int netbk_count_requests(struct xenvif *vif,
892 struct xen_netif_tx_request *first, 900 struct xen_netif_tx_request *first,
893 struct xen_netif_tx_request *txp, 901 struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
901 909
902 do { 910 do {
903 if (frags >= work_to_do) { 911 if (frags >= work_to_do) {
904 netdev_dbg(vif->dev, "Need more frags\n"); 912 netdev_err(vif->dev, "Need more frags\n");
913 netbk_fatal_tx_err(vif);
905 return -frags; 914 return -frags;
906 } 915 }
907 916
908 if (unlikely(frags >= MAX_SKB_FRAGS)) { 917 if (unlikely(frags >= MAX_SKB_FRAGS)) {
909 netdev_dbg(vif->dev, "Too many frags\n"); 918 netdev_err(vif->dev, "Too many frags\n");
919 netbk_fatal_tx_err(vif);
910 return -frags; 920 return -frags;
911 } 921 }
912 922
913 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), 923 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
914 sizeof(*txp)); 924 sizeof(*txp));
915 if (txp->size > first->size) { 925 if (txp->size > first->size) {
916 netdev_dbg(vif->dev, "Frags galore\n"); 926 netdev_err(vif->dev, "Frag is bigger than frame.\n");
927 netbk_fatal_tx_err(vif);
917 return -frags; 928 return -frags;
918 } 929 }
919 930
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
921 frags++; 932 frags++;
922 933
923 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 934 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
924 netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", 935 netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
925 txp->offset, txp->size); 936 txp->offset, txp->size);
937 netbk_fatal_tx_err(vif);
926 return -frags; 938 return -frags;
927 } 939 }
928 } while ((txp++)->flags & XEN_NETTXF_more_data); 940 } while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
966 pending_idx = netbk->pending_ring[index]; 978 pending_idx = netbk->pending_ring[index];
967 page = xen_netbk_alloc_page(netbk, skb, pending_idx); 979 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
968 if (!page) 980 if (!page)
969 return NULL; 981 goto err;
970 982
971 gop->source.u.ref = txp->gref; 983 gop->source.u.ref = txp->gref;
972 gop->source.domid = vif->domid; 984 gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
988 } 1000 }
989 1001
990 return gop; 1002 return gop;
1003err:
1004 /* Unwind, freeing all pages and sending error responses. */
1005 while (i-- > start) {
1006 xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
1007 XEN_NETIF_RSP_ERROR);
1008 }
1009 /* The head too, if necessary. */
1010 if (start)
1011 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1012
1013 return NULL;
991} 1014}
992 1015
993static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, 1016static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
996{ 1019{
997 struct gnttab_copy *gop = *gopp; 1020 struct gnttab_copy *gop = *gopp;
998 u16 pending_idx = *((u16 *)skb->data); 1021 u16 pending_idx = *((u16 *)skb->data);
999 struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
1000 struct xenvif *vif = pending_tx_info[pending_idx].vif;
1001 struct xen_netif_tx_request *txp;
1002 struct skb_shared_info *shinfo = skb_shinfo(skb); 1022 struct skb_shared_info *shinfo = skb_shinfo(skb);
1003 int nr_frags = shinfo->nr_frags; 1023 int nr_frags = shinfo->nr_frags;
1004 int i, err, start; 1024 int i, err, start;
1005 1025
1006 /* Check status of header. */ 1026 /* Check status of header. */
1007 err = gop->status; 1027 err = gop->status;
1008 if (unlikely(err)) { 1028 if (unlikely(err))
1009 pending_ring_idx_t index; 1029 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1010 index = pending_index(netbk->pending_prod++);
1011 txp = &pending_tx_info[pending_idx].req;
1012 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1013 netbk->pending_ring[index] = pending_idx;
1014 xenvif_put(vif);
1015 }
1016 1030
1017 /* Skip first skb fragment if it is on same page as header fragment. */ 1031 /* Skip first skb fragment if it is on same page as header fragment. */
1018 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 1032 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1019 1033
1020 for (i = start; i < nr_frags; i++) { 1034 for (i = start; i < nr_frags; i++) {
1021 int j, newerr; 1035 int j, newerr;
1022 pending_ring_idx_t index;
1023 1036
1024 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1037 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1025 1038
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1028 if (likely(!newerr)) { 1041 if (likely(!newerr)) {
1029 /* Had a previous error? Invalidate this fragment. */ 1042 /* Had a previous error? Invalidate this fragment. */
1030 if (unlikely(err)) 1043 if (unlikely(err))
1031 xen_netbk_idx_release(netbk, pending_idx); 1044 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1032 continue; 1045 continue;
1033 } 1046 }
1034 1047
1035 /* Error on this fragment: respond to client with an error. */ 1048 /* Error on this fragment: respond to client with an error. */
1036 txp = &netbk->pending_tx_info[pending_idx].req; 1049 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1037 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1038 index = pending_index(netbk->pending_prod++);
1039 netbk->pending_ring[index] = pending_idx;
1040 xenvif_put(vif);
1041 1050
1042 /* Not the first error? Preceding frags already invalidated. */ 1051 /* Not the first error? Preceding frags already invalidated. */
1043 if (err) 1052 if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1045 1054
1046 /* First error: invalidate header and preceding fragments. */ 1055 /* First error: invalidate header and preceding fragments. */
1047 pending_idx = *((u16 *)skb->data); 1056 pending_idx = *((u16 *)skb->data);
1048 xen_netbk_idx_release(netbk, pending_idx); 1057 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1049 for (j = start; j < i; j++) { 1058 for (j = start; j < i; j++) {
1050 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1059 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1051 xen_netbk_idx_release(netbk, pending_idx); 1060 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1052 } 1061 }
1053 1062
1054 /* Remember the error: invalidate all subsequent fragments. */ 1063 /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1082 1091
1083 /* Take an extra reference to offset xen_netbk_idx_release */ 1092 /* Take an extra reference to offset xen_netbk_idx_release */
1084 get_page(netbk->mmap_pages[pending_idx]); 1093 get_page(netbk->mmap_pages[pending_idx]);
1085 xen_netbk_idx_release(netbk, pending_idx); 1094 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1086 } 1095 }
1087} 1096}
1088 1097
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1095 1104
1096 do { 1105 do {
1097 if (unlikely(work_to_do-- <= 0)) { 1106 if (unlikely(work_to_do-- <= 0)) {
1098 netdev_dbg(vif->dev, "Missing extra info\n"); 1107 netdev_err(vif->dev, "Missing extra info\n");
1108 netbk_fatal_tx_err(vif);
1099 return -EBADR; 1109 return -EBADR;
1100 } 1110 }
1101 1111
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1104 if (unlikely(!extra.type || 1114 if (unlikely(!extra.type ||
1105 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1115 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1106 vif->tx.req_cons = ++cons; 1116 vif->tx.req_cons = ++cons;
1107 netdev_dbg(vif->dev, 1117 netdev_err(vif->dev,
1108 "Invalid extra type: %d\n", extra.type); 1118 "Invalid extra type: %d\n", extra.type);
1119 netbk_fatal_tx_err(vif);
1109 return -EINVAL; 1120 return -EINVAL;
1110 } 1121 }
1111 1122
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
1121 struct xen_netif_extra_info *gso) 1132 struct xen_netif_extra_info *gso)
1122{ 1133{
1123 if (!gso->u.gso.size) { 1134 if (!gso->u.gso.size) {
1124 netdev_dbg(vif->dev, "GSO size must not be zero.\n"); 1135 netdev_err(vif->dev, "GSO size must not be zero.\n");
1136 netbk_fatal_tx_err(vif);
1125 return -EINVAL; 1137 return -EINVAL;
1126 } 1138 }
1127 1139
1128 /* Currently only TCPv4 S.O. is supported. */ 1140 /* Currently only TCPv4 S.O. is supported. */
1129 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 1141 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1130 netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1142 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1143 netbk_fatal_tx_err(vif);
1131 return -EINVAL; 1144 return -EINVAL;
1132 } 1145 }
1133 1146
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1264 1277
1265 /* Get a netif from the list with work to do. */ 1278 /* Get a netif from the list with work to do. */
1266 vif = poll_net_schedule_list(netbk); 1279 vif = poll_net_schedule_list(netbk);
1280 /* This can sometimes happen because the test of
1281 * list_empty(net_schedule_list) at the top of the
1282 * loop is unlocked. Just go back and have another
1283 * look.
1284 */
1267 if (!vif) 1285 if (!vif)
1268 continue; 1286 continue;
1269 1287
1288 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1289 XEN_NETIF_TX_RING_SIZE) {
1290 netdev_err(vif->dev,
1291 "Impossible number of requests. "
1292 "req_prod %d, req_cons %d, size %ld\n",
1293 vif->tx.sring->req_prod, vif->tx.req_cons,
1294 XEN_NETIF_TX_RING_SIZE);
1295 netbk_fatal_tx_err(vif);
1296 continue;
1297 }
1298
1270 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1299 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1271 if (!work_to_do) { 1300 if (!work_to_do) {
1272 xenvif_put(vif); 1301 xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1294 work_to_do = xen_netbk_get_extras(vif, extras, 1323 work_to_do = xen_netbk_get_extras(vif, extras,
1295 work_to_do); 1324 work_to_do);
1296 idx = vif->tx.req_cons; 1325 idx = vif->tx.req_cons;
1297 if (unlikely(work_to_do < 0)) { 1326 if (unlikely(work_to_do < 0))
1298 netbk_tx_err(vif, &txreq, idx);
1299 continue; 1327 continue;
1300 }
1301 } 1328 }
1302 1329
1303 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1330 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1304 if (unlikely(ret < 0)) { 1331 if (unlikely(ret < 0))
1305 netbk_tx_err(vif, &txreq, idx - ret);
1306 continue; 1332 continue;
1307 } 1333
1308 idx += ret; 1334 idx += ret;
1309 1335
1310 if (unlikely(txreq.size < ETH_HLEN)) { 1336 if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1316 1342
1317 /* No crossing a page as the payload mustn't fragment. */ 1343 /* No crossing a page as the payload mustn't fragment. */
1318 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1344 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1319 netdev_dbg(vif->dev, 1345 netdev_err(vif->dev,
1320 "txreq.offset: %x, size: %u, end: %lu\n", 1346 "txreq.offset: %x, size: %u, end: %lu\n",
1321 txreq.offset, txreq.size, 1347 txreq.offset, txreq.size,
1322 (txreq.offset&~PAGE_MASK) + txreq.size); 1348 (txreq.offset&~PAGE_MASK) + txreq.size);
1323 netbk_tx_err(vif, &txreq, idx); 1349 netbk_fatal_tx_err(vif);
1324 continue; 1350 continue;
1325 } 1351 }
1326 1352
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1348 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1374 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1349 1375
1350 if (netbk_set_skb_gso(vif, skb, gso)) { 1376 if (netbk_set_skb_gso(vif, skb, gso)) {
1377 /* Failure in netbk_set_skb_gso is fatal. */
1351 kfree_skb(skb); 1378 kfree_skb(skb);
1352 netbk_tx_err(vif, &txreq, idx);
1353 continue; 1379 continue;
1354 } 1380 }
1355 } 1381 }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1448 txp->size -= data_len; 1474 txp->size -= data_len;
1449 } else { 1475 } else {
1450 /* Schedule a response immediately. */ 1476 /* Schedule a response immediately. */
1451 xen_netbk_idx_release(netbk, pending_idx); 1477 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1452 } 1478 }
1453 1479
1454 if (txp->flags & XEN_NETTXF_csum_blank) 1480 if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
1500 xen_netbk_tx_submit(netbk); 1526 xen_netbk_tx_submit(netbk);
1501} 1527}
1502 1528
1503static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) 1529static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1530 u8 status)
1504{ 1531{
1505 struct xenvif *vif; 1532 struct xenvif *vif;
1506 struct pending_tx_info *pending_tx_info; 1533 struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1514 1541
1515 vif = pending_tx_info->vif; 1542 vif = pending_tx_info->vif;
1516 1543
1517 make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); 1544 make_tx_response(vif, &pending_tx_info->req, status);
1518 1545
1519 index = pending_index(netbk->pending_prod++); 1546 index = pending_index(netbk->pending_prod++);
1520 netbk->pending_ring[index] = pending_idx; 1547 netbk->pending_ring[index] = pending_idx;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 2390ddb22d60..321d3ef05006 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -24,38 +24,21 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26 26
27/** 27#include "of_private.h"
28 * struct alias_prop - Alias property in 'aliases' node
29 * @link: List node to link the structure in aliases_lookup list
30 * @alias: Alias property name
31 * @np: Pointer to device_node that the alias stands for
32 * @id: Index value from end of alias name
33 * @stem: Alias string without the index
34 *
35 * The structure represents one alias property of 'aliases' node as
36 * an entry in aliases_lookup list.
37 */
38struct alias_prop {
39 struct list_head link;
40 const char *alias;
41 struct device_node *np;
42 int id;
43 char stem[0];
44};
45 28
46static LIST_HEAD(aliases_lookup); 29LIST_HEAD(aliases_lookup);
47 30
48struct device_node *of_allnodes; 31struct device_node *of_allnodes;
49EXPORT_SYMBOL(of_allnodes); 32EXPORT_SYMBOL(of_allnodes);
50struct device_node *of_chosen; 33struct device_node *of_chosen;
51struct device_node *of_aliases; 34struct device_node *of_aliases;
52 35
53static DEFINE_MUTEX(of_aliases_mutex); 36DEFINE_MUTEX(of_aliases_mutex);
54 37
55/* use when traversing tree through the allnext, child, sibling, 38/* use when traversing tree through the allnext, child, sibling,
56 * or parent members of struct device_node. 39 * or parent members of struct device_node.
57 */ 40 */
58DEFINE_RWLOCK(devtree_lock); 41DEFINE_RAW_SPINLOCK(devtree_lock);
59 42
60int of_n_addr_cells(struct device_node *np) 43int of_n_addr_cells(struct device_node *np)
61{ 44{
@@ -164,16 +147,14 @@ void of_node_put(struct device_node *node)
164EXPORT_SYMBOL(of_node_put); 147EXPORT_SYMBOL(of_node_put);
165#endif /* CONFIG_OF_DYNAMIC */ 148#endif /* CONFIG_OF_DYNAMIC */
166 149
167struct property *of_find_property(const struct device_node *np, 150static struct property *__of_find_property(const struct device_node *np,
168 const char *name, 151 const char *name, int *lenp)
169 int *lenp)
170{ 152{
171 struct property *pp; 153 struct property *pp;
172 154
173 if (!np) 155 if (!np)
174 return NULL; 156 return NULL;
175 157
176 read_lock(&devtree_lock);
177 for (pp = np->properties; pp; pp = pp->next) { 158 for (pp = np->properties; pp; pp = pp->next) {
178 if (of_prop_cmp(pp->name, name) == 0) { 159 if (of_prop_cmp(pp->name, name) == 0) {
179 if (lenp) 160 if (lenp)
@@ -181,7 +162,20 @@ struct property *of_find_property(const struct device_node *np,
181 break; 162 break;
182 } 163 }
183 } 164 }
184 read_unlock(&devtree_lock); 165
166 return pp;
167}
168
169struct property *of_find_property(const struct device_node *np,
170 const char *name,
171 int *lenp)
172{
173 struct property *pp;
174 unsigned long flags;
175
176 raw_spin_lock_irqsave(&devtree_lock, flags);
177 pp = __of_find_property(np, name, lenp);
178 raw_spin_unlock_irqrestore(&devtree_lock, flags);
185 179
186 return pp; 180 return pp;
187} 181}
@@ -199,13 +193,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
199{ 193{
200 struct device_node *np; 194 struct device_node *np;
201 195
202 read_lock(&devtree_lock); 196 raw_spin_lock(&devtree_lock);
203 np = prev ? prev->allnext : of_allnodes; 197 np = prev ? prev->allnext : of_allnodes;
204 for (; np != NULL; np = np->allnext) 198 for (; np != NULL; np = np->allnext)
205 if (of_node_get(np)) 199 if (of_node_get(np))
206 break; 200 break;
207 of_node_put(prev); 201 of_node_put(prev);
208 read_unlock(&devtree_lock); 202 raw_spin_unlock(&devtree_lock);
209 return np; 203 return np;
210} 204}
211EXPORT_SYMBOL(of_find_all_nodes); 205EXPORT_SYMBOL(of_find_all_nodes);
@@ -214,8 +208,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
214 * Find a property with a given name for a given node 208 * Find a property with a given name for a given node
215 * and return the value. 209 * and return the value.
216 */ 210 */
211static const void *__of_get_property(const struct device_node *np,
212 const char *name, int *lenp)
213{
214 struct property *pp = __of_find_property(np, name, lenp);
215
216 return pp ? pp->value : NULL;
217}
218
219/*
220 * Find a property with a given name for a given node
221 * and return the value.
222 */
217const void *of_get_property(const struct device_node *np, const char *name, 223const void *of_get_property(const struct device_node *np, const char *name,
218 int *lenp) 224 int *lenp)
219{ 225{
220 struct property *pp = of_find_property(np, name, lenp); 226 struct property *pp = of_find_property(np, name, lenp);
221 227
@@ -226,13 +232,13 @@ EXPORT_SYMBOL(of_get_property);
226/** Checks if the given "compat" string matches one of the strings in 232/** Checks if the given "compat" string matches one of the strings in
227 * the device's "compatible" property 233 * the device's "compatible" property
228 */ 234 */
229int of_device_is_compatible(const struct device_node *device, 235static int __of_device_is_compatible(const struct device_node *device,
230 const char *compat) 236 const char *compat)
231{ 237{
232 const char* cp; 238 const char* cp;
233 int cplen, l; 239 int cplen, l;
234 240
235 cp = of_get_property(device, "compatible", &cplen); 241 cp = __of_get_property(device, "compatible", &cplen);
236 if (cp == NULL) 242 if (cp == NULL)
237 return 0; 243 return 0;
238 while (cplen > 0) { 244 while (cplen > 0) {
@@ -245,6 +251,21 @@ int of_device_is_compatible(const struct device_node *device,
245 251
246 return 0; 252 return 0;
247} 253}
254
255/** Checks if the given "compat" string matches one of the strings in
256 * the device's "compatible" property
257 */
258int of_device_is_compatible(const struct device_node *device,
259 const char *compat)
260{
261 unsigned long flags;
262 int res;
263
264 raw_spin_lock_irqsave(&devtree_lock, flags);
265 res = __of_device_is_compatible(device, compat);
266 raw_spin_unlock_irqrestore(&devtree_lock, flags);
267 return res;
268}
248EXPORT_SYMBOL(of_device_is_compatible); 269EXPORT_SYMBOL(of_device_is_compatible);
249 270
250/** 271/**
@@ -269,19 +290,19 @@ int of_machine_is_compatible(const char *compat)
269EXPORT_SYMBOL(of_machine_is_compatible); 290EXPORT_SYMBOL(of_machine_is_compatible);
270 291
271/** 292/**
272 * of_device_is_available - check if a device is available for use 293 * __of_device_is_available - check if a device is available for use
273 * 294 *
274 * @device: Node to check for availability 295 * @device: Node to check for availability, with locks already held
275 * 296 *
276 * Returns 1 if the status property is absent or set to "okay" or "ok", 297 * Returns 1 if the status property is absent or set to "okay" or "ok",
277 * 0 otherwise 298 * 0 otherwise
278 */ 299 */
279int of_device_is_available(const struct device_node *device) 300static int __of_device_is_available(const struct device_node *device)
280{ 301{
281 const char *status; 302 const char *status;
282 int statlen; 303 int statlen;
283 304
284 status = of_get_property(device, "status", &statlen); 305 status = __of_get_property(device, "status", &statlen);
285 if (status == NULL) 306 if (status == NULL)
286 return 1; 307 return 1;
287 308
@@ -292,6 +313,26 @@ int of_device_is_available(const struct device_node *device)
292 313
293 return 0; 314 return 0;
294} 315}
316
317/**
318 * of_device_is_available - check if a device is available for use
319 *
320 * @device: Node to check for availability
321 *
322 * Returns 1 if the status property is absent or set to "okay" or "ok",
323 * 0 otherwise
324 */
325int of_device_is_available(const struct device_node *device)
326{
327 unsigned long flags;
328 int res;
329
330 raw_spin_lock_irqsave(&devtree_lock, flags);
331 res = __of_device_is_available(device);
332 raw_spin_unlock_irqrestore(&devtree_lock, flags);
333 return res;
334
335}
295EXPORT_SYMBOL(of_device_is_available); 336EXPORT_SYMBOL(of_device_is_available);
296 337
297/** 338/**
@@ -304,13 +345,14 @@ EXPORT_SYMBOL(of_device_is_available);
304struct device_node *of_get_parent(const struct device_node *node) 345struct device_node *of_get_parent(const struct device_node *node)
305{ 346{
306 struct device_node *np; 347 struct device_node *np;
348 unsigned long flags;
307 349
308 if (!node) 350 if (!node)
309 return NULL; 351 return NULL;
310 352
311 read_lock(&devtree_lock); 353 raw_spin_lock_irqsave(&devtree_lock, flags);
312 np = of_node_get(node->parent); 354 np = of_node_get(node->parent);
313 read_unlock(&devtree_lock); 355 raw_spin_unlock_irqrestore(&devtree_lock, flags);
314 return np; 356 return np;
315} 357}
316EXPORT_SYMBOL(of_get_parent); 358EXPORT_SYMBOL(of_get_parent);
@@ -329,14 +371,15 @@ EXPORT_SYMBOL(of_get_parent);
329struct device_node *of_get_next_parent(struct device_node *node) 371struct device_node *of_get_next_parent(struct device_node *node)
330{ 372{
331 struct device_node *parent; 373 struct device_node *parent;
374 unsigned long flags;
332 375
333 if (!node) 376 if (!node)
334 return NULL; 377 return NULL;
335 378
336 read_lock(&devtree_lock); 379 raw_spin_lock_irqsave(&devtree_lock, flags);
337 parent = of_node_get(node->parent); 380 parent = of_node_get(node->parent);
338 of_node_put(node); 381 of_node_put(node);
339 read_unlock(&devtree_lock); 382 raw_spin_unlock_irqrestore(&devtree_lock, flags);
340 return parent; 383 return parent;
341} 384}
342 385
@@ -352,14 +395,15 @@ struct device_node *of_get_next_child(const struct device_node *node,
352 struct device_node *prev) 395 struct device_node *prev)
353{ 396{
354 struct device_node *next; 397 struct device_node *next;
398 unsigned long flags;
355 399
356 read_lock(&devtree_lock); 400 raw_spin_lock_irqsave(&devtree_lock, flags);
357 next = prev ? prev->sibling : node->child; 401 next = prev ? prev->sibling : node->child;
358 for (; next; next = next->sibling) 402 for (; next; next = next->sibling)
359 if (of_node_get(next)) 403 if (of_node_get(next))
360 break; 404 break;
361 of_node_put(prev); 405 of_node_put(prev);
362 read_unlock(&devtree_lock); 406 raw_spin_unlock_irqrestore(&devtree_lock, flags);
363 return next; 407 return next;
364} 408}
365EXPORT_SYMBOL(of_get_next_child); 409EXPORT_SYMBOL(of_get_next_child);
@@ -377,16 +421,16 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
377{ 421{
378 struct device_node *next; 422 struct device_node *next;
379 423
380 read_lock(&devtree_lock); 424 raw_spin_lock(&devtree_lock);
381 next = prev ? prev->sibling : node->child; 425 next = prev ? prev->sibling : node->child;
382 for (; next; next = next->sibling) { 426 for (; next; next = next->sibling) {
383 if (!of_device_is_available(next)) 427 if (!__of_device_is_available(next))
384 continue; 428 continue;
385 if (of_node_get(next)) 429 if (of_node_get(next))
386 break; 430 break;
387 } 431 }
388 of_node_put(prev); 432 of_node_put(prev);
389 read_unlock(&devtree_lock); 433 raw_spin_unlock(&devtree_lock);
390 return next; 434 return next;
391} 435}
392EXPORT_SYMBOL(of_get_next_available_child); 436EXPORT_SYMBOL(of_get_next_available_child);
@@ -424,14 +468,15 @@ EXPORT_SYMBOL(of_get_child_by_name);
424struct device_node *of_find_node_by_path(const char *path) 468struct device_node *of_find_node_by_path(const char *path)
425{ 469{
426 struct device_node *np = of_allnodes; 470 struct device_node *np = of_allnodes;
471 unsigned long flags;
427 472
428 read_lock(&devtree_lock); 473 raw_spin_lock_irqsave(&devtree_lock, flags);
429 for (; np; np = np->allnext) { 474 for (; np; np = np->allnext) {
430 if (np->full_name && (of_node_cmp(np->full_name, path) == 0) 475 if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
431 && of_node_get(np)) 476 && of_node_get(np))
432 break; 477 break;
433 } 478 }
434 read_unlock(&devtree_lock); 479 raw_spin_unlock_irqrestore(&devtree_lock, flags);
435 return np; 480 return np;
436} 481}
437EXPORT_SYMBOL(of_find_node_by_path); 482EXPORT_SYMBOL(of_find_node_by_path);
@@ -451,15 +496,16 @@ struct device_node *of_find_node_by_name(struct device_node *from,
451 const char *name) 496 const char *name)
452{ 497{
453 struct device_node *np; 498 struct device_node *np;
499 unsigned long flags;
454 500
455 read_lock(&devtree_lock); 501 raw_spin_lock_irqsave(&devtree_lock, flags);
456 np = from ? from->allnext : of_allnodes; 502 np = from ? from->allnext : of_allnodes;
457 for (; np; np = np->allnext) 503 for (; np; np = np->allnext)
458 if (np->name && (of_node_cmp(np->name, name) == 0) 504 if (np->name && (of_node_cmp(np->name, name) == 0)
459 && of_node_get(np)) 505 && of_node_get(np))
460 break; 506 break;
461 of_node_put(from); 507 of_node_put(from);
462 read_unlock(&devtree_lock); 508 raw_spin_unlock_irqrestore(&devtree_lock, flags);
463 return np; 509 return np;
464} 510}
465EXPORT_SYMBOL(of_find_node_by_name); 511EXPORT_SYMBOL(of_find_node_by_name);
@@ -480,15 +526,16 @@ struct device_node *of_find_node_by_type(struct device_node *from,
480 const char *type) 526 const char *type)
481{ 527{
482 struct device_node *np; 528 struct device_node *np;
529 unsigned long flags;
483 530
484 read_lock(&devtree_lock); 531 raw_spin_lock_irqsave(&devtree_lock, flags);
485 np = from ? from->allnext : of_allnodes; 532 np = from ? from->allnext : of_allnodes;
486 for (; np; np = np->allnext) 533 for (; np; np = np->allnext)
487 if (np->type && (of_node_cmp(np->type, type) == 0) 534 if (np->type && (of_node_cmp(np->type, type) == 0)
488 && of_node_get(np)) 535 && of_node_get(np))
489 break; 536 break;
490 of_node_put(from); 537 of_node_put(from);
491 read_unlock(&devtree_lock); 538 raw_spin_unlock_irqrestore(&devtree_lock, flags);
492 return np; 539 return np;
493} 540}
494EXPORT_SYMBOL(of_find_node_by_type); 541EXPORT_SYMBOL(of_find_node_by_type);
@@ -511,18 +558,20 @@ struct device_node *of_find_compatible_node(struct device_node *from,
511 const char *type, const char *compatible) 558 const char *type, const char *compatible)
512{ 559{
513 struct device_node *np; 560 struct device_node *np;
561 unsigned long flags;
514 562
515 read_lock(&devtree_lock); 563 raw_spin_lock_irqsave(&devtree_lock, flags);
516 np = from ? from->allnext : of_allnodes; 564 np = from ? from->allnext : of_allnodes;
517 for (; np; np = np->allnext) { 565 for (; np; np = np->allnext) {
518 if (type 566 if (type
519 && !(np->type && (of_node_cmp(np->type, type) == 0))) 567 && !(np->type && (of_node_cmp(np->type, type) == 0)))
520 continue; 568 continue;
521 if (of_device_is_compatible(np, compatible) && of_node_get(np)) 569 if (__of_device_is_compatible(np, compatible) &&
570 of_node_get(np))
522 break; 571 break;
523 } 572 }
524 of_node_put(from); 573 of_node_put(from);
525 read_unlock(&devtree_lock); 574 raw_spin_unlock_irqrestore(&devtree_lock, flags);
526 return np; 575 return np;
527} 576}
528EXPORT_SYMBOL(of_find_compatible_node); 577EXPORT_SYMBOL(of_find_compatible_node);
@@ -544,8 +593,9 @@ struct device_node *of_find_node_with_property(struct device_node *from,
544{ 593{
545 struct device_node *np; 594 struct device_node *np;
546 struct property *pp; 595 struct property *pp;
596 unsigned long flags;
547 597
548 read_lock(&devtree_lock); 598 raw_spin_lock_irqsave(&devtree_lock, flags);
549 np = from ? from->allnext : of_allnodes; 599 np = from ? from->allnext : of_allnodes;
550 for (; np; np = np->allnext) { 600 for (; np; np = np->allnext) {
551 for (pp = np->properties; pp; pp = pp->next) { 601 for (pp = np->properties; pp; pp = pp->next) {
@@ -557,20 +607,14 @@ struct device_node *of_find_node_with_property(struct device_node *from,
557 } 607 }
558out: 608out:
559 of_node_put(from); 609 of_node_put(from);
560 read_unlock(&devtree_lock); 610 raw_spin_unlock_irqrestore(&devtree_lock, flags);
561 return np; 611 return np;
562} 612}
563EXPORT_SYMBOL(of_find_node_with_property); 613EXPORT_SYMBOL(of_find_node_with_property);
564 614
565/** 615static
566 * of_match_node - Tell if an device_node has a matching of_match structure 616const struct of_device_id *__of_match_node(const struct of_device_id *matches,
567 * @matches: array of of device match structures to search in 617 const struct device_node *node)
568 * @node: the of device structure to match against
569 *
570 * Low level utility function used by device matching.
571 */
572const struct of_device_id *of_match_node(const struct of_device_id *matches,
573 const struct device_node *node)
574{ 618{
575 if (!matches) 619 if (!matches)
576 return NULL; 620 return NULL;
@@ -584,14 +628,33 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
584 match &= node->type 628 match &= node->type
585 && !strcmp(matches->type, node->type); 629 && !strcmp(matches->type, node->type);
586 if (matches->compatible[0]) 630 if (matches->compatible[0])
587 match &= of_device_is_compatible(node, 631 match &= __of_device_is_compatible(node,
588 matches->compatible); 632 matches->compatible);
589 if (match) 633 if (match)
590 return matches; 634 return matches;
591 matches++; 635 matches++;
592 } 636 }
593 return NULL; 637 return NULL;
594} 638}
639
640/**
641 * of_match_node - Tell if an device_node has a matching of_match structure
642 * @matches: array of of device match structures to search in
643 * @node: the of device structure to match against
644 *
645 * Low level utility function used by device matching.
646 */
647const struct of_device_id *of_match_node(const struct of_device_id *matches,
648 const struct device_node *node)
649{
650 const struct of_device_id *match;
651 unsigned long flags;
652
653 raw_spin_lock_irqsave(&devtree_lock, flags);
654 match = __of_match_node(matches, node);
655 raw_spin_unlock_irqrestore(&devtree_lock, flags);
656 return match;
657}
595EXPORT_SYMBOL(of_match_node); 658EXPORT_SYMBOL(of_match_node);
596 659
597/** 660/**
@@ -612,21 +675,24 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
612 const struct of_device_id **match) 675 const struct of_device_id **match)
613{ 676{
614 struct device_node *np; 677 struct device_node *np;
678 const struct of_device_id *m;
679 unsigned long flags;
615 680
616 if (match) 681 if (match)
617 *match = NULL; 682 *match = NULL;
618 683
619 read_lock(&devtree_lock); 684 raw_spin_lock_irqsave(&devtree_lock, flags);
620 np = from ? from->allnext : of_allnodes; 685 np = from ? from->allnext : of_allnodes;
621 for (; np; np = np->allnext) { 686 for (; np; np = np->allnext) {
622 if (of_match_node(matches, np) && of_node_get(np)) { 687 m = __of_match_node(matches, np);
688 if (m && of_node_get(np)) {
623 if (match) 689 if (match)
624 *match = matches; 690 *match = m;
625 break; 691 break;
626 } 692 }
627 } 693 }
628 of_node_put(from); 694 of_node_put(from);
629 read_unlock(&devtree_lock); 695 raw_spin_unlock_irqrestore(&devtree_lock, flags);
630 return np; 696 return np;
631} 697}
632EXPORT_SYMBOL(of_find_matching_node_and_match); 698EXPORT_SYMBOL(of_find_matching_node_and_match);
@@ -669,12 +735,12 @@ struct device_node *of_find_node_by_phandle(phandle handle)
669{ 735{
670 struct device_node *np; 736 struct device_node *np;
671 737
672 read_lock(&devtree_lock); 738 raw_spin_lock(&devtree_lock);
673 for (np = of_allnodes; np; np = np->allnext) 739 for (np = of_allnodes; np; np = np->allnext)
674 if (np->phandle == handle) 740 if (np->phandle == handle)
675 break; 741 break;
676 of_node_get(np); 742 of_node_get(np);
677 read_unlock(&devtree_lock); 743 raw_spin_unlock(&devtree_lock);
678 return np; 744 return np;
679} 745}
680EXPORT_SYMBOL(of_find_node_by_phandle); 746EXPORT_SYMBOL(of_find_node_by_phandle);
@@ -1025,12 +1091,13 @@ EXPORT_SYMBOL(of_parse_phandle);
1025 * To get a device_node of the `node2' node you may call this: 1091 * To get a device_node of the `node2' node you may call this:
1026 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args); 1092 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
1027 */ 1093 */
1028int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, 1094static int __of_parse_phandle_with_args(const struct device_node *np,
1029 const char *cells_name, int index, 1095 const char *list_name,
1030 struct of_phandle_args *out_args) 1096 const char *cells_name, int index,
1097 struct of_phandle_args *out_args)
1031{ 1098{
1032 const __be32 *list, *list_end; 1099 const __be32 *list, *list_end;
1033 int size, cur_index = 0; 1100 int rc = 0, size, cur_index = 0;
1034 uint32_t count = 0; 1101 uint32_t count = 0;
1035 struct device_node *node = NULL; 1102 struct device_node *node = NULL;
1036 phandle phandle; 1103 phandle phandle;
@@ -1043,6 +1110,7 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1043 1110
1044 /* Loop over the phandles until all the requested entry is found */ 1111 /* Loop over the phandles until all the requested entry is found */
1045 while (list < list_end) { 1112 while (list < list_end) {
1113 rc = -EINVAL;
1046 count = 0; 1114 count = 0;
1047 1115
1048 /* 1116 /*
@@ -1059,13 +1127,13 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1059 if (!node) { 1127 if (!node) {
1060 pr_err("%s: could not find phandle\n", 1128 pr_err("%s: could not find phandle\n",
1061 np->full_name); 1129 np->full_name);
1062 break; 1130 goto err;
1063 } 1131 }
1064 if (of_property_read_u32(node, cells_name, &count)) { 1132 if (of_property_read_u32(node, cells_name, &count)) {
1065 pr_err("%s: could not get %s for %s\n", 1133 pr_err("%s: could not get %s for %s\n",
1066 np->full_name, cells_name, 1134 np->full_name, cells_name,
1067 node->full_name); 1135 node->full_name);
1068 break; 1136 goto err;
1069 } 1137 }
1070 1138
1071 /* 1139 /*
@@ -1075,7 +1143,7 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1075 if (list + count > list_end) { 1143 if (list + count > list_end) {
1076 pr_err("%s: arguments longer than property\n", 1144 pr_err("%s: arguments longer than property\n",
1077 np->full_name); 1145 np->full_name);
1078 break; 1146 goto err;
1079 } 1147 }
1080 } 1148 }
1081 1149
@@ -1085,9 +1153,10 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1085 * index matches, then fill the out_args structure and return, 1153 * index matches, then fill the out_args structure and return,
1086 * or return -ENOENT for an empty entry. 1154 * or return -ENOENT for an empty entry.
1087 */ 1155 */
1156 rc = -ENOENT;
1088 if (cur_index == index) { 1157 if (cur_index == index) {
1089 if (!phandle) 1158 if (!phandle)
1090 return -ENOENT; 1159 goto err;
1091 1160
1092 if (out_args) { 1161 if (out_args) {
1093 int i; 1162 int i;
@@ -1098,6 +1167,10 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1098 for (i = 0; i < count; i++) 1167 for (i = 0; i < count; i++)
1099 out_args->args[i] = be32_to_cpup(list++); 1168 out_args->args[i] = be32_to_cpup(list++);
1100 } 1169 }
1170
1171 /* Found it! return success */
1172 if (node)
1173 of_node_put(node);
1101 return 0; 1174 return 0;
1102 } 1175 }
1103 1176
@@ -1107,13 +1180,51 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
1107 cur_index++; 1180 cur_index++;
1108 } 1181 }
1109 1182
1110 /* Loop exited without finding a valid entry; return an error */ 1183 /*
1184 * Unlock node before returning result; will be one of:
1185 * -ENOENT : index is for empty phandle
1186 * -EINVAL : parsing error on data
1187 * [1..n] : Number of phandle (count mode; when index = -1)
1188 */
1189 rc = index < 0 ? cur_index : -ENOENT;
1190 err:
1111 if (node) 1191 if (node)
1112 of_node_put(node); 1192 of_node_put(node);
1113 return -EINVAL; 1193 return rc;
1194}
1195
1196int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1197 const char *cells_name, int index,
1198 struct of_phandle_args *out_args)
1199{
1200 if (index < 0)
1201 return -EINVAL;
1202 return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
1114} 1203}
1115EXPORT_SYMBOL(of_parse_phandle_with_args); 1204EXPORT_SYMBOL(of_parse_phandle_with_args);
1116 1205
1206/**
1207 * of_count_phandle_with_args() - Find the number of phandles references in a property
1208 * @np: pointer to a device tree node containing a list
1209 * @list_name: property name that contains a list
1210 * @cells_name: property name that specifies phandles' arguments count
1211 *
1212 * Returns the number of phandle + argument tuples within a property. It
1213 * is a typical pattern to encode a list of phandle and variable
1214 * arguments into a single property. The number of arguments is encoded
1215 * by a property in the phandle-target node. For example, a gpios
1216 * property would contain a list of GPIO specifies consisting of a
1217 * phandle and 1 or more arguments. The number of arguments are
1218 * determined by the #gpio-cells property in the node pointed to by the
1219 * phandle.
1220 */
1221int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1222 const char *cells_name)
1223{
1224 return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
1225}
1226EXPORT_SYMBOL(of_count_phandle_with_args);
1227
1117#if defined(CONFIG_OF_DYNAMIC) 1228#if defined(CONFIG_OF_DYNAMIC)
1118static int of_property_notify(int action, struct device_node *np, 1229static int of_property_notify(int action, struct device_node *np,
1119 struct property *prop) 1230 struct property *prop)
@@ -1146,18 +1257,18 @@ int of_add_property(struct device_node *np, struct property *prop)
1146 return rc; 1257 return rc;
1147 1258
1148 prop->next = NULL; 1259 prop->next = NULL;
1149 write_lock_irqsave(&devtree_lock, flags); 1260 raw_spin_lock_irqsave(&devtree_lock, flags);
1150 next = &np->properties; 1261 next = &np->properties;
1151 while (*next) { 1262 while (*next) {
1152 if (strcmp(prop->name, (*next)->name) == 0) { 1263 if (strcmp(prop->name, (*next)->name) == 0) {
1153 /* duplicate ! don't insert it */ 1264 /* duplicate ! don't insert it */
1154 write_unlock_irqrestore(&devtree_lock, flags); 1265 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1155 return -1; 1266 return -1;
1156 } 1267 }
1157 next = &(*next)->next; 1268 next = &(*next)->next;
1158 } 1269 }
1159 *next = prop; 1270 *next = prop;
1160 write_unlock_irqrestore(&devtree_lock, flags); 1271 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1161 1272
1162#ifdef CONFIG_PROC_DEVICETREE 1273#ifdef CONFIG_PROC_DEVICETREE
1163 /* try to add to proc as well if it was initialized */ 1274 /* try to add to proc as well if it was initialized */
@@ -1187,7 +1298,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
1187 if (rc) 1298 if (rc)
1188 return rc; 1299 return rc;
1189 1300
1190 write_lock_irqsave(&devtree_lock, flags); 1301 raw_spin_lock_irqsave(&devtree_lock, flags);
1191 next = &np->properties; 1302 next = &np->properties;
1192 while (*next) { 1303 while (*next) {
1193 if (*next == prop) { 1304 if (*next == prop) {
@@ -1200,7 +1311,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
1200 } 1311 }
1201 next = &(*next)->next; 1312 next = &(*next)->next;
1202 } 1313 }
1203 write_unlock_irqrestore(&devtree_lock, flags); 1314 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1204 1315
1205 if (!found) 1316 if (!found)
1206 return -ENODEV; 1317 return -ENODEV;
@@ -1240,7 +1351,7 @@ int of_update_property(struct device_node *np, struct property *newprop)
1240 if (!oldprop) 1351 if (!oldprop)
1241 return of_add_property(np, newprop); 1352 return of_add_property(np, newprop);
1242 1353
1243 write_lock_irqsave(&devtree_lock, flags); 1354 raw_spin_lock_irqsave(&devtree_lock, flags);
1244 next = &np->properties; 1355 next = &np->properties;
1245 while (*next) { 1356 while (*next) {
1246 if (*next == oldprop) { 1357 if (*next == oldprop) {
@@ -1254,7 +1365,7 @@ int of_update_property(struct device_node *np, struct property *newprop)
1254 } 1365 }
1255 next = &(*next)->next; 1366 next = &(*next)->next;
1256 } 1367 }
1257 write_unlock_irqrestore(&devtree_lock, flags); 1368 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1258 1369
1259 if (!found) 1370 if (!found)
1260 return -ENODEV; 1371 return -ENODEV;
@@ -1327,12 +1438,12 @@ int of_attach_node(struct device_node *np)
1327 if (rc) 1438 if (rc)
1328 return rc; 1439 return rc;
1329 1440
1330 write_lock_irqsave(&devtree_lock, flags); 1441 raw_spin_lock_irqsave(&devtree_lock, flags);
1331 np->sibling = np->parent->child; 1442 np->sibling = np->parent->child;
1332 np->allnext = of_allnodes; 1443 np->allnext = of_allnodes;
1333 np->parent->child = np; 1444 np->parent->child = np;
1334 of_allnodes = np; 1445 of_allnodes = np;
1335 write_unlock_irqrestore(&devtree_lock, flags); 1446 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1336 1447
1337 of_add_proc_dt_entry(np); 1448 of_add_proc_dt_entry(np);
1338 return 0; 1449 return 0;
@@ -1375,17 +1486,17 @@ int of_detach_node(struct device_node *np)
1375 if (rc) 1486 if (rc)
1376 return rc; 1487 return rc;
1377 1488
1378 write_lock_irqsave(&devtree_lock, flags); 1489 raw_spin_lock_irqsave(&devtree_lock, flags);
1379 1490
1380 if (of_node_check_flag(np, OF_DETACHED)) { 1491 if (of_node_check_flag(np, OF_DETACHED)) {
1381 /* someone already detached it */ 1492 /* someone already detached it */
1382 write_unlock_irqrestore(&devtree_lock, flags); 1493 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1383 return rc; 1494 return rc;
1384 } 1495 }
1385 1496
1386 parent = np->parent; 1497 parent = np->parent;
1387 if (!parent) { 1498 if (!parent) {
1388 write_unlock_irqrestore(&devtree_lock, flags); 1499 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1389 return rc; 1500 return rc;
1390 } 1501 }
1391 1502
@@ -1412,7 +1523,7 @@ int of_detach_node(struct device_node *np)
1412 } 1523 }
1413 1524
1414 of_node_set_flag(np, OF_DETACHED); 1525 of_node_set_flag(np, OF_DETACHED);
1415 write_unlock_irqrestore(&devtree_lock, flags); 1526 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1416 1527
1417 of_remove_proc_dt_entry(np); 1528 of_remove_proc_dt_entry(np);
1418 return rc; 1529 return rc;
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 4c74e4fc5a51..f685e55e0717 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -8,6 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9 9
10#include <asm/errno.h> 10#include <asm/errno.h>
11#include "of_private.h"
11 12
12/** 13/**
13 * of_match_device - Tell if a struct device matches an of_device_id list 14 * of_match_device - Tell if a struct device matches an of_device_id list
@@ -131,6 +132,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
131void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) 132void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
132{ 133{
133 const char *compat; 134 const char *compat;
135 struct alias_prop *app;
134 int seen = 0, cplen, sl; 136 int seen = 0, cplen, sl;
135 137
136 if ((!dev) || (!dev->of_node)) 138 if ((!dev) || (!dev->of_node))
@@ -153,6 +155,17 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
153 seen++; 155 seen++;
154 } 156 }
155 add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); 157 add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);
158
159 seen = 0;
160 mutex_lock(&of_aliases_mutex);
161 list_for_each_entry(app, &aliases_lookup, link) {
162 if (dev->of_node == app->np) {
163 add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
164 app->alias);
165 seen++;
166 }
167 }
168 mutex_unlock(&of_aliases_mutex);
156} 169}
157 170
158int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 171int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
new file mode 100644
index 000000000000..ff350c8fa7ac
--- /dev/null
+++ b/drivers/of/of_private.h
@@ -0,0 +1,36 @@
1#ifndef _LINUX_OF_PRIVATE_H
2#define _LINUX_OF_PRIVATE_H
3/*
4 * Private symbols used by OF support code
5 *
6 * Paul Mackerras August 1996.
7 * Copyright (C) 1996-2005 Paul Mackerras.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15/**
16 * struct alias_prop - Alias property in 'aliases' node
17 * @link: List node to link the structure in aliases_lookup list
18 * @alias: Alias property name
19 * @np: Pointer to device_node that the alias stands for
20 * @id: Index value from end of alias name
21 * @stem: Alias string without the index
22 *
23 * The structure represents one alias property of 'aliases' node as
24 * an entry in aliases_lookup list.
25 */
26struct alias_prop {
27 struct list_head link;
28 const char *alias;
29 struct device_node *np;
30 int id;
31 char stem[0];
32};
33
34extern struct mutex of_aliases_mutex;
35extern struct list_head aliases_lookup;
36#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index f24ffd7088d2..0eb5c38b4e07 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -2,7 +2,7 @@
2 * Self tests for device tree subsystem 2 * Self tests for device tree subsystem
3 */ 3 */
4 4
5#define pr_fmt(fmt) "### %s(): " fmt, __func__ 5#define pr_fmt(fmt) "### dt-test ### " fmt
6 6
7#include <linux/clk.h> 7#include <linux/clk.h>
8#include <linux/err.h> 8#include <linux/err.h>
@@ -16,26 +16,30 @@
16 16
17static bool selftest_passed = true; 17static bool selftest_passed = true;
18#define selftest(result, fmt, ...) { \ 18#define selftest(result, fmt, ...) { \
19 selftest_passed &= (result); \ 19 if (!(result)) { \
20 if (!(result)) \
21 pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \ 20 pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
21 selftest_passed = false; \
22 } else { \
23 pr_info("pass %s:%i\n", __FILE__, __LINE__); \
24 } \
22} 25}
23 26
24static void __init of_selftest_parse_phandle_with_args(void) 27static void __init of_selftest_parse_phandle_with_args(void)
25{ 28{
26 struct device_node *np; 29 struct device_node *np;
27 struct of_phandle_args args; 30 struct of_phandle_args args;
28 int rc, i; 31 int i, rc;
29 bool passed_all = true;
30 32
31 pr_info("start\n");
32 np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); 33 np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
33 if (!np) { 34 if (!np) {
34 pr_err("missing testcase data\n"); 35 pr_err("missing testcase data\n");
35 return; 36 return;
36 } 37 }
37 38
38 for (i = 0; i < 7; i++) { 39 rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
40 selftest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
41
42 for (i = 0; i < 8; i++) {
39 bool passed = true; 43 bool passed = true;
40 rc = of_parse_phandle_with_args(np, "phandle-list", 44 rc = of_parse_phandle_with_args(np, "phandle-list",
41 "#phandle-cells", i, &args); 45 "#phandle-cells", i, &args);
@@ -79,45 +83,47 @@ static void __init of_selftest_parse_phandle_with_args(void)
79 passed &= (args.args[0] == (i + 1)); 83 passed &= (args.args[0] == (i + 1));
80 break; 84 break;
81 case 7: 85 case 7:
82 passed &= (rc == -EINVAL); 86 passed &= (rc == -ENOENT);
83 break; 87 break;
84 default: 88 default:
85 passed = false; 89 passed = false;
86 } 90 }
87 91
88 if (!passed) { 92 selftest(passed, "index %i - data error on node %s rc=%i\n",
89 int j; 93 i, args.np->full_name, rc);
90 pr_err("index %i - data error on node %s rc=%i regs=[",
91 i, args.np->full_name, rc);
92 for (j = 0; j < args.args_count; j++)
93 printk(" %i", args.args[j]);
94 printk(" ]\n");
95
96 passed_all = false;
97 }
98 } 94 }
99 95
100 /* Check for missing list property */ 96 /* Check for missing list property */
101 rc = of_parse_phandle_with_args(np, "phandle-list-missing", 97 rc = of_parse_phandle_with_args(np, "phandle-list-missing",
102 "#phandle-cells", 0, &args); 98 "#phandle-cells", 0, &args);
103 passed_all &= (rc == -EINVAL); 99 selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
100 rc = of_count_phandle_with_args(np, "phandle-list-missing",
101 "#phandle-cells");
102 selftest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
104 103
105 /* Check for missing cells property */ 104 /* Check for missing cells property */
106 rc = of_parse_phandle_with_args(np, "phandle-list", 105 rc = of_parse_phandle_with_args(np, "phandle-list",
107 "#phandle-cells-missing", 0, &args); 106 "#phandle-cells-missing", 0, &args);
108 passed_all &= (rc == -EINVAL); 107 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
108 rc = of_count_phandle_with_args(np, "phandle-list",
109 "#phandle-cells-missing");
110 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
109 111
110 /* Check for bad phandle in list */ 112 /* Check for bad phandle in list */
111 rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle", 113 rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
112 "#phandle-cells", 0, &args); 114 "#phandle-cells", 0, &args);
113 passed_all &= (rc == -EINVAL); 115 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
116 rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
117 "#phandle-cells");
118 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
114 119
115 /* Check for incorrectly formed argument list */ 120 /* Check for incorrectly formed argument list */
116 rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", 121 rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
117 "#phandle-cells", 1, &args); 122 "#phandle-cells", 1, &args);
118 passed_all &= (rc == -EINVAL); 123 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
119 124 rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
120 pr_info("end - %s\n", passed_all ? "PASS" : "FAIL"); 125 "#phandle-cells");
126 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
121} 127}
122 128
123static void __init of_selftest_property_match_string(void) 129static void __init of_selftest_property_match_string(void)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3d6d4fd1e3c5..a951c22921d1 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -734,34 +734,24 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
734 */ 734 */
735static int acpiphp_bus_add(struct acpiphp_func *func) 735static int acpiphp_bus_add(struct acpiphp_func *func)
736{ 736{
737 acpi_handle phandle; 737 struct acpi_device *device;
738 struct acpi_device *device, *pdevice;
739 int ret_val; 738 int ret_val;
740 739
741 acpi_get_parent(func->handle, &phandle);
742 if (acpi_bus_get_device(phandle, &pdevice)) {
743 dbg("no parent device, assuming NULL\n");
744 pdevice = NULL;
745 }
746 if (!acpi_bus_get_device(func->handle, &device)) { 740 if (!acpi_bus_get_device(func->handle, &device)) {
747 dbg("bus exists... trim\n"); 741 dbg("bus exists... trim\n");
748 /* this shouldn't be in here, so remove 742 /* this shouldn't be in here, so remove
749 * the bus then re-add it... 743 * the bus then re-add it...
750 */ 744 */
751 ret_val = acpi_bus_trim(device, 1); 745 acpi_bus_trim(device);
752 dbg("acpi_bus_trim return %x\n", ret_val);
753 } 746 }
754 747
755 ret_val = acpi_bus_add(&device, pdevice, func->handle, 748 ret_val = acpi_bus_scan(func->handle);
756 ACPI_BUS_TYPE_DEVICE); 749 if (!ret_val)
757 if (ret_val) { 750 ret_val = acpi_bus_get_device(func->handle, &device);
758 dbg("error adding bus, %x\n", 751
759 -ret_val); 752 if (ret_val)
760 goto acpiphp_bus_add_out; 753 dbg("error adding bus, %x\n", -ret_val);
761 }
762 ret_val = acpi_bus_start(device);
763 754
764acpiphp_bus_add_out:
765 return ret_val; 755 return ret_val;
766} 756}
767 757
@@ -781,11 +771,8 @@ static int acpiphp_bus_trim(acpi_handle handle)
781 return retval; 771 return retval;
782 } 772 }
783 773
784 retval = acpi_bus_trim(device, 1); 774 acpi_bus_trim(device);
785 if (retval) 775 return 0;
786 err("cannot remove from acpi list\n");
787
788 return retval;
789} 776}
790 777
791static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) 778static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
@@ -1130,8 +1117,7 @@ static int acpiphp_configure_bridge (acpi_handle handle)
1130 1117
1131static void handle_bridge_insertion(acpi_handle handle, u32 type) 1118static void handle_bridge_insertion(acpi_handle handle, u32 type)
1132{ 1119{
1133 struct acpi_device *device, *pdevice; 1120 struct acpi_device *device;
1134 acpi_handle phandle;
1135 1121
1136 if ((type != ACPI_NOTIFY_BUS_CHECK) && 1122 if ((type != ACPI_NOTIFY_BUS_CHECK) &&
1137 (type != ACPI_NOTIFY_DEVICE_CHECK)) { 1123 (type != ACPI_NOTIFY_DEVICE_CHECK)) {
@@ -1139,17 +1125,15 @@ static void handle_bridge_insertion(acpi_handle handle, u32 type)
1139 return; 1125 return;
1140 } 1126 }
1141 1127
1142 acpi_get_parent(handle, &phandle); 1128 if (acpi_bus_scan(handle)) {
1143 if (acpi_bus_get_device(phandle, &pdevice)) {
1144 dbg("no parent device, assuming NULL\n");
1145 pdevice = NULL;
1146 }
1147 if (acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE)) {
1148 err("cannot add bridge to acpi list\n"); 1129 err("cannot add bridge to acpi list\n");
1149 return; 1130 return;
1150 } 1131 }
1151 if (!acpiphp_configure_bridge(handle) && 1132 if (acpi_bus_get_device(handle, &device)) {
1152 !acpi_bus_start(device)) 1133 err("ACPI device object missing\n");
1134 return;
1135 }
1136 if (!acpiphp_configure_bridge(handle))
1153 add_bridge(handle); 1137 add_bridge(handle);
1154 else 1138 else
1155 err("cannot configure and start bridge\n"); 1139 err("cannot configure and start bridge\n");
@@ -1234,6 +1218,8 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
1234 handle = hp_work->handle; 1218 handle = hp_work->handle;
1235 type = hp_work->type; 1219 type = hp_work->type;
1236 1220
1221 acpi_scan_lock_acquire();
1222
1237 if (acpi_bus_get_device(handle, &device)) { 1223 if (acpi_bus_get_device(handle, &device)) {
1238 /* This bridge must have just been physically inserted */ 1224 /* This bridge must have just been physically inserted */
1239 handle_bridge_insertion(handle, type); 1225 handle_bridge_insertion(handle, type);
@@ -1311,6 +1297,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
1311 } 1297 }
1312 1298
1313out: 1299out:
1300 acpi_scan_lock_release();
1314 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ 1301 kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
1315} 1302}
1316 1303
@@ -1357,6 +1344,8 @@ static void _handle_hotplug_event_func(struct work_struct *work)
1357 1344
1358 func = (struct acpiphp_func *)context; 1345 func = (struct acpiphp_func *)context;
1359 1346
1347 acpi_scan_lock_acquire();
1348
1360 switch (type) { 1349 switch (type) {
1361 case ACPI_NOTIFY_BUS_CHECK: 1350 case ACPI_NOTIFY_BUS_CHECK:
1362 /* bus re-enumerate */ 1351 /* bus re-enumerate */
@@ -1387,6 +1376,7 @@ static void _handle_hotplug_event_func(struct work_struct *work)
1387 break; 1376 break;
1388 } 1377 }
1389 1378
1379 acpi_scan_lock_release();
1390 kfree(hp_work); /* allocated in handle_hotplug_event_func */ 1380 kfree(hp_work); /* allocated in handle_hotplug_event_func */
1391} 1381}
1392 1382
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 26ffd3e3fb74..2c113de94323 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;
44extern int pciehp_poll_time; 44extern int pciehp_poll_time;
45extern bool pciehp_debug; 45extern bool pciehp_debug;
46extern bool pciehp_force; 46extern bool pciehp_force;
47extern struct workqueue_struct *pciehp_wq;
48 47
49#define dbg(format, arg...) \ 48#define dbg(format, arg...) \
50do { \ 49do { \
@@ -78,6 +77,7 @@ struct slot {
78 struct hotplug_slot *hotplug_slot; 77 struct hotplug_slot *hotplug_slot;
79 struct delayed_work work; /* work for button event */ 78 struct delayed_work work; /* work for button event */
80 struct mutex lock; 79 struct mutex lock;
80 struct workqueue_struct *wq;
81}; 81};
82 82
83struct event_info { 83struct event_info {
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 916bf4f53aba..939bd1d4b5b1 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -42,7 +42,6 @@ bool pciehp_debug;
42bool pciehp_poll_mode; 42bool pciehp_poll_mode;
43int pciehp_poll_time; 43int pciehp_poll_time;
44bool pciehp_force; 44bool pciehp_force;
45struct workqueue_struct *pciehp_wq;
46 45
47#define DRIVER_VERSION "0.4" 46#define DRIVER_VERSION "0.4"
48#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" 47#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -340,18 +339,13 @@ static int __init pcied_init(void)
340{ 339{
341 int retval = 0; 340 int retval = 0;
342 341
343 pciehp_wq = alloc_workqueue("pciehp", 0, 0);
344 if (!pciehp_wq)
345 return -ENOMEM;
346
347 pciehp_firmware_init(); 342 pciehp_firmware_init();
348 retval = pcie_port_service_register(&hpdriver_portdrv); 343 retval = pcie_port_service_register(&hpdriver_portdrv);
349 dbg("pcie_port_service_register = %d\n", retval); 344 dbg("pcie_port_service_register = %d\n", retval);
350 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 345 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
351 if (retval) { 346 if (retval)
352 destroy_workqueue(pciehp_wq);
353 dbg("Failure to register service\n"); 347 dbg("Failure to register service\n");
354 } 348
355 return retval; 349 return retval;
356} 350}
357 351
@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)
359{ 353{
360 dbg("unload_pciehpd()\n"); 354 dbg("unload_pciehpd()\n");
361 pcie_port_service_unregister(&hpdriver_portdrv); 355 pcie_port_service_unregister(&hpdriver_portdrv);
362 destroy_workqueue(pciehp_wq);
363 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 356 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
364} 357}
365 358
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 27f44295a657..38f018679175 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
49 info->p_slot = p_slot; 49 info->p_slot = p_slot;
50 INIT_WORK(&info->work, interrupt_event_handler); 50 INIT_WORK(&info->work, interrupt_event_handler);
51 51
52 queue_work(pciehp_wq, &info->work); 52 queue_work(p_slot->wq, &info->work);
53 53
54 return 0; 54 return 0;
55} 55}
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
344 kfree(info); 344 kfree(info);
345 goto out; 345 goto out;
346 } 346 }
347 queue_work(pciehp_wq, &info->work); 347 queue_work(p_slot->wq, &info->work);
348 out: 348 out:
349 mutex_unlock(&p_slot->lock); 349 mutex_unlock(&p_slot->lock);
350} 350}
@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
377 if (ATTN_LED(ctrl)) 377 if (ATTN_LED(ctrl))
378 pciehp_set_attention_status(p_slot, 0); 378 pciehp_set_attention_status(p_slot, 0);
379 379
380 queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); 380 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
381 break; 381 break;
382 case BLINKINGOFF_STATE: 382 case BLINKINGOFF_STATE:
383 case BLINKINGON_STATE: 383 case BLINKINGON_STATE:
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
439 else 439 else
440 p_slot->state = POWERON_STATE; 440 p_slot->state = POWERON_STATE;
441 441
442 queue_work(pciehp_wq, &info->work); 442 queue_work(p_slot->wq, &info->work);
443} 443}
444 444
445static void interrupt_event_handler(struct work_struct *work) 445static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 13b2eaf7ba43..5127f3f41821 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
773static int pcie_init_slot(struct controller *ctrl) 773static int pcie_init_slot(struct controller *ctrl)
774{ 774{
775 struct slot *slot; 775 struct slot *slot;
776 char name[32];
776 777
777 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 778 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
778 if (!slot) 779 if (!slot)
779 return -ENOMEM; 780 return -ENOMEM;
780 781
782 snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
783 slot->wq = alloc_workqueue(name, 0, 0);
784 if (!slot->wq)
785 goto abort;
786
781 slot->ctrl = ctrl; 787 slot->ctrl = ctrl;
782 mutex_init(&slot->lock); 788 mutex_init(&slot->lock);
783 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 789 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
784 ctrl->slot = slot; 790 ctrl->slot = slot;
785 return 0; 791 return 0;
792abort:
793 kfree(slot);
794 return -ENOMEM;
786} 795}
787 796
788static void pcie_cleanup_slot(struct controller *ctrl) 797static void pcie_cleanup_slot(struct controller *ctrl)
789{ 798{
790 struct slot *slot = ctrl->slot; 799 struct slot *slot = ctrl->slot;
791 cancel_delayed_work(&slot->work); 800 cancel_delayed_work(&slot->work);
792 flush_workqueue(pciehp_wq); 801 destroy_workqueue(slot->wq);
793 kfree(slot); 802 kfree(slot);
794} 803}
795 804
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index f64ca92253da..574421bc2fa6 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -412,7 +412,6 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
412 if (SN_ACPI_BASE_SUPPORT() && ssdt) { 412 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
413 unsigned long long adr; 413 unsigned long long adr;
414 struct acpi_device *pdevice; 414 struct acpi_device *pdevice;
415 struct acpi_device *device;
416 acpi_handle phandle; 415 acpi_handle phandle;
417 acpi_handle chandle = NULL; 416 acpi_handle chandle = NULL;
418 acpi_handle rethandle; 417 acpi_handle rethandle;
@@ -426,6 +425,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
426 pdevice = NULL; 425 pdevice = NULL;
427 } 426 }
428 427
428 acpi_scan_lock_acquire();
429 /* 429 /*
430 * Walk the rootbus node's immediate children looking for 430 * Walk the rootbus node's immediate children looking for
431 * the slot's device node(s). There can be more than 431 * the slot's device node(s). There can be more than
@@ -448,20 +448,18 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
448 if (ACPI_SUCCESS(ret) && 448 if (ACPI_SUCCESS(ret) &&
449 (adr>>16) == (slot->device_num + 1)) { 449 (adr>>16) == (slot->device_num + 1)) {
450 450
451 ret = acpi_bus_add(&device, pdevice, chandle, 451 ret = acpi_bus_scan(chandle);
452 ACPI_BUS_TYPE_DEVICE);
453 if (ACPI_FAILURE(ret)) { 452 if (ACPI_FAILURE(ret)) {
454 printk(KERN_ERR "%s: acpi_bus_add " 453 printk(KERN_ERR "%s: acpi_bus_scan "
455 "failed (0x%x) for slot %d " 454 "failed (0x%x) for slot %d "
456 "func %d\n", __func__, 455 "func %d\n", __func__,
457 ret, (int)(adr>>16), 456 ret, (int)(adr>>16),
458 (int)(adr&0xffff)); 457 (int)(adr&0xffff));
459 /* try to continue on */ 458 /* try to continue on */
460 } else {
461 acpi_bus_start(device);
462 } 459 }
463 } 460 }
464 } 461 }
462 acpi_scan_lock_release();
465 } 463 }
466 464
467 /* Call the driver for the new device */ 465 /* Call the driver for the new device */
@@ -512,6 +510,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
512 /* Get the rootbus node pointer */ 510 /* Get the rootbus node pointer */
513 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; 511 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
514 512
513 acpi_scan_lock_acquire();
515 /* 514 /*
516 * Walk the rootbus node's immediate children looking for 515 * Walk the rootbus node's immediate children looking for
517 * the slot's device node(s). There can be more than 516 * the slot's device node(s). There can be more than
@@ -539,10 +538,10 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
539 ret = acpi_bus_get_device(chandle, 538 ret = acpi_bus_get_device(chandle,
540 &device); 539 &device);
541 if (ACPI_SUCCESS(ret)) 540 if (ACPI_SUCCESS(ret))
542 acpi_bus_trim(device, 1); 541 acpi_bus_trim(device);
543 } 542 }
544 } 543 }
545 544 acpi_scan_lock_release();
546 } 545 }
547 546
548 /* Free the SN resources assigned to the Linux device.*/ 547 /* Free the SN resources assigned to the Linux device.*/
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ca64932e658b..b849f995075a 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -46,8 +46,6 @@
46extern bool shpchp_poll_mode; 46extern bool shpchp_poll_mode;
47extern int shpchp_poll_time; 47extern int shpchp_poll_time;
48extern bool shpchp_debug; 48extern bool shpchp_debug;
49extern struct workqueue_struct *shpchp_wq;
50extern struct workqueue_struct *shpchp_ordered_wq;
51 49
52#define dbg(format, arg...) \ 50#define dbg(format, arg...) \
53do { \ 51do { \
@@ -91,6 +89,7 @@ struct slot {
91 struct list_head slot_list; 89 struct list_head slot_list;
92 struct delayed_work work; /* work for button event */ 90 struct delayed_work work; /* work for button event */
93 struct mutex lock; 91 struct mutex lock;
92 struct workqueue_struct *wq;
94 u8 hp_slot; 93 u8 hp_slot;
95}; 94};
96 95
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index b6de307248e4..3100c52c837c 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,8 +39,6 @@
39bool shpchp_debug; 39bool shpchp_debug;
40bool shpchp_poll_mode; 40bool shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42struct workqueue_struct *shpchp_wq;
43struct workqueue_struct *shpchp_ordered_wq;
44 42
45#define DRIVER_VERSION "0.4" 43#define DRIVER_VERSION "0.4"
46#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" 44#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl)
129 slot->device = ctrl->slot_device_offset + i; 127 slot->device = ctrl->slot_device_offset + i;
130 slot->hpc_ops = ctrl->hpc_ops; 128 slot->hpc_ops = ctrl->hpc_ops;
131 slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); 129 slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
130
131 snprintf(name, sizeof(name), "shpchp-%d", slot->number);
132 slot->wq = alloc_workqueue(name, 0, 0);
133 if (!slot->wq) {
134 retval = -ENOMEM;
135 goto error_info;
136 }
137
132 mutex_init(&slot->lock); 138 mutex_init(&slot->lock);
133 INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); 139 INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
134 140
@@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl)
148 if (retval) { 154 if (retval) {
149 ctrl_err(ctrl, "pci_hp_register failed with error %d\n", 155 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
150 retval); 156 retval);
151 goto error_info; 157 goto error_slotwq;
152 } 158 }
153 159
154 get_power_status(hotplug_slot, &info->power_status); 160 get_power_status(hotplug_slot, &info->power_status);
@@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl)
160 } 166 }
161 167
162 return 0; 168 return 0;
169error_slotwq:
170 destroy_workqueue(slot->wq);
163error_info: 171error_info:
164 kfree(info); 172 kfree(info);
165error_hpslot: 173error_hpslot:
@@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl)
180 slot = list_entry(tmp, struct slot, slot_list); 188 slot = list_entry(tmp, struct slot, slot_list);
181 list_del(&slot->slot_list); 189 list_del(&slot->slot_list);
182 cancel_delayed_work(&slot->work); 190 cancel_delayed_work(&slot->work);
183 flush_workqueue(shpchp_wq); 191 destroy_workqueue(slot->wq);
184 flush_workqueue(shpchp_ordered_wq);
185 pci_hp_deregister(slot->hotplug_slot); 192 pci_hp_deregister(slot->hotplug_slot);
186 } 193 }
187} 194}
@@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = {
364 371
365static int __init shpcd_init(void) 372static int __init shpcd_init(void)
366{ 373{
367 int retval = 0; 374 int retval;
368
369 shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
370 if (!shpchp_wq)
371 return -ENOMEM;
372
373 shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
374 if (!shpchp_ordered_wq) {
375 destroy_workqueue(shpchp_wq);
376 return -ENOMEM;
377 }
378 375
379 retval = pci_register_driver(&shpc_driver); 376 retval = pci_register_driver(&shpc_driver);
380 dbg("%s: pci_register_driver = %d\n", __func__, retval); 377 dbg("%s: pci_register_driver = %d\n", __func__, retval);
381 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 378 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
382 if (retval) { 379
383 destroy_workqueue(shpchp_ordered_wq);
384 destroy_workqueue(shpchp_wq);
385 }
386 return retval; 380 return retval;
387} 381}
388 382
@@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void)
390{ 384{
391 dbg("unload_shpchpd()\n"); 385 dbg("unload_shpchpd()\n");
392 pci_unregister_driver(&shpc_driver); 386 pci_unregister_driver(&shpc_driver);
393 destroy_workqueue(shpchp_ordered_wq);
394 destroy_workqueue(shpchp_wq);
395 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 387 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
396} 388}
397 389
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index f9b5a52e4115..58499277903a 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
51 info->p_slot = p_slot; 51 info->p_slot = p_slot;
52 INIT_WORK(&info->work, interrupt_event_handler); 52 INIT_WORK(&info->work, interrupt_event_handler);
53 53
54 queue_work(shpchp_wq, &info->work); 54 queue_work(p_slot->wq, &info->work);
55 55
56 return 0; 56 return 0;
57} 57}
@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
453 kfree(info); 453 kfree(info);
454 goto out; 454 goto out;
455 } 455 }
456 queue_work(shpchp_ordered_wq, &info->work); 456 queue_work(p_slot->wq, &info->work);
457 out: 457 out:
458 mutex_unlock(&p_slot->lock); 458 mutex_unlock(&p_slot->lock);
459} 459}
@@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot)
501 p_slot->hpc_ops->green_led_blink(p_slot); 501 p_slot->hpc_ops->green_led_blink(p_slot);
502 p_slot->hpc_ops->set_attention_status(p_slot, 0); 502 p_slot->hpc_ops->set_attention_status(p_slot, 0);
503 503
504 queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); 504 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
505 break; 505 break;
506 case BLINKINGOFF_STATE: 506 case BLINKINGOFF_STATE:
507 case BLINKINGON_STATE: 507 case BLINKINGON_STATE:
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5099636a6e5f..00cc78c7aa04 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -845,6 +845,32 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
845} 845}
846EXPORT_SYMBOL(pci_enable_msi_block); 846EXPORT_SYMBOL(pci_enable_msi_block);
847 847
848int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
849{
850 int ret, pos, nvec;
851 u16 msgctl;
852
853 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
854 if (!pos)
855 return -EINVAL;
856
857 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
858 ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
859
860 if (maxvec)
861 *maxvec = ret;
862
863 do {
864 nvec = ret;
865 ret = pci_enable_msi_block(dev, nvec);
866 } while (ret > 0);
867
868 if (ret < 0)
869 return ret;
870 return nvec;
871}
872EXPORT_SYMBOL(pci_enable_msi_block_auto);
873
848void pci_msi_shutdown(struct pci_dev *dev) 874void pci_msi_shutdown(struct pci_dev *dev)
849{ 875{
850 struct msi_desc *desc; 876 struct msi_desc *desc;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1af4008182fd..e407c61559ca 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -283,7 +283,6 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
283 .is_manageable = acpi_pci_power_manageable, 283 .is_manageable = acpi_pci_power_manageable,
284 .set_state = acpi_pci_set_power_state, 284 .set_state = acpi_pci_set_power_state,
285 .choose_state = acpi_pci_choose_state, 285 .choose_state = acpi_pci_choose_state,
286 .can_wakeup = acpi_pci_can_wakeup,
287 .sleep_wake = acpi_pci_sleep_wake, 286 .sleep_wake = acpi_pci_sleep_wake,
288 .run_wake = acpi_pci_run_wake, 287 .run_wake = acpi_pci_run_wake,
289}; 288};
@@ -321,10 +320,65 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
321 return 0; 320 return 0;
322} 321}
323 322
323static void pci_acpi_setup(struct device *dev)
324{
325 struct pci_dev *pci_dev = to_pci_dev(dev);
326 acpi_handle handle = ACPI_HANDLE(dev);
327 struct acpi_device *adev;
328 acpi_status status;
329 acpi_handle dummy;
330
331 /*
332 * Evaluate and parse _PRT, if exists. This code allows parsing of
333 * _PRT objects within the scope of non-bridge devices. Note that
334 * _PRTs within the scope of a PCI bridge assume the bridge's
335 * subordinate bus number.
336 *
337 * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
338 */
339 status = acpi_get_handle(handle, METHOD_NAME__PRT, &dummy);
340 if (ACPI_SUCCESS(status)) {
341 unsigned char bus;
342
343 bus = pci_dev->subordinate ?
344 pci_dev->subordinate->number : pci_dev->bus->number;
345 acpi_pci_irq_add_prt(handle, pci_domain_nr(pci_dev->bus), bus);
346 }
347
348 if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
349 return;
350
351 device_set_wakeup_capable(dev, true);
352 acpi_pci_sleep_wake(pci_dev, false);
353
354 pci_acpi_add_pm_notifier(adev, pci_dev);
355 if (adev->wakeup.flags.run_wake)
356 device_set_run_wake(dev, true);
357}
358
359static void pci_acpi_cleanup(struct device *dev)
360{
361 struct pci_dev *pci_dev = to_pci_dev(dev);
362 acpi_handle handle = ACPI_HANDLE(dev);
363 struct acpi_device *adev;
364
365 if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
366 device_set_wakeup_capable(dev, false);
367 device_set_run_wake(dev, false);
368 pci_acpi_remove_pm_notifier(adev);
369 }
370
371 if (pci_dev->subordinate)
372 acpi_pci_irq_del_prt(pci_domain_nr(pci_dev->bus),
373 pci_dev->subordinate->number);
374}
375
324static struct acpi_bus_type acpi_pci_bus = { 376static struct acpi_bus_type acpi_pci_bus = {
325 .bus = &pci_bus_type, 377 .bus = &pci_bus_type,
326 .find_device = acpi_pci_find_device, 378 .find_device = acpi_pci_find_device,
327 .find_bridge = acpi_pci_find_root_bridge, 379 .find_bridge = acpi_pci_find_root_bridge,
380 .setup = pci_acpi_setup,
381 .cleanup = pci_acpi_cleanup,
328}; 382};
329 383
330static int __init acpi_pci_init(void) 384static int __init acpi_pci_init(void)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5cb5820fae40..0c4f641b7be1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -450,7 +450,7 @@ static struct pci_platform_pm_ops *pci_platform_pm;
450int pci_set_platform_pm(struct pci_platform_pm_ops *ops) 450int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
451{ 451{
452 if (!ops->is_manageable || !ops->set_state || !ops->choose_state 452 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
453 || !ops->sleep_wake || !ops->can_wakeup) 453 || !ops->sleep_wake)
454 return -EINVAL; 454 return -EINVAL;
455 pci_platform_pm = ops; 455 pci_platform_pm = ops;
456 return 0; 456 return 0;
@@ -473,11 +473,6 @@ static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
473 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; 473 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
474} 474}
475 475
476static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
477{
478 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
479}
480
481static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) 476static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
482{ 477{
483 return pci_platform_pm ? 478 return pci_platform_pm ?
@@ -1985,25 +1980,6 @@ void pci_pm_init(struct pci_dev *dev)
1985 } 1980 }
1986} 1981}
1987 1982
1988/**
1989 * platform_pci_wakeup_init - init platform wakeup if present
1990 * @dev: PCI device
1991 *
1992 * Some devices don't have PCI PM caps but can still generate wakeup
1993 * events through platform methods (like ACPI events). If @dev supports
1994 * platform wakeup events, set the device flag to indicate as much. This
1995 * may be redundant if the device also supports PCI PM caps, but double
1996 * initialization should be safe in that case.
1997 */
1998void platform_pci_wakeup_init(struct pci_dev *dev)
1999{
2000 if (!platform_pci_can_wakeup(dev))
2001 return;
2002
2003 device_set_wakeup_capable(&dev->dev, true);
2004 platform_pci_sleep_wake(dev, false);
2005}
2006
2007static void pci_add_saved_cap(struct pci_dev *pci_dev, 1983static void pci_add_saved_cap(struct pci_dev *pci_dev,
2008 struct pci_cap_saved_state *new_cap) 1984 struct pci_cap_saved_state *new_cap)
2009{ 1985{
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e8518292826f..adfd172c5b9b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -43,9 +43,6 @@ int pci_probe_reset_function(struct pci_dev *dev);
43 * platform; to be used during system-wide transitions from a 43 * platform; to be used during system-wide transitions from a
44 * sleeping state to the working state and vice versa 44 * sleeping state to the working state and vice versa
45 * 45 *
46 * @can_wakeup: returns 'true' if given device is capable of waking up the
47 * system from a sleeping state
48 *
49 * @sleep_wake: enables/disables the system wake up capability of given device 46 * @sleep_wake: enables/disables the system wake up capability of given device
50 * 47 *
51 * @run_wake: enables/disables the platform to generate run-time wake-up events 48 * @run_wake: enables/disables the platform to generate run-time wake-up events
@@ -59,7 +56,6 @@ struct pci_platform_pm_ops {
59 bool (*is_manageable)(struct pci_dev *dev); 56 bool (*is_manageable)(struct pci_dev *dev);
60 int (*set_state)(struct pci_dev *dev, pci_power_t state); 57 int (*set_state)(struct pci_dev *dev, pci_power_t state);
61 pci_power_t (*choose_state)(struct pci_dev *dev); 58 pci_power_t (*choose_state)(struct pci_dev *dev);
62 bool (*can_wakeup)(struct pci_dev *dev);
63 int (*sleep_wake)(struct pci_dev *dev, bool enable); 59 int (*sleep_wake)(struct pci_dev *dev, bool enable);
64 int (*run_wake)(struct pci_dev *dev, bool enable); 60 int (*run_wake)(struct pci_dev *dev, bool enable);
65}; 61};
@@ -74,7 +70,6 @@ extern void pci_wakeup_bus(struct pci_bus *bus);
74extern void pci_config_pm_runtime_get(struct pci_dev *dev); 70extern void pci_config_pm_runtime_get(struct pci_dev *dev);
75extern void pci_config_pm_runtime_put(struct pci_dev *dev); 71extern void pci_config_pm_runtime_put(struct pci_dev *dev);
76extern void pci_pm_init(struct pci_dev *dev); 72extern void pci_pm_init(struct pci_dev *dev);
77extern void platform_pci_wakeup_init(struct pci_dev *dev);
78extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 73extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
79void pci_free_cap_save_buffers(struct pci_dev *dev); 74void pci_free_cap_save_buffers(struct pci_dev *dev);
80 75
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6c8bc5809787..fde4a32a0295 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -82,4 +82,4 @@ endchoice
82 82
83config PCIE_PME 83config PCIE_PME
84 def_bool y 84 def_bool y
85 depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI 85 depends on PCIEPORTBUS && PM_RUNTIME && ACPI
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 421bbc5fee32..564d97f94b6c 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work)
630 continue; 630 continue;
631 } 631 }
632 do_recovery(pdev, entry.severity); 632 do_recovery(pdev, entry.severity);
633 pci_dev_put(pdev);
633 } 634 }
634} 635}
635#endif 636#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 3ea51736f18d..5ab14251839d 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -23,6 +23,9 @@
23 23
24#include "aerdrv.h" 24#include "aerdrv.h"
25 25
26#define CREATE_TRACE_POINTS
27#include <trace/events/ras.h>
28
26#define AER_AGENT_RECEIVER 0 29#define AER_AGENT_RECEIVER 0
27#define AER_AGENT_REQUESTER 1 30#define AER_AGENT_REQUESTER 1
28#define AER_AGENT_COMPLETER 2 31#define AER_AGENT_COMPLETER 2
@@ -121,12 +124,11 @@ static const char *aer_agent_string[] = {
121 "Transmitter ID" 124 "Transmitter ID"
122}; 125};
123 126
124static void __aer_print_error(const char *prefix, 127static void __aer_print_error(struct pci_dev *dev,
125 struct aer_err_info *info) 128 struct aer_err_info *info)
126{ 129{
127 int i, status; 130 int i, status;
128 const char *errmsg = NULL; 131 const char *errmsg = NULL;
129
130 status = (info->status & ~info->mask); 132 status = (info->status & ~info->mask);
131 133
132 for (i = 0; i < 32; i++) { 134 for (i = 0; i < 32; i++) {
@@ -141,26 +143,22 @@ static void __aer_print_error(const char *prefix,
141 aer_uncorrectable_error_string[i] : NULL; 143 aer_uncorrectable_error_string[i] : NULL;
142 144
143 if (errmsg) 145 if (errmsg)
144 printk("%s"" [%2d] %-22s%s\n", prefix, i, errmsg, 146 dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg,
145 info->first_error == i ? " (First)" : ""); 147 info->first_error == i ? " (First)" : "");
146 else 148 else
147 printk("%s"" [%2d] Unknown Error Bit%s\n", prefix, i, 149 dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n",
148 info->first_error == i ? " (First)" : ""); 150 i, info->first_error == i ? " (First)" : "");
149 } 151 }
150} 152}
151 153
152void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) 154void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
153{ 155{
154 int id = ((dev->bus->number << 8) | dev->devfn); 156 int id = ((dev->bus->number << 8) | dev->devfn);
155 char prefix[44];
156
157 snprintf(prefix, sizeof(prefix), "%s%s %s: ",
158 (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
159 dev_driver_string(&dev->dev), dev_name(&dev->dev));
160 157
161 if (info->status == 0) { 158 if (info->status == 0) {
162 printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, " 159 dev_err(&dev->dev,
163 "id=%04x(Unregistered Agent ID)\n", prefix, 160 "PCIe Bus Error: severity=%s, type=Unaccessible, "
161 "id=%04x(Unregistered Agent ID)\n",
164 aer_error_severity_string[info->severity], id); 162 aer_error_severity_string[info->severity], id);
165 } else { 163 } else {
166 int layer, agent; 164 int layer, agent;
@@ -168,22 +166,24 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
168 layer = AER_GET_LAYER_ERROR(info->severity, info->status); 166 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
169 agent = AER_GET_AGENT(info->severity, info->status); 167 agent = AER_GET_AGENT(info->severity, info->status);
170 168
171 printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 169 dev_err(&dev->dev,
172 prefix, aer_error_severity_string[info->severity], 170 "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
171 aer_error_severity_string[info->severity],
173 aer_error_layer[layer], id, aer_agent_string[agent]); 172 aer_error_layer[layer], id, aer_agent_string[agent]);
174 173
175 printk("%s"" device [%04x:%04x] error status/mask=%08x/%08x\n", 174 dev_err(&dev->dev,
176 prefix, dev->vendor, dev->device, 175 " device [%04x:%04x] error status/mask=%08x/%08x\n",
176 dev->vendor, dev->device,
177 info->status, info->mask); 177 info->status, info->mask);
178 178
179 __aer_print_error(prefix, info); 179 __aer_print_error(dev, info);
180 180
181 if (info->tlp_header_valid) { 181 if (info->tlp_header_valid) {
182 unsigned char *tlp = (unsigned char *) &info->tlp; 182 unsigned char *tlp = (unsigned char *) &info->tlp;
183 printk("%s"" TLP Header:" 183 dev_err(&dev->dev, " TLP Header:"
184 " %02x%02x%02x%02x %02x%02x%02x%02x" 184 " %02x%02x%02x%02x %02x%02x%02x%02x"
185 " %02x%02x%02x%02x %02x%02x%02x%02x\n", 185 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
186 prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, 186 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
187 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), 187 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
188 *(tlp + 11), *(tlp + 10), *(tlp + 9), 188 *(tlp + 11), *(tlp + 10), *(tlp + 9),
189 *(tlp + 8), *(tlp + 15), *(tlp + 14), 189 *(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -192,8 +192,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
192 } 192 }
193 193
194 if (info->id && info->error_dev_num > 1 && info->id == id) 194 if (info->id && info->error_dev_num > 1 && info->id == id)
195 printk("%s"" Error of this Agent(%04x) is reported first\n", 195 dev_err(&dev->dev,
196 prefix, id); 196 " Error of this Agent(%04x) is reported first\n",
197 id);
198 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
199 info->severity);
197} 200}
198 201
199void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) 202void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -217,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
217} 220}
218EXPORT_SYMBOL_GPL(cper_severity_to_aer); 221EXPORT_SYMBOL_GPL(cper_severity_to_aer);
219 222
220void cper_print_aer(const char *prefix, int cper_severity, 223void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
221 struct aer_capability_regs *aer) 224 struct aer_capability_regs *aer)
222{ 225{
223 int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0; 226 int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -239,25 +242,27 @@ void cper_print_aer(const char *prefix, int cper_severity,
239 } 242 }
240 layer = AER_GET_LAYER_ERROR(aer_severity, status); 243 layer = AER_GET_LAYER_ERROR(aer_severity, status);
241 agent = AER_GET_AGENT(aer_severity, status); 244 agent = AER_GET_AGENT(aer_severity, status);
242 printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n", 245 dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
243 prefix, status, mask); 246 status, mask);
244 cper_print_bits(prefix, status, status_strs, status_strs_size); 247 cper_print_bits(prefix, status, status_strs, status_strs_size);
245 printk("%s""aer_layer=%s, aer_agent=%s\n", prefix, 248 dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
246 aer_error_layer[layer], aer_agent_string[agent]); 249 aer_error_layer[layer], aer_agent_string[agent]);
247 if (aer_severity != AER_CORRECTABLE) 250 if (aer_severity != AER_CORRECTABLE)
248 printk("%s""aer_uncor_severity: 0x%08x\n", 251 dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
249 prefix, aer->uncor_severity); 252 aer->uncor_severity);
250 if (tlp_header_valid) { 253 if (tlp_header_valid) {
251 const unsigned char *tlp; 254 const unsigned char *tlp;
252 tlp = (const unsigned char *)&aer->header_log; 255 tlp = (const unsigned char *)&aer->header_log;
253 printk("%s""aer_tlp_header:" 256 dev_err(&dev->dev, "aer_tlp_header:"
254 " %02x%02x%02x%02x %02x%02x%02x%02x" 257 " %02x%02x%02x%02x %02x%02x%02x%02x"
255 " %02x%02x%02x%02x %02x%02x%02x%02x\n", 258 " %02x%02x%02x%02x %02x%02x%02x%02x\n",
256 prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, 259 *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
257 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), 260 *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
258 *(tlp + 11), *(tlp + 10), *(tlp + 9), 261 *(tlp + 11), *(tlp + 10), *(tlp + 9),
259 *(tlp + 8), *(tlp + 15), *(tlp + 14), 262 *(tlp + 8), *(tlp + 15), *(tlp + 14),
260 *(tlp + 13), *(tlp + 12)); 263 *(tlp + 13), *(tlp + 12));
261 } 264 }
265 trace_aer_event(dev_name(&dev->dev), (status & ~mask),
266 aer_severity);
262} 267}
263#endif 268#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b52630b8eada..8474b6a4fc9b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
771{ 771{
772 struct pci_dev *child; 772 struct pci_dev *child;
773 773
774 if (aspm_force)
775 return;
776
774 /* 777 /*
775 * Clear any ASPM setup that the firmware has carried out on this bus 778 * Clear any ASPM setup that the firmware has carried out on this bus
776 */ 779 */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6186f03d84f3..2dcd22d9c816 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1280,7 +1280,6 @@ static void pci_init_capabilities(struct pci_dev *dev)
1280 1280
1281 /* Power Management */ 1281 /* Power Management */
1282 pci_pm_init(dev); 1282 pci_pm_init(dev);
1283 platform_pci_wakeup_init(dev);
1284 1283
1285 /* Vital Product Data */ 1284 /* Vital Product Data */
1286 pci_vpd_pci22_init(dev); 1285 pci_vpd_pci22_init(dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 7c0fd9252e6f..84954a726a94 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
19 19
20static void pci_stop_dev(struct pci_dev *dev) 20static void pci_stop_dev(struct pci_dev *dev)
21{ 21{
22 pci_pme_active(dev, false);
23
22 if (dev->is_added) { 24 if (dev->is_added) {
23 pci_proc_detach_device(dev); 25 pci_proc_detach_device(dev);
24 pci_remove_sysfs_dev_files(dev); 26 pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c31aeb01bb00..393b0ecf4ca4 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -26,6 +26,29 @@ config DEBUG_PINCTRL
26 help 26 help
27 Say Y here to add some extra checks and diagnostics to PINCTRL calls. 27 Say Y here to add some extra checks and diagnostics to PINCTRL calls.
28 28
29config PINCTRL_ABX500
30 bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
31 depends on AB8500_CORE
32 select GENERIC_PINCONF
33 help
34 Select this to enable the ABx500 family IC GPIO driver
35
36config PINCTRL_AB8500
37 bool "AB8500 pin controller driver"
38 depends on PINCTRL_ABX500 && ARCH_U8500
39
40config PINCTRL_AB8540
41 bool "AB8540 pin controller driver"
42 depends on PINCTRL_ABX500 && ARCH_U8500
43
44config PINCTRL_AB9540
45 bool "AB9540 pin controller driver"
46 depends on PINCTRL_ABX500 && ARCH_U8500
47
48config PINCTRL_AB8505
49 bool "AB8505 pin controller driver"
50 depends on PINCTRL_ABX500 && ARCH_U8500
51
29config PINCTRL_AT91 52config PINCTRL_AT91
30 bool "AT91 pinctrl driver" 53 bool "AT91 pinctrl driver"
31 depends on OF 54 depends on OF
@@ -151,6 +174,11 @@ config PINCTRL_SIRF
151 depends on ARCH_SIRF 174 depends on ARCH_SIRF
152 select PINMUX 175 select PINMUX
153 176
177config PINCTRL_SUNXI
178 bool
179 select PINMUX
180 select GENERIC_PINCONF
181
154config PINCTRL_TEGRA 182config PINCTRL_TEGRA
155 bool 183 bool
156 select PINMUX 184 select PINMUX
@@ -164,6 +192,10 @@ config PINCTRL_TEGRA30
164 bool 192 bool
165 select PINCTRL_TEGRA 193 select PINCTRL_TEGRA
166 194
195config PINCTRL_TEGRA114
196 bool
197 select PINCTRL_TEGRA
198
167config PINCTRL_U300 199config PINCTRL_U300
168 bool "U300 pin controller driver" 200 bool "U300 pin controller driver"
169 depends on ARCH_U300 201 depends on ARCH_U300
@@ -181,12 +213,11 @@ config PINCTRL_COH901
181 213
182config PINCTRL_SAMSUNG 214config PINCTRL_SAMSUNG
183 bool 215 bool
184 depends on OF && GPIOLIB
185 select PINMUX 216 select PINMUX
186 select PINCONF 217 select PINCONF
187 218
188config PINCTRL_EXYNOS4 219config PINCTRL_EXYNOS
189 bool "Pinctrl driver data for Exynos4 SoC" 220 bool "Pinctrl driver data for Samsung EXYNOS SoCs"
190 depends on OF && GPIOLIB 221 depends on OF && GPIOLIB
191 select PINCTRL_SAMSUNG 222 select PINCTRL_SAMSUNG
192 223
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index fc4606f27dc7..0fd5f57fcb57 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -9,6 +9,11 @@ ifeq ($(CONFIG_OF),y)
9obj-$(CONFIG_PINCTRL) += devicetree.o 9obj-$(CONFIG_PINCTRL) += devicetree.o
10endif 10endif
11obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o 11obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
12obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o
13obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
14obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o
15obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o
16obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o
12obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o 17obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
13obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o 18obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
14obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o 19obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
@@ -30,13 +35,15 @@ obj-$(CONFIG_PINCTRL_PXA168) += pinctrl-pxa168.o
30obj-$(CONFIG_PINCTRL_PXA910) += pinctrl-pxa910.o 35obj-$(CONFIG_PINCTRL_PXA910) += pinctrl-pxa910.o
31obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o 36obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
32obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o 37obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
38obj-$(CONFIG_PINCTRL_SUNXI) += pinctrl-sunxi.o
33obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o 39obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
34obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o 40obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
35obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o 41obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
42obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
36obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o 43obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
37obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o 44obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
38obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o 45obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
39obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o 46obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
40obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o 47obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
41obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o 48obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
42obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o 49obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 59f5a965bdc4..b0de6e7f1fdb 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -14,6 +14,7 @@
14#define pr_fmt(fmt) "pinctrl core: " fmt 14#define pr_fmt(fmt) "pinctrl core: " fmt
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/kref.h>
17#include <linux/export.h> 18#include <linux/export.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/device.h> 20#include <linux/device.h>
@@ -31,17 +32,6 @@
31#include "pinmux.h" 32#include "pinmux.h"
32#include "pinconf.h" 33#include "pinconf.h"
33 34
34/**
35 * struct pinctrl_maps - a list item containing part of the mapping table
36 * @node: mapping table list node
37 * @maps: array of mapping table entries
38 * @num_maps: the number of entries in @maps
39 */
40struct pinctrl_maps {
41 struct list_head node;
42 struct pinctrl_map const *maps;
43 unsigned num_maps;
44};
45 35
46static bool pinctrl_dummy_state; 36static bool pinctrl_dummy_state;
47 37
@@ -55,13 +45,8 @@ LIST_HEAD(pinctrldev_list);
55static LIST_HEAD(pinctrl_list); 45static LIST_HEAD(pinctrl_list);
56 46
57/* List of pinctrl maps (struct pinctrl_maps) */ 47/* List of pinctrl maps (struct pinctrl_maps) */
58static LIST_HEAD(pinctrl_maps); 48LIST_HEAD(pinctrl_maps);
59 49
60#define for_each_maps(_maps_node_, _i_, _map_) \
61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
63 _i_ < _maps_node_->num_maps; \
64 _i_++, _map_ = &_maps_node_->maps[_i_])
65 50
66/** 51/**
67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support 52 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
@@ -83,6 +68,12 @@ const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
83} 68}
84EXPORT_SYMBOL_GPL(pinctrl_dev_get_name); 69EXPORT_SYMBOL_GPL(pinctrl_dev_get_name);
85 70
71const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev)
72{
73 return dev_name(pctldev->dev);
74}
75EXPORT_SYMBOL_GPL(pinctrl_dev_get_devname);
76
86void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev) 77void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev)
87{ 78{
88 return pctldev->driver_data; 79 return pctldev->driver_data;
@@ -609,13 +600,16 @@ static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
609 600
610 setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name); 601 setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
611 if (setting->pctldev == NULL) { 602 if (setting->pctldev == NULL) {
612 dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
613 map->ctrl_dev_name);
614 kfree(setting); 603 kfree(setting);
604 /* Do not defer probing of hogs (circular loop) */
605 if (!strcmp(map->ctrl_dev_name, map->dev_name))
606 return -ENODEV;
615 /* 607 /*
616 * OK let us guess that the driver is not there yet, and 608 * OK let us guess that the driver is not there yet, and
617 * let's defer obtaining this pinctrl handle to later... 609 * let's defer obtaining this pinctrl handle to later...
618 */ 610 */
611 dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
612 map->ctrl_dev_name);
619 return -EPROBE_DEFER; 613 return -EPROBE_DEFER;
620 } 614 }
621 615
@@ -694,11 +688,31 @@ static struct pinctrl *create_pinctrl(struct device *dev)
694 continue; 688 continue;
695 689
696 ret = add_setting(p, map); 690 ret = add_setting(p, map);
697 if (ret < 0) { 691 /*
692 * At this point the adding of a setting may:
693 *
694 * - Defer, if the pinctrl device is not yet available
695 * - Fail, if the pinctrl device is not yet available,
696 * AND the setting is a hog. We cannot defer that, since
697 * the hog will kick in immediately after the device
698 * is registered.
699 *
700 * If the error returned was not -EPROBE_DEFER then we
701 * accumulate the errors to see if we end up with
702 * an -EPROBE_DEFER later, as that is the worst case.
703 */
704 if (ret == -EPROBE_DEFER) {
698 pinctrl_put_locked(p, false); 705 pinctrl_put_locked(p, false);
699 return ERR_PTR(ret); 706 return ERR_PTR(ret);
700 } 707 }
701 } 708 }
709 if (ret < 0) {
710 /* If some other error than deferral occured, return here */
711 pinctrl_put_locked(p, false);
712 return ERR_PTR(ret);
713 }
714
715 kref_init(&p->users);
702 716
703 /* Add the pinctrl handle to the global list */ 717 /* Add the pinctrl handle to the global list */
704 list_add_tail(&p->node, &pinctrl_list); 718 list_add_tail(&p->node, &pinctrl_list);
@@ -713,9 +727,17 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)
713 if (WARN_ON(!dev)) 727 if (WARN_ON(!dev))
714 return ERR_PTR(-EINVAL); 728 return ERR_PTR(-EINVAL);
715 729
730 /*
731 * See if somebody else (such as the device core) has already
732 * obtained a handle to the pinctrl for this device. In that case,
733 * return another pointer to it.
734 */
716 p = find_pinctrl(dev); 735 p = find_pinctrl(dev);
717 if (p != NULL) 736 if (p != NULL) {
718 return ERR_PTR(-EBUSY); 737 dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
738 kref_get(&p->users);
739 return p;
740 }
719 741
720 return create_pinctrl(dev); 742 return create_pinctrl(dev);
721} 743}
@@ -771,13 +793,24 @@ static void pinctrl_put_locked(struct pinctrl *p, bool inlist)
771} 793}
772 794
773/** 795/**
774 * pinctrl_put() - release a previously claimed pinctrl handle 796 * pinctrl_release() - release the pinctrl handle
797 * @kref: the kref in the pinctrl being released
798 */
799static void pinctrl_release(struct kref *kref)
800{
801 struct pinctrl *p = container_of(kref, struct pinctrl, users);
802
803 pinctrl_put_locked(p, true);
804}
805
806/**
807 * pinctrl_put() - decrease use count on a previously claimed pinctrl handle
775 * @p: the pinctrl handle to release 808 * @p: the pinctrl handle to release
776 */ 809 */
777void pinctrl_put(struct pinctrl *p) 810void pinctrl_put(struct pinctrl *p)
778{ 811{
779 mutex_lock(&pinctrl_mutex); 812 mutex_lock(&pinctrl_mutex);
780 pinctrl_put_locked(p, true); 813 kref_put(&p->users, pinctrl_release);
781 mutex_unlock(&pinctrl_mutex); 814 mutex_unlock(&pinctrl_mutex);
782} 815}
783EXPORT_SYMBOL_GPL(pinctrl_put); 816EXPORT_SYMBOL_GPL(pinctrl_put);
@@ -1055,6 +1088,30 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
1055 } 1088 }
1056} 1089}
1057 1090
1091/**
1092 * pinctrl_force_sleep() - turn a given controller device into sleep state
1093 * @pctldev: pin controller device
1094 */
1095int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
1096{
1097 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
1098 return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
1099 return 0;
1100}
1101EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
1102
1103/**
1104 * pinctrl_force_default() - turn a given controller device into default state
1105 * @pctldev: pin controller device
1106 */
1107int pinctrl_force_default(struct pinctrl_dev *pctldev)
1108{
1109 if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
1110 return pinctrl_select_state(pctldev->p, pctldev->hog_default);
1111 return 0;
1112}
1113EXPORT_SYMBOL_GPL(pinctrl_force_default);
1114
1058#ifdef CONFIG_DEBUG_FS 1115#ifdef CONFIG_DEBUG_FS
1059 1116
1060static int pinctrl_pins_show(struct seq_file *s, void *what) 1117static int pinctrl_pins_show(struct seq_file *s, void *what)
@@ -1500,16 +1557,23 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
1500 1557
1501 pctldev->p = pinctrl_get_locked(pctldev->dev); 1558 pctldev->p = pinctrl_get_locked(pctldev->dev);
1502 if (!IS_ERR(pctldev->p)) { 1559 if (!IS_ERR(pctldev->p)) {
1503 struct pinctrl_state *s = 1560 pctldev->hog_default =
1504 pinctrl_lookup_state_locked(pctldev->p, 1561 pinctrl_lookup_state_locked(pctldev->p,
1505 PINCTRL_STATE_DEFAULT); 1562 PINCTRL_STATE_DEFAULT);
1506 if (IS_ERR(s)) { 1563 if (IS_ERR(pctldev->hog_default)) {
1507 dev_dbg(dev, "failed to lookup the default state\n"); 1564 dev_dbg(dev, "failed to lookup the default state\n");
1508 } else { 1565 } else {
1509 if (pinctrl_select_state_locked(pctldev->p, s)) 1566 if (pinctrl_select_state_locked(pctldev->p,
1567 pctldev->hog_default))
1510 dev_err(dev, 1568 dev_err(dev,
1511 "failed to select default state\n"); 1569 "failed to select default state\n");
1512 } 1570 }
1571
1572 pctldev->hog_sleep =
1573 pinctrl_lookup_state_locked(pctldev->p,
1574 PINCTRL_STATE_SLEEP);
1575 if (IS_ERR(pctldev->hog_sleep))
1576 dev_dbg(dev, "failed to lookup the sleep state\n");
1513 } 1577 }
1514 1578
1515 mutex_unlock(&pinctrl_mutex); 1579 mutex_unlock(&pinctrl_mutex);
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 12f5694f3d5d..ee72f1f6d862 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -9,6 +9,7 @@
9 * License terms: GNU General Public License (GPL) version 2 9 * License terms: GNU General Public License (GPL) version 2
10 */ 10 */
11 11
12#include <linux/kref.h>
12#include <linux/mutex.h> 13#include <linux/mutex.h>
13#include <linux/radix-tree.h> 14#include <linux/radix-tree.h>
14#include <linux/pinctrl/pinconf.h> 15#include <linux/pinctrl/pinconf.h>
@@ -30,6 +31,8 @@ struct pinctrl_gpio_range;
30 * @driver_data: driver data for drivers registering to the pin controller 31 * @driver_data: driver data for drivers registering to the pin controller
31 * subsystem 32 * subsystem
32 * @p: result of pinctrl_get() for this device 33 * @p: result of pinctrl_get() for this device
34 * @hog_default: default state for pins hogged by this device
35 * @hog_sleep: sleep state for pins hogged by this device
33 * @device_root: debugfs root for this device 36 * @device_root: debugfs root for this device
34 */ 37 */
35struct pinctrl_dev { 38struct pinctrl_dev {
@@ -41,6 +44,8 @@ struct pinctrl_dev {
41 struct module *owner; 44 struct module *owner;
42 void *driver_data; 45 void *driver_data;
43 struct pinctrl *p; 46 struct pinctrl *p;
47 struct pinctrl_state *hog_default;
48 struct pinctrl_state *hog_sleep;
44#ifdef CONFIG_DEBUG_FS 49#ifdef CONFIG_DEBUG_FS
45 struct dentry *device_root; 50 struct dentry *device_root;
46#endif 51#endif
@@ -54,6 +59,7 @@ struct pinctrl_dev {
54 * @state: the current state 59 * @state: the current state
55 * @dt_maps: the mapping table chunks dynamically parsed from device tree for 60 * @dt_maps: the mapping table chunks dynamically parsed from device tree for
56 * this device, if any 61 * this device, if any
62 * @users: reference count
57 */ 63 */
58struct pinctrl { 64struct pinctrl {
59 struct list_head node; 65 struct list_head node;
@@ -61,6 +67,7 @@ struct pinctrl {
61 struct list_head states; 67 struct list_head states;
62 struct pinctrl_state *state; 68 struct pinctrl_state *state;
63 struct list_head dt_maps; 69 struct list_head dt_maps;
70 struct kref users;
64}; 71};
65 72
66/** 73/**
@@ -148,6 +155,18 @@ struct pin_desc {
148#endif 155#endif
149}; 156};
150 157
158/**
159 * struct pinctrl_maps - a list item containing part of the mapping table
160 * @node: mapping table list node
161 * @maps: array of mapping table entries
162 * @num_maps: the number of entries in @maps
163 */
164struct pinctrl_maps {
165 struct list_head node;
166 struct pinctrl_map const *maps;
167 unsigned num_maps;
168};
169
151struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name); 170struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
152int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name); 171int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
153const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin); 172const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
@@ -164,5 +183,15 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
164 bool dup, bool locked); 183 bool dup, bool locked);
165void pinctrl_unregister_map(struct pinctrl_map const *map); 184void pinctrl_unregister_map(struct pinctrl_map const *map);
166 185
186extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
187extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
188
167extern struct mutex pinctrl_mutex; 189extern struct mutex pinctrl_mutex;
168extern struct list_head pinctrldev_list; 190extern struct list_head pinctrldev_list;
191extern struct list_head pinctrl_maps;
192
193#define for_each_maps(_maps_node_, _i_, _map_) \
194 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
195 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
196 _i_ < _maps_node_->num_maps; \
197 _i_++, _map_ = &_maps_node_->maps[_i_])
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index fe2d1af7cfa0..fd40a11ad645 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -141,6 +141,11 @@ static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
141 pctldev = find_pinctrl_by_of_node(np_pctldev); 141 pctldev = find_pinctrl_by_of_node(np_pctldev);
142 if (pctldev) 142 if (pctldev)
143 break; 143 break;
144 /* Do not defer probing of hogs (circular loop) */
145 if (np_pctldev == p->dev->of_node) {
146 of_node_put(np_pctldev);
147 return -ENODEV;
148 }
144 } 149 }
145 of_node_put(np_pctldev); 150 of_node_put(np_pctldev);
146 151
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 69aba3697287..428ea96a94d3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -588,7 +588,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
588{ 588{
589 const struct of_device_id *match = 589 const struct of_device_id *match =
590 of_match_device(dove_pinctrl_of_match, &pdev->dev); 590 of_match_device(dove_pinctrl_of_match, &pdev->dev);
591 pdev->dev.platform_data = match->data; 591 pdev->dev.platform_data = (void *)match->data;
592 592
593 /* 593 /*
594 * General MPP Configuration Register is part of pdma registers. 594 * General MPP Configuration Register is part of pdma registers.
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index f12084e18057..cdd483df673e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -66,9 +66,9 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
66 MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)), 66 MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
67 MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))), 67 MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))),
68 MPP_MODE(6, 68 MPP_MODE(6,
69 MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1, 1)), 69 MPP_VAR_FUNCTION(0x1, "sysrst", "out", V(1, 1, 1, 1, 1, 1)),
70 MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1, 1)), 70 MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1, 1)),
71 MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0, 0))), 71 MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0, 0))),
72 MPP_MODE(7, 72 MPP_MODE(7,
73 MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)), 73 MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
74 MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0, 1)), 74 MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0, 1)),
@@ -458,7 +458,7 @@ static int kirkwood_pinctrl_probe(struct platform_device *pdev)
458{ 458{
459 const struct of_device_id *match = 459 const struct of_device_id *match =
460 of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); 460 of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
461 pdev->dev.platform_data = match->data; 461 pdev->dev.platform_data = (void *)match->data;
462 return mvebu_pinctrl_probe(pdev); 462 return mvebu_pinctrl_probe(pdev);
463} 463}
464 464
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 833a36458157..06c304ac6f7d 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -41,11 +41,13 @@ struct pin_config_item conf_items[] = {
41 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL), 41 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
42 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL), 42 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
43 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL), 43 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
44 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_DISABLE, "input schmitt disabled", NULL), 44 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
45 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL), 45 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
46 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"), 46 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
47 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"), 47 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
48 PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL),
48 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"), 49 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
50 PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level"),
49}; 51};
50 52
51void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, 53void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index baee2cc46a17..ac8d382a79bb 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -574,6 +574,207 @@ static const struct file_operations pinconf_groups_ops = {
574 .release = single_release, 574 .release = single_release,
575}; 575};
576 576
577/* 32bit read/write ressources */
578#define MAX_NAME_LEN 16
579char dbg_pinname[MAX_NAME_LEN]; /* shared: name of the state of the pin*/
580char dbg_state_name[MAX_NAME_LEN]; /* shared: state of the pin*/
581static u32 dbg_config; /* shared: config to be read/set for the pin & state*/
582
583static int pinconf_dbg_pinname_print(struct seq_file *s, void *d)
584{
585 if (strlen(dbg_pinname))
586 seq_printf(s, "%s\n", dbg_pinname);
587 else
588 seq_printf(s, "No pin name set\n");
589 return 0;
590}
591
592static int pinconf_dbg_pinname_open(struct inode *inode, struct file *file)
593{
594 return single_open(file, pinconf_dbg_pinname_print, inode->i_private);
595}
596
597static int pinconf_dbg_pinname_write(struct file *file,
598 const char __user *user_buf, size_t count, loff_t *ppos)
599{
600 int err;
601
602 if (count > MAX_NAME_LEN)
603 return -EINVAL;
604
605 err = sscanf(user_buf, "%15s", dbg_pinname);
606
607 if (err != 1)
608 return -EINVAL;
609
610 return count;
611}
612
613static const struct file_operations pinconf_dbg_pinname_fops = {
614 .open = pinconf_dbg_pinname_open,
615 .write = pinconf_dbg_pinname_write,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
619 .owner = THIS_MODULE,
620};
621
622static int pinconf_dbg_state_print(struct seq_file *s, void *d)
623{
624 if (strlen(dbg_state_name))
625 seq_printf(s, "%s\n", dbg_pinname);
626 else
627 seq_printf(s, "No pin state set\n");
628 return 0;
629}
630
631static int pinconf_dbg_state_open(struct inode *inode, struct file *file)
632{
633 return single_open(file, pinconf_dbg_state_print, inode->i_private);
634}
635
636static int pinconf_dbg_state_write(struct file *file,
637 const char __user *user_buf, size_t count, loff_t *ppos)
638{
639 int err;
640
641 if (count > MAX_NAME_LEN)
642 return -EINVAL;
643
644 err = sscanf(user_buf, "%15s", dbg_state_name);
645
646 if (err != 1)
647 return -EINVAL;
648
649 return count;
650}
651
652static const struct file_operations pinconf_dbg_pinstate_fops = {
653 .open = pinconf_dbg_state_open,
654 .write = pinconf_dbg_state_write,
655 .read = seq_read,
656 .llseek = seq_lseek,
657 .release = single_release,
658 .owner = THIS_MODULE,
659};
660
661/**
662 * pinconf_dbg_config_print() - display the pinctrl config from the pinctrl
663 * map, of a pin/state pair based on pinname and state that have been
664 * selected with the debugfs entries pinconf-name and pinconf-state
665 * @s: contains the 32bits config to be written
666 * @d: not used
667 */
668static int pinconf_dbg_config_print(struct seq_file *s, void *d)
669{
670 struct pinctrl_maps *maps_node;
671 struct pinctrl_map const *map;
672 struct pinctrl_dev *pctldev = NULL;
673 struct pinconf_ops *confops = NULL;
674 int i, j;
675 bool found = false;
676
677 mutex_lock(&pinctrl_mutex);
678
679 /* Parse the pinctrl map and look for the elected pin/state */
680 for_each_maps(maps_node, i, map) {
681 if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
682 continue;
683
684 if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) > 0)
685 continue;
686
687 for (j = 0; j < map->data.configs.num_configs; j++) {
688 if (0 == strncmp(map->data.configs.group_or_pin,
689 dbg_pinname, MAX_NAME_LEN)) {
690 /* We found the right pin / state, read the
691 * config and store the pctldev */
692 dbg_config = map->data.configs.configs[j];
693 pctldev = get_pinctrl_dev_from_devname
694 (map->ctrl_dev_name);
695 found = true;
696 break;
697 }
698 }
699 }
700
701 mutex_unlock(&pinctrl_mutex);
702
703 if (found) {
704 seq_printf(s, "Config of %s in state %s: 0x%08X\n", dbg_pinname,
705 dbg_state_name, dbg_config);
706
707 if (pctldev)
708 confops = pctldev->desc->confops;
709
710 if (confops && confops->pin_config_config_dbg_show)
711 confops->pin_config_config_dbg_show(pctldev,
712 s, dbg_config);
713 } else {
714 seq_printf(s, "No pin found for defined name/state\n");
715 }
716
717 return 0;
718}
719
720static int pinconf_dbg_config_open(struct inode *inode, struct file *file)
721{
722 return single_open(file, pinconf_dbg_config_print, inode->i_private);
723}
724
725/**
726 * pinconf_dbg_config_write() - overwrite the pinctrl config in thepinctrl
727 * map, of a pin/state pair based on pinname and state that have been
728 * selected with the debugfs entries pinconf-name and pinconf-state
729 */
730static int pinconf_dbg_config_write(struct file *file,
731 const char __user *user_buf, size_t count, loff_t *ppos)
732{
733 int err;
734 unsigned long config;
735 struct pinctrl_maps *maps_node;
736 struct pinctrl_map const *map;
737 int i, j;
738
739 err = kstrtoul_from_user(user_buf, count, 0, &config);
740
741 if (err)
742 return err;
743
744 dbg_config = config;
745
746 mutex_lock(&pinctrl_mutex);
747
748 /* Parse the pinctrl map and look for the selected pin/state */
749 for_each_maps(maps_node, i, map) {
750 if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
751 continue;
752
753 if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) > 0)
754 continue;
755
756 /* we found the right pin / state, so overwrite config */
757 for (j = 0; j < map->data.configs.num_configs; j++) {
758 if (strncmp(map->data.configs.group_or_pin, dbg_pinname,
759 MAX_NAME_LEN) == 0)
760 map->data.configs.configs[j] = dbg_config;
761 }
762 }
763
764 mutex_unlock(&pinctrl_mutex);
765
766 return count;
767}
768
769static const struct file_operations pinconf_dbg_pinconfig_fops = {
770 .open = pinconf_dbg_config_open,
771 .write = pinconf_dbg_config_write,
772 .read = seq_read,
773 .llseek = seq_lseek,
774 .release = single_release,
775 .owner = THIS_MODULE,
776};
777
577void pinconf_init_device_debugfs(struct dentry *devroot, 778void pinconf_init_device_debugfs(struct dentry *devroot,
578 struct pinctrl_dev *pctldev) 779 struct pinctrl_dev *pctldev)
579{ 780{
@@ -581,6 +782,12 @@ void pinconf_init_device_debugfs(struct dentry *devroot,
581 devroot, pctldev, &pinconf_pins_ops); 782 devroot, pctldev, &pinconf_pins_ops);
582 debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO, 783 debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
583 devroot, pctldev, &pinconf_groups_ops); 784 devroot, pctldev, &pinconf_groups_ops);
785 debugfs_create_file("pinconf-name", (S_IRUGO | S_IWUSR | S_IWGRP),
786 devroot, pctldev, &pinconf_dbg_pinname_fops);
787 debugfs_create_file("pinconf-state", (S_IRUGO | S_IWUSR | S_IWGRP),
788 devroot, pctldev, &pinconf_dbg_pinstate_fops);
789 debugfs_create_file("pinconf-config", (S_IRUGO | S_IWUSR | S_IWGRP),
790 devroot, pctldev, &pinconf_dbg_pinconfig_fops);
584} 791}
585 792
586#endif 793#endif
diff --git a/drivers/pinctrl/pinctrl-ab8500.c b/drivers/pinctrl/pinctrl-ab8500.c
new file mode 100644
index 000000000000..3b471d87c211
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8500.c
@@ -0,0 +1,484 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/gpio.h>
13#include <linux/pinctrl/pinctrl.h>
14#include <linux/mfd/abx500/ab8500.h>
15#include "pinctrl-abx500.h"
16
17/* All the pins that can be used for GPIO and some other functions */
18#define ABX500_GPIO(offset) (offset)
19
20#define AB8500_PIN_T10 ABX500_GPIO(1)
21#define AB8500_PIN_T9 ABX500_GPIO(2)
22#define AB8500_PIN_U9 ABX500_GPIO(3)
23#define AB8500_PIN_W2 ABX500_GPIO(4)
24/* hole */
25#define AB8500_PIN_Y18 ABX500_GPIO(6)
26#define AB8500_PIN_AA20 ABX500_GPIO(7)
27#define AB8500_PIN_W18 ABX500_GPIO(8)
28#define AB8500_PIN_AA19 ABX500_GPIO(9)
29#define AB8500_PIN_U17 ABX500_GPIO(10)
30#define AB8500_PIN_AA18 ABX500_GPIO(11)
31#define AB8500_PIN_U16 ABX500_GPIO(12)
32#define AB8500_PIN_W17 ABX500_GPIO(13)
33#define AB8500_PIN_F14 ABX500_GPIO(14)
34#define AB8500_PIN_B17 ABX500_GPIO(15)
35#define AB8500_PIN_F15 ABX500_GPIO(16)
36#define AB8500_PIN_P5 ABX500_GPIO(17)
37#define AB8500_PIN_R5 ABX500_GPIO(18)
38#define AB8500_PIN_U5 ABX500_GPIO(19)
39#define AB8500_PIN_T5 ABX500_GPIO(20)
40#define AB8500_PIN_H19 ABX500_GPIO(21)
41#define AB8500_PIN_G20 ABX500_GPIO(22)
42#define AB8500_PIN_G19 ABX500_GPIO(23)
43#define AB8500_PIN_T14 ABX500_GPIO(24)
44#define AB8500_PIN_R16 ABX500_GPIO(25)
45#define AB8500_PIN_M16 ABX500_GPIO(26)
46#define AB8500_PIN_J6 ABX500_GPIO(27)
47#define AB8500_PIN_K6 ABX500_GPIO(28)
48#define AB8500_PIN_G6 ABX500_GPIO(29)
49#define AB8500_PIN_H6 ABX500_GPIO(30)
50#define AB8500_PIN_F5 ABX500_GPIO(31)
51#define AB8500_PIN_G5 ABX500_GPIO(32)
52/* hole */
53#define AB8500_PIN_R17 ABX500_GPIO(34)
54#define AB8500_PIN_W15 ABX500_GPIO(35)
55#define AB8500_PIN_A17 ABX500_GPIO(36)
56#define AB8500_PIN_E15 ABX500_GPIO(37)
57#define AB8500_PIN_C17 ABX500_GPIO(38)
58#define AB8500_PIN_E16 ABX500_GPIO(39)
59#define AB8500_PIN_T19 ABX500_GPIO(40)
60#define AB8500_PIN_U19 ABX500_GPIO(41)
61#define AB8500_PIN_U2 ABX500_GPIO(42)
62
63/* indicates the highest GPIO number */
64#define AB8500_GPIO_MAX_NUMBER 42
65
66/*
67 * The names of the pins are denoted by GPIO number and ball name, even
68 * though they can be used for other things than GPIO, this is the first
69 * column in the table of the data sheet and often used on schematics and
70 * such.
71 */
72static const struct pinctrl_pin_desc ab8500_pins[] = {
73 PINCTRL_PIN(AB8500_PIN_T10, "GPIO1_T10"),
74 PINCTRL_PIN(AB8500_PIN_T9, "GPIO2_T9"),
75 PINCTRL_PIN(AB8500_PIN_U9, "GPIO3_U9"),
76 PINCTRL_PIN(AB8500_PIN_W2, "GPIO4_W2"),
77 /* hole */
78 PINCTRL_PIN(AB8500_PIN_Y18, "GPIO6_Y18"),
79 PINCTRL_PIN(AB8500_PIN_AA20, "GPIO7_AA20"),
80 PINCTRL_PIN(AB8500_PIN_W18, "GPIO8_W18"),
81 PINCTRL_PIN(AB8500_PIN_AA19, "GPIO9_AA19"),
82 PINCTRL_PIN(AB8500_PIN_U17, "GPIO10_U17"),
83 PINCTRL_PIN(AB8500_PIN_AA18, "GPIO11_AA18"),
84 PINCTRL_PIN(AB8500_PIN_U16, "GPIO12_U16"),
85 PINCTRL_PIN(AB8500_PIN_W17, "GPIO13_W17"),
86 PINCTRL_PIN(AB8500_PIN_F14, "GPIO14_F14"),
87 PINCTRL_PIN(AB8500_PIN_B17, "GPIO15_B17"),
88 PINCTRL_PIN(AB8500_PIN_F15, "GPIO16_F15"),
89 PINCTRL_PIN(AB8500_PIN_P5, "GPIO17_P5"),
90 PINCTRL_PIN(AB8500_PIN_R5, "GPIO18_R5"),
91 PINCTRL_PIN(AB8500_PIN_U5, "GPIO19_U5"),
92 PINCTRL_PIN(AB8500_PIN_T5, "GPIO20_T5"),
93 PINCTRL_PIN(AB8500_PIN_H19, "GPIO21_H19"),
94 PINCTRL_PIN(AB8500_PIN_G20, "GPIO22_G20"),
95 PINCTRL_PIN(AB8500_PIN_G19, "GPIO23_G19"),
96 PINCTRL_PIN(AB8500_PIN_T14, "GPIO24_T14"),
97 PINCTRL_PIN(AB8500_PIN_R16, "GPIO25_R16"),
98 PINCTRL_PIN(AB8500_PIN_M16, "GPIO26_M16"),
99 PINCTRL_PIN(AB8500_PIN_J6, "GPIO27_J6"),
100 PINCTRL_PIN(AB8500_PIN_K6, "GPIO28_K6"),
101 PINCTRL_PIN(AB8500_PIN_G6, "GPIO29_G6"),
102 PINCTRL_PIN(AB8500_PIN_H6, "GPIO30_H6"),
103 PINCTRL_PIN(AB8500_PIN_F5, "GPIO31_F5"),
104 PINCTRL_PIN(AB8500_PIN_G5, "GPIO32_G5"),
105 /* hole */
106 PINCTRL_PIN(AB8500_PIN_R17, "GPIO34_R17"),
107 PINCTRL_PIN(AB8500_PIN_W15, "GPIO35_W15"),
108 PINCTRL_PIN(AB8500_PIN_A17, "GPIO36_A17"),
109 PINCTRL_PIN(AB8500_PIN_E15, "GPIO37_E15"),
110 PINCTRL_PIN(AB8500_PIN_C17, "GPIO38_C17"),
111 PINCTRL_PIN(AB8500_PIN_E16, "GPIO39_E16"),
112 PINCTRL_PIN(AB8500_PIN_T19, "GPIO40_T19"),
113 PINCTRL_PIN(AB8500_PIN_U19, "GPIO41_U19"),
114 PINCTRL_PIN(AB8500_PIN_U2, "GPIO42_U2"),
115};
116
117/*
118 * Maps local GPIO offsets to local pin numbers
119 */
120static const struct abx500_pinrange ab8500_pinranges[] = {
121 ABX500_PINRANGE(1, 4, ABX500_ALT_A),
122 ABX500_PINRANGE(6, 4, ABX500_ALT_A),
123 ABX500_PINRANGE(10, 4, ABX500_DEFAULT),
124 ABX500_PINRANGE(14, 12, ABX500_ALT_A),
125 ABX500_PINRANGE(26, 1, ABX500_DEFAULT),
126 ABX500_PINRANGE(27, 6, ABX500_ALT_A),
127 ABX500_PINRANGE(34, 1, ABX500_ALT_A),
128 ABX500_PINRANGE(35, 1, ABX500_DEFAULT),
129 ABX500_PINRANGE(36, 7, ABX500_ALT_A),
130};
131
132/*
133 * Read the pin group names like this:
134 * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
135 *
136 * The groups are arranged as sets per altfunction column, so we can
137 * mux in one group at a time by selecting the same altfunction for them
138 * all. When functions require pins on different altfunctions, you need
139 * to combine several groups.
140 */
141
142/* default column */
143static const unsigned sysclkreq2_d_1_pins[] = { AB8500_PIN_T10 };
144static const unsigned sysclkreq3_d_1_pins[] = { AB8500_PIN_T9 };
145static const unsigned sysclkreq4_d_1_pins[] = { AB8500_PIN_U9 };
146static const unsigned sysclkreq6_d_1_pins[] = { AB8500_PIN_W2 };
147static const unsigned ycbcr0123_d_1_pins[] = { AB8500_PIN_Y18, AB8500_PIN_AA20,
148 AB8500_PIN_W18, AB8500_PIN_AA19};
149static const unsigned gpio10_d_1_pins[] = { AB8500_PIN_U17 };
150static const unsigned gpio11_d_1_pins[] = { AB8500_PIN_AA18 };
151static const unsigned gpio12_d_1_pins[] = { AB8500_PIN_U16 };
152static const unsigned gpio13_d_1_pins[] = { AB8500_PIN_W17 };
153static const unsigned pwmout1_d_1_pins[] = { AB8500_PIN_F14 };
154static const unsigned pwmout2_d_1_pins[] = { AB8500_PIN_B17 };
155static const unsigned pwmout3_d_1_pins[] = { AB8500_PIN_F15 };
156
157/* audio data interface 1*/
158static const unsigned adi1_d_1_pins[] = { AB8500_PIN_P5, AB8500_PIN_R5,
159 AB8500_PIN_U5, AB8500_PIN_T5 };
160/* USBUICC */
161static const unsigned usbuicc_d_1_pins[] = { AB8500_PIN_H19, AB8500_PIN_G20,
162 AB8500_PIN_G19 };
163static const unsigned sysclkreq7_d_1_pins[] = { AB8500_PIN_T14 };
164static const unsigned sysclkreq8_d_1_pins[] = { AB8500_PIN_R16 };
165static const unsigned gpio26_d_1_pins[] = { AB8500_PIN_M16 };
166/* Digital microphone 1 and 2 */
167static const unsigned dmic12_d_1_pins[] = { AB8500_PIN_J6, AB8500_PIN_K6 };
168/* Digital microphone 3 and 4 */
169static const unsigned dmic34_d_1_pins[] = { AB8500_PIN_G6, AB8500_PIN_H6 };
170/* Digital microphone 5 and 6 */
171static const unsigned dmic56_d_1_pins[] = { AB8500_PIN_F5, AB8500_PIN_G5 };
172static const unsigned extcpena_d_1_pins[] = { AB8500_PIN_R17 };
173static const unsigned gpio35_d_1_pins[] = { AB8500_PIN_W15 };
174/* APE SPI */
175static const unsigned apespi_d_1_pins[] = { AB8500_PIN_A17, AB8500_PIN_E15,
176 AB8500_PIN_C17, AB8500_PIN_E16};
177/* modem SDA/SCL */
178static const unsigned modsclsda_d_1_pins[] = { AB8500_PIN_T19, AB8500_PIN_U19 };
179static const unsigned sysclkreq5_d_1_pins[] = { AB8500_PIN_U2 };
180
/* Altfunction A column */
static const unsigned gpio1_a_1_pins[] = { AB8500_PIN_T10 };
static const unsigned gpio2_a_1_pins[] = { AB8500_PIN_T9 };
static const unsigned gpio3_a_1_pins[] = { AB8500_PIN_U9 };
static const unsigned gpio4_a_1_pins[] = { AB8500_PIN_W2 };
static const unsigned gpio6_a_1_pins[] = { AB8500_PIN_Y18 };
static const unsigned gpio7_a_1_pins[] = { AB8500_PIN_AA20 };
static const unsigned gpio8_a_1_pins[] = { AB8500_PIN_W18 };
static const unsigned gpio9_a_1_pins[] = { AB8500_PIN_AA19 };
/* YCbCr4 YCbCr5 YCbCr6 YCbCr7 */
static const unsigned ycbcr4567_a_1_pins[] = { AB8500_PIN_U17, AB8500_PIN_AA18,
					AB8500_PIN_U16, AB8500_PIN_W17};
static const unsigned gpio14_a_1_pins[] = { AB8500_PIN_F14 };
static const unsigned gpio15_a_1_pins[] = { AB8500_PIN_B17 };
static const unsigned gpio16_a_1_pins[] = { AB8500_PIN_F15 };
static const unsigned gpio17_a_1_pins[] = { AB8500_PIN_P5 };
static const unsigned gpio18_a_1_pins[] = { AB8500_PIN_R5 };
static const unsigned gpio19_a_1_pins[] = { AB8500_PIN_U5 };
static const unsigned gpio20_a_1_pins[] = { AB8500_PIN_T5 };
static const unsigned gpio21_a_1_pins[] = { AB8500_PIN_H19 };
static const unsigned gpio22_a_1_pins[] = { AB8500_PIN_G20 };
static const unsigned gpio23_a_1_pins[] = { AB8500_PIN_G19 };
static const unsigned gpio24_a_1_pins[] = { AB8500_PIN_T14 };
static const unsigned gpio25_a_1_pins[] = { AB8500_PIN_R16 };
static const unsigned gpio27_a_1_pins[] = { AB8500_PIN_J6 };
static const unsigned gpio28_a_1_pins[] = { AB8500_PIN_K6 };
static const unsigned gpio29_a_1_pins[] = { AB8500_PIN_G6 };
static const unsigned gpio30_a_1_pins[] = { AB8500_PIN_H6 };
static const unsigned gpio31_a_1_pins[] = { AB8500_PIN_F5 };
static const unsigned gpio32_a_1_pins[] = { AB8500_PIN_G5 };
static const unsigned gpio34_a_1_pins[] = { AB8500_PIN_R17 };
static const unsigned gpio36_a_1_pins[] = { AB8500_PIN_A17 };
static const unsigned gpio37_a_1_pins[] = { AB8500_PIN_E15 };
static const unsigned gpio38_a_1_pins[] = { AB8500_PIN_C17 };
static const unsigned gpio39_a_1_pins[] = { AB8500_PIN_E16 };
static const unsigned gpio40_a_1_pins[] = { AB8500_PIN_T19 };
static const unsigned gpio41_a_1_pins[] = { AB8500_PIN_U19 };
static const unsigned gpio42_a_1_pins[] = { AB8500_PIN_U2 };

/* Altfunction B column */
static const unsigned hiqclkena_b_1_pins[] = { AB8500_PIN_U17 };
static const unsigned usbuiccpd_b_1_pins[] = { AB8500_PIN_AA18 };
static const unsigned i2ctrig1_b_1_pins[] = { AB8500_PIN_U16 };
static const unsigned i2ctrig2_b_1_pins[] = { AB8500_PIN_W17 };

/* Altfunction C column */
static const unsigned usbvdat_c_1_pins[] = { AB8500_PIN_W17 };
228
229
/*
 * Build one abx500_pingroup entry from the name of a *_pins array (a)
 * and the altsetting (b) that muxes the whole group in.
 */
#define AB8500_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
232
233static const struct abx500_pingroup ab8500_groups[] = {
234 /* default column */
235 AB8500_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
236 AB8500_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
237 AB8500_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
238 AB8500_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
239 AB8500_PIN_GROUP(ycbcr0123_d_1, ABX500_DEFAULT),
240 AB8500_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
241 AB8500_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
242 AB8500_PIN_GROUP(gpio12_d_1, ABX500_DEFAULT),
243 AB8500_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
244 AB8500_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
245 AB8500_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
246 AB8500_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
247 AB8500_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
248 AB8500_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
249 AB8500_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
250 AB8500_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
251 AB8500_PIN_GROUP(gpio26_d_1, ABX500_DEFAULT),
252 AB8500_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
253 AB8500_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
254 AB8500_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
255 AB8500_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
256 AB8500_PIN_GROUP(gpio35_d_1, ABX500_DEFAULT),
257 AB8500_PIN_GROUP(apespi_d_1, ABX500_DEFAULT),
258 AB8500_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
259 AB8500_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
260 /* Altfunction A column */
261 AB8500_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
262 AB8500_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
263 AB8500_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
264 AB8500_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
265 AB8500_PIN_GROUP(gpio6_a_1, ABX500_ALT_A),
266 AB8500_PIN_GROUP(gpio7_a_1, ABX500_ALT_A),
267 AB8500_PIN_GROUP(gpio8_a_1, ABX500_ALT_A),
268 AB8500_PIN_GROUP(gpio9_a_1, ABX500_ALT_A),
269 AB8500_PIN_GROUP(ycbcr4567_a_1, ABX500_ALT_A),
270 AB8500_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
271 AB8500_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
272 AB8500_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
273 AB8500_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
274 AB8500_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
275 AB8500_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
276 AB8500_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
277 AB8500_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
278 AB8500_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
279 AB8500_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
280 AB8500_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
281 AB8500_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
282 AB8500_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
283 AB8500_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
284 AB8500_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
285 AB8500_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
286 AB8500_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
287 AB8500_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
288 AB8500_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
289 AB8500_PIN_GROUP(gpio36_a_1, ABX500_ALT_A),
290 AB8500_PIN_GROUP(gpio37_a_1, ABX500_ALT_A),
291 AB8500_PIN_GROUP(gpio38_a_1, ABX500_ALT_A),
292 AB8500_PIN_GROUP(gpio39_a_1, ABX500_ALT_A),
293 AB8500_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
294 AB8500_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
295 AB8500_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
296 /* Altfunction B column */
297 AB8500_PIN_GROUP(hiqclkena_b_1, ABX500_ALT_B),
298 AB8500_PIN_GROUP(usbuiccpd_b_1, ABX500_ALT_B),
299 AB8500_PIN_GROUP(i2ctrig1_b_1, ABX500_ALT_B),
300 AB8500_PIN_GROUP(i2ctrig2_b_1, ABX500_ALT_B),
301 /* Altfunction C column */
302 AB8500_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
303};
304
305/* We use this macro to define the groups applicable to a function */
306#define AB8500_FUNC_GROUPS(a, b...) \
307static const char * const a##_groups[] = { b };
308
309AB8500_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
310 "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
311 "sysclkreq7_d_1", "sysclkreq8_d_1");
312AB8500_FUNC_GROUPS(ycbcr, "ycbcr0123_d_1", "ycbcr4567_a_1");
313AB8500_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
314 "gpio6_a_1", "gpio7_a_1", "gpio8_a_1", "gpio9_a_1",
315 "gpio10_d_1", "gpio11_d_1", "gpio12_d_1", "gpio13_d_1",
316 "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
317 "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio21_a_1",
318 "gpio22_a_1", "gpio23_a_1", "gpio24_a_1", "gpio25_a_1",
319 "gpio26_d_1", "gpio27_a_1", "gpio28_a_1", "gpio29_a_1",
320 "gpio30_a_1", "gpio31_a_1", "gpio32_a_1", "gpio34_a_1",
321 "gpio35_d_1", "gpio36_a_1", "gpio37_a_1", "gpio38_a_1",
322 "gpio39_a_1", "gpio40_a_1", "gpio41_a_1", "gpio42_a_1");
323AB8500_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
324AB8500_FUNC_GROUPS(adi1, "adi1_d_1");
325AB8500_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_b_1");
326AB8500_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
327AB8500_FUNC_GROUPS(extcpena, "extcpena_d_1");
328AB8500_FUNC_GROUPS(apespi, "apespi_d_1");
329AB8500_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
330AB8500_FUNC_GROUPS(hiqclkena, "hiqclkena_b_1");
331AB8500_FUNC_GROUPS(i2ctrig, "i2ctrig1_b_1", "i2ctrig2_b_1");
332AB8500_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
333
/* Build one abx500_function entry from the name of a *_groups array */
#define FUNCTION(fname)					\
	{						\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}

/* Every selectable function on the AB8500 and the groups it can use */
static const struct abx500_function ab8500_functions[] = {
	FUNCTION(sysclkreq),
	FUNCTION(ycbcr),
	FUNCTION(gpio),
	FUNCTION(pwmout),
	FUNCTION(adi1),
	FUNCTION(usbuicc),
	FUNCTION(dmic),
	FUNCTION(extcpena),
	FUNCTION(apespi),
	FUNCTION(modsclsda),
	FUNCTION(hiqclkena),
	FUNCTION(i2ctrig),
	FUNCTION(usbvdat),
};
356
357/*
358 * this table translates what's is in the AB8500 specification regarding the
359 * balls alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
360 * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
361 * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
362 *
363 * example :
364 *
365 * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 0, 1 ,2),
366 * means that pin AB8500_PIN_W17 (pin 13) supports 4 mux (default/ALT_A,
367 * ALT_B and ALT_C), so GPIOSEL and ALTERNATFUNC registers are used to
368 * select the mux. ALTA, ALTB and ALTC val indicates values to write in
369 * ALTERNATFUNC register. We need to specifies these values as SOC
370 * designers didn't apply the same logic on how to select mux in the
371 * ABx500 family.
372 *
373 * As this pins supports at least ALT_B mux, default mux is
374 * selected by writing 1 in GPIOSEL bit :
375 *
376 * | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
377 * default | 1 | 0 | 0
378 * alt_A | 0 | 0 | 0
379 * alt_B | 0 | 0 | 1
380 * alt_C | 0 | 1 | 0
381 *
382 * ALTERNATE_FUNCTIONS(8, 7, UNUSED, UNUSED),
383 * means that pin AB8500_PIN_W18 (pin 8) supports 2 mux, so only GPIOSEL
384 * register is used to select the mux. As this pins doesn't support at
385 * least ALT_B mux, default mux is by writing 0 in GPIOSEL bit :
386 *
387 * | GPIOSEL bit=7 | alternatfunc bit2= | alternatfunc bit1=
388 * default | 0 | 0 | 0
389 * alt_A | 1 | 0 | 0
390 */
391
392struct alternate_functions ab8500_alternate_functions[AB8500_GPIO_MAX_NUMBER + 1] = {
393 ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
394 ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
395 ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
396 ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
397 ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
398 /* bit 4 reserved */
399 ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
400 ALTERNATE_FUNCTIONS(6, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO6, altA controlled by bit 5*/
401 ALTERNATE_FUNCTIONS(7, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO7, altA controlled by bit 6*/
402 ALTERNATE_FUNCTIONS(8, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO8, altA controlled by bit 7*/
403
404 ALTERNATE_FUNCTIONS(9, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO9, altA controlled by bit 0*/
405 ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 0, 1, 0), /* GPIO10, altA and altB controlled by bit 0 */
406 ALTERNATE_FUNCTIONS(11, 2, 1, UNUSED, 0, 1, 0), /* GPIO11, altA and altB controlled by bit 1 */
407 ALTERNATE_FUNCTIONS(12, 3, 2, UNUSED, 0, 1, 0), /* GPIO12, altA and altB controlled by bit 2 */
408 ALTERNATE_FUNCTIONS(13, 4, 3, 4, 0, 1, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
409 ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
410 ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
411 ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
412 /*
413 * pins 17 to 20 are special case, only bit 0 is used to select
414 * alternate function for these 4 pins.
415 * bits 1 to 3 are reserved
416 */
417 ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
418 ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
419 ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
420 ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
421 ALTERNATE_FUNCTIONS(21, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
422 ALTERNATE_FUNCTIONS(22, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
423 ALTERNATE_FUNCTIONS(23, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
424 ALTERNATE_FUNCTIONS(24, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
425
426 ALTERNATE_FUNCTIONS(25, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
427 /* pin 26 special case, no alternate function, bit 1 reserved */
428 ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO26 */
429 ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
430 ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
431 ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
432 ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
433 ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
434 ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
435
436 ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
437 ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
438 /* pin 35 special case, no alternate function, bit 2 reserved */
439 ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO35 */
440 ALTERNATE_FUNCTIONS(36, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO36, altA controlled by bit 3 */
441 ALTERNATE_FUNCTIONS(37, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO37, altA controlled by bit 4 */
442 ALTERNATE_FUNCTIONS(38, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO38, altA controlled by bit 5 */
443 ALTERNATE_FUNCTIONS(39, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO39, altA controlled by bit 6 */
444 ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
445
446 ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
447 ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
448};
449
450/*
451 * Only some GPIOs are interrupt capable, and they are
452 * organized in discontiguous clusters:
453 *
454 * GPIO6 to GPIO13
455 * GPIO24 and GPIO25
456 * GPIO36 to GPIO41
457 */
struct abx500_gpio_irq_cluster ab8500_gpio_irq_cluster[] = {
	/*
	 * GPIO_IRQ_CLUSTER(first GPIO, last GPIO, IRQ offset) — the third
	 * argument is presumably the rising-edge interrupt of the cluster's
	 * first GPIO (the *R names suggest so); confirm in pinctrl-abx500.h.
	 */
	GPIO_IRQ_CLUSTER(6, 13, AB8500_INT_GPIO6R),
	GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
	GPIO_IRQ_CLUSTER(36, 41, AB8500_INT_GPIO36R),
};
463
/* Ties all the AB8500 tables above together for the shared abx500 core */
static struct abx500_pinctrl_soc_data ab8500_soc = {
	.gpio_ranges = ab8500_pinranges,
	.gpio_num_ranges = ARRAY_SIZE(ab8500_pinranges),
	.pins = ab8500_pins,
	.npins = ARRAY_SIZE(ab8500_pins),
	.functions = ab8500_functions,
	.nfunctions = ARRAY_SIZE(ab8500_functions),
	.groups = ab8500_groups,
	.ngroups = ARRAY_SIZE(ab8500_groups),
	.alternate_functions = ab8500_alternate_functions,
	.gpio_irq_cluster = ab8500_gpio_irq_cluster,
	.ngpio_irq_cluster = ARRAY_SIZE(ab8500_gpio_irq_cluster),
	/* IRQ offsets of the first IRQ-capable GPIO (GPIO6), rising/falling */
	.irq_gpio_rising_offset = AB8500_INT_GPIO6R,
	.irq_gpio_falling_offset = AB8500_INT_GPIO6F,
	.irq_gpio_factor = 1,
};
480
/* Hand the AB8500 pin/function description over to the abx500 core driver */
void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
{
	*soc = &ab8500_soc;
}
diff --git a/drivers/pinctrl/pinctrl-ab8505.c b/drivers/pinctrl/pinctrl-ab8505.c
new file mode 100644
index 000000000000..3a4238e879e3
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8505.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/gpio.h>
13#include <linux/pinctrl/pinctrl.h>
14#include <linux/mfd/abx500/ab8500.h>
15#include "pinctrl-abx500.h"
16
17/* All the pins that can be used for GPIO and some other functions */
18#define ABX500_GPIO(offset) (offset)
19
20#define AB8505_PIN_N4 ABX500_GPIO(1)
21#define AB8505_PIN_R5 ABX500_GPIO(2)
22#define AB8505_PIN_P5 ABX500_GPIO(3)
23/* hole */
24#define AB8505_PIN_B16 ABX500_GPIO(10)
25#define AB8505_PIN_B17 ABX500_GPIO(11)
26/* hole */
27#define AB8505_PIN_D17 ABX500_GPIO(13)
28#define AB8505_PIN_C16 ABX500_GPIO(14)
29/* hole */
30#define AB8505_PIN_P2 ABX500_GPIO(17)
31#define AB8505_PIN_N3 ABX500_GPIO(18)
32#define AB8505_PIN_T1 ABX500_GPIO(19)
33#define AB8505_PIN_P3 ABX500_GPIO(20)
34/* hole */
35#define AB8505_PIN_H14 ABX500_GPIO(34)
36/* hole */
37#define AB8505_PIN_J15 ABX500_GPIO(40)
38#define AB8505_PIN_J14 ABX500_GPIO(41)
39/* hole */
40#define AB8505_PIN_L4 ABX500_GPIO(50)
41/* hole */
42#define AB8505_PIN_D16 ABX500_GPIO(52)
43#define AB8505_PIN_D15 ABX500_GPIO(53)
44
45/* indicates the higher GPIO number */
46#define AB8505_GPIO_MAX_NUMBER 53
47
48/*
49 * The names of the pins are denoted by GPIO number and ball name, even
50 * though they can be used for other things than GPIO, this is the first
51 * column in the table of the data sheet and often used on schematics and
52 * such.
53 */
54static const struct pinctrl_pin_desc ab8505_pins[] = {
55 PINCTRL_PIN(AB8505_PIN_N4, "GPIO1_N4"),
56 PINCTRL_PIN(AB8505_PIN_R5, "GPIO2_R5"),
57 PINCTRL_PIN(AB8505_PIN_P5, "GPIO3_P5"),
58/* hole */
59 PINCTRL_PIN(AB8505_PIN_B16, "GPIO10_B16"),
60 PINCTRL_PIN(AB8505_PIN_B17, "GPIO11_B17"),
61/* hole */
62 PINCTRL_PIN(AB8505_PIN_D17, "GPIO13_D17"),
63 PINCTRL_PIN(AB8505_PIN_C16, "GPIO14_C16"),
64/* hole */
65 PINCTRL_PIN(AB8505_PIN_P2, "GPIO17_P2"),
66 PINCTRL_PIN(AB8505_PIN_N3, "GPIO18_N3"),
67 PINCTRL_PIN(AB8505_PIN_T1, "GPIO19_T1"),
68 PINCTRL_PIN(AB8505_PIN_P3, "GPIO20_P3"),
69/* hole */
70 PINCTRL_PIN(AB8505_PIN_H14, "GPIO34_H14"),
71/* hole */
72 PINCTRL_PIN(AB8505_PIN_J15, "GPIO40_J15"),
73 PINCTRL_PIN(AB8505_PIN_J14, "GPIO41_J14"),
74/* hole */
75 PINCTRL_PIN(AB8505_PIN_L4, "GPIO50_L4"),
76/* hole */
77 PINCTRL_PIN(AB8505_PIN_D16, "GPIO52_D16"),
78 PINCTRL_PIN(AB8505_PIN_D15, "GPIO53_D15"),
79};
80
81/*
82 * Maps local GPIO offsets to local pin numbers
83 */
static const struct abx500_pinrange ab8505_pinranges[] = {
	/*
	 * ABX500_PINRANGE(first GPIO offset, number of consecutive pins,
	 * altfunction) — same layout as the AB8500 table; the third
	 * argument is presumably the altsetting needed for GPIO use.
	 */
	ABX500_PINRANGE(1, 3, ABX500_ALT_A),
	ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
	ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(14, 1, ABX500_ALT_A),
	ABX500_PINRANGE(17, 4, ABX500_ALT_A),
	ABX500_PINRANGE(34, 1, ABX500_ALT_A),
	ABX500_PINRANGE(40, 2, ABX500_ALT_A),
	ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(52, 2, ABX500_ALT_A),
};
95
96/*
97 * Read the pin group names like this:
98 * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
99 *
100 * The groups are arranged as sets per altfunction column, so we can
101 * mux in one group at a time by selecting the same altfunction for them
102 * all. When functions require pins on different altfunctions, you need
103 * to combine several groups.
104 */
105
106/* default column */
107static const unsigned sysclkreq2_d_1_pins[] = { AB8505_PIN_N4 };
108static const unsigned sysclkreq3_d_1_pins[] = { AB8505_PIN_R5 };
109static const unsigned sysclkreq4_d_1_pins[] = { AB8505_PIN_P5 };
110static const unsigned gpio10_d_1_pins[] = { AB8505_PIN_B16 };
111static const unsigned gpio11_d_1_pins[] = { AB8505_PIN_B17 };
112static const unsigned gpio13_d_1_pins[] = { AB8505_PIN_D17 };
113static const unsigned pwmout1_d_1_pins[] = { AB8505_PIN_C16 };
114/* audio data interface 2*/
115static const unsigned adi2_d_1_pins[] = { AB8505_PIN_P2, AB8505_PIN_N3,
116 AB8505_PIN_T1, AB8505_PIN_P3 };
117static const unsigned extcpena_d_1_pins[] = { AB8505_PIN_H14 };
118/* modem SDA/SCL */
119static const unsigned modsclsda_d_1_pins[] = { AB8505_PIN_J15, AB8505_PIN_J14 };
120static const unsigned gpio50_d_1_pins[] = { AB8505_PIN_L4 };
121static const unsigned resethw_d_1_pins[] = { AB8505_PIN_D16 };
122static const unsigned service_d_1_pins[] = { AB8505_PIN_D15 };
123
/* Altfunction A column */
static const unsigned gpio1_a_1_pins[] = { AB8505_PIN_N4 };
static const unsigned gpio2_a_1_pins[] = { AB8505_PIN_R5 };
static const unsigned gpio3_a_1_pins[] = { AB8505_PIN_P5 };
static const unsigned hiqclkena_a_1_pins[] = { AB8505_PIN_B16 };
static const unsigned pdmclk_a_1_pins[] = { AB8505_PIN_B17 };
static const unsigned uarttxdata_a_1_pins[] = { AB8505_PIN_D17 };
static const unsigned gpio14_a_1_pins[] = { AB8505_PIN_C16 };
static const unsigned gpio17_a_1_pins[] = { AB8505_PIN_P2 };
static const unsigned gpio18_a_1_pins[] = { AB8505_PIN_N3 };
static const unsigned gpio19_a_1_pins[] = { AB8505_PIN_T1 };
static const unsigned gpio20_a_1_pins[] = { AB8505_PIN_P3 };
static const unsigned gpio34_a_1_pins[] = { AB8505_PIN_H14 };
static const unsigned gpio40_a_1_pins[] = { AB8505_PIN_J15 };
static const unsigned gpio41_a_1_pins[] = { AB8505_PIN_J14 };
static const unsigned uartrxdata_a_1_pins[] = { AB8505_PIN_J14 };
static const unsigned gpio50_a_1_pins[] = { AB8505_PIN_L4 };
static const unsigned gpio52_a_1_pins[] = { AB8505_PIN_D16 };
static const unsigned gpio53_a_1_pins[] = { AB8505_PIN_D15 };

/* Altfunction B column */
static const unsigned pdmdata_b_1_pins[] = { AB8505_PIN_B16 };
static const unsigned extvibrapwm1_b_1_pins[] = { AB8505_PIN_D17 };
static const unsigned extvibrapwm2_b_1_pins[] = { AB8505_PIN_L4 };

/* Altfunction C column */
static const unsigned usbvdat_c_1_pins[] = { AB8505_PIN_D17 };
151
152#define AB8505_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
153 .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
154
155static const struct abx500_pingroup ab8505_groups[] = {
156 AB8505_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
157 AB8505_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
158 AB8505_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
159 AB8505_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
160 AB8505_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
161 AB8505_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
162 AB8505_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
163 AB8505_PIN_GROUP(adi2_d_1, ABX500_DEFAULT),
164 AB8505_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
165 AB8505_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
166 AB8505_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
167 AB8505_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
168 AB8505_PIN_GROUP(service_d_1, ABX500_DEFAULT),
169 AB8505_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
170 AB8505_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
171 AB8505_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
172 AB8505_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
173 AB8505_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
174 AB8505_PIN_GROUP(uarttxdata_a_1, ABX500_ALT_A),
175 AB8505_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
176 AB8505_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
177 AB8505_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
178 AB8505_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
179 AB8505_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
180 AB8505_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
181 AB8505_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
182 AB8505_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
183 AB8505_PIN_GROUP(uartrxdata_a_1, ABX500_ALT_A),
184 AB8505_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
185 AB8505_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
186 AB8505_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
187 AB8505_PIN_GROUP(extvibrapwm1_b_1, ABX500_ALT_B),
188 AB8505_PIN_GROUP(extvibrapwm2_b_1, ABX500_ALT_B),
189 AB8505_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
190};
191
192/* We use this macro to define the groups applicable to a function */
193#define AB8505_FUNC_GROUPS(a, b...) \
194static const char * const a##_groups[] = { b };
195
196AB8505_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
197 "sysclkreq4_d_1");
198AB8505_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1",
199 "gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
200 "gpio17_a_1", "gpio18_a_1", "gpio19_a_1", "gpio20_a_1",
201 "gpio34_a_1", "gpio40_a_1", "gpio41_a_1", "gpio50_d_1",
202 "gpio52_a_1", "gpio53_a_1");
203AB8505_FUNC_GROUPS(pwmout, "pwmout1_d_1");
204AB8505_FUNC_GROUPS(adi2, "adi2_d_1");
205AB8505_FUNC_GROUPS(extcpena, "extcpena_d_1");
206AB8505_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
207AB8505_FUNC_GROUPS(resethw, "resethw_d_1");
208AB8505_FUNC_GROUPS(service, "service_d_1");
209AB8505_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
210AB8505_FUNC_GROUPS(pdm, "pdmclk_a_1", "pdmdata_b_1");
211AB8505_FUNC_GROUPS(uartdata, "uarttxdata_a_1", "uartrxdata_a_1");
212AB8505_FUNC_GROUPS(extvibra, "extvibrapwm1_b_1", "extvibrapwm2_b_1");
213AB8505_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
214
215#define FUNCTION(fname) \
216 { \
217 .name = #fname, \
218 .groups = fname##_groups, \
219 .ngroups = ARRAY_SIZE(fname##_groups), \
220 }
221
222static const struct abx500_function ab8505_functions[] = {
223 FUNCTION(sysclkreq),
224 FUNCTION(gpio),
225 FUNCTION(pwmout),
226 FUNCTION(adi2),
227 FUNCTION(extcpena),
228 FUNCTION(modsclsda),
229 FUNCTION(resethw),
230 FUNCTION(service),
231 FUNCTION(hiqclkena),
232 FUNCTION(pdm),
233 FUNCTION(uartdata),
234 FUNCTION(extvibra),
235 FUNCTION(extvibra),
236 FUNCTION(usbvdat),
237};
238
239/*
240 * this table translates what's is in the AB8505 specification regarding the
241 * balls alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
242 * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
243 * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
244 *
245 * example :
246 *
247 * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2),
248 * means that pin AB8505_PIN_D18 (pin 13) supports 4 mux (default/ALT_A,
249 * ALT_B and ALT_C), so GPIOSEL and ALTERNATFUNC registers are used to
250 * select the mux. ALTA, ALTB and ALTC val indicates values to write in
251 * ALTERNATFUNC register. We need to specifies these values as SOC
252 * designers didn't apply the same logic on how to select mux in the
253 * ABx500 family.
254 *
255 * As this pins supports at least ALT_B mux, default mux is
256 * selected by writing 1 in GPIOSEL bit :
257 *
258 * | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
259 * default | 1 | 0 | 0
260 * alt_A | 0 | 0 | 1
261 * alt_B | 0 | 0 | 0
262 * alt_C | 0 | 1 | 0
263 *
264 * ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED),
265 * means that pin AB9540_PIN_R4 (pin 1) supports 2 mux, so only GPIOSEL
266 * register is used to select the mux. As this pins doesn't support at
267 * least ALT_B mux, default mux is by writing 0 in GPIOSEL bit :
268 *
269 * | GPIOSEL bit=0 | alternatfunc bit2= | alternatfunc bit1=
270 * default | 0 | 0 | 0
271 * alt_A | 1 | 0 | 0
272 */
273
struct alternate_functions ab8505_alternate_functions[AB8505_GPIO_MAX_NUMBER + 1] = {
	ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
	ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(4, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO4, bit 3 reserved */
	ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5, bit 4 reserved */
	ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6, bit 5 reserved */
	ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7, bit 6 reserved */
	ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8, bit 7 reserved */

	ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9, bit 0 reserved */
	ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
	ALTERNATE_FUNCTIONS(11, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12, bit 3 reserved */
	ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
	ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(15, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO15, bit 6 reserved */
	ALTERNATE_FUNCTIONS(16, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO16, bit 7 reserved */
	/*
	 * pins 17 to 20 are special case, only bit 0 is used to select
	 * alternate function for these 4 pins.
	 * bits 1 to 3 are reserved
	 */
	ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21, bit 4 reserved */
	ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22, bit 5 reserved */
	ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23, bit 6 reserved */
	ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24, bit 7 reserved */

	ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25, bit 0 reserved */
	ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26, bit 1 reserved */
	ALTERNATE_FUNCTIONS(27, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO27, bit 2 reserved */
	ALTERNATE_FUNCTIONS(28, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO28, bit 3 reserved */
	ALTERNATE_FUNCTIONS(29, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO29, bit 4 reserved */
	ALTERNATE_FUNCTIONS(30, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO30, bit 5 reserved */
	ALTERNATE_FUNCTIONS(31, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO31, bit 6 reserved */
	ALTERNATE_FUNCTIONS(32, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO32, bit 7 reserved */

	ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33, bit 0 reserved */
	ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35, bit 2 reserved */
	ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36, bit 3 reserved */
	ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37, bit 4 reserved */
	ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38, bit 5 reserved */
	ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39, bit 6 reserved */
	ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */

	ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(42, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO42, bit 1 reserved */
	ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43, bit 2 reserved */
	ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44, bit 3 reserved */
	ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45, bit 4 reserved */
	ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46, bit 5 reserved */
	ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47, bit 6 reserved */
	ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48, bit 7 reserved */

	ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49, bit 0 reserved */
	ALTERNATE_FUNCTIONS(50, 1, 2, UNUSED, 1, 0, 0), /* GPIO50, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(51, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO51, bit 2 reserved */
	ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
};
340
341/*
342 * For AB8505 Only some GPIOs are interrupt capable, and they are
343 * organized in discontiguous clusters:
344 *
345 * GPIO10 to GPIO11
346 * GPIO13
347 * GPIO40 and GPIO41
348 * GPIO50
349 * GPIO52 to GPIO53
350 */
struct abx500_gpio_irq_cluster ab8505_gpio_irq_cluster[] = {
	/*
	 * GPIO_IRQ_CLUSTER(first GPIO, last GPIO, IRQ offset). The last two
	 * clusters use AB9540_* IRQ names — presumably the AB8505 shares
	 * those interrupt offsets with the AB9540; confirm against the
	 * abx500 MFD IRQ definitions.
	 */
	GPIO_IRQ_CLUSTER(10, 11, AB8500_INT_GPIO10R),
	GPIO_IRQ_CLUSTER(13, 13, AB8500_INT_GPIO13R),
	GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
	GPIO_IRQ_CLUSTER(50, 50, AB9540_INT_GPIO50R),
	GPIO_IRQ_CLUSTER(52, 53, AB9540_INT_GPIO52R),
};
358
/* Ties all the AB8505 tables above together for the shared abx500 core */
static struct abx500_pinctrl_soc_data ab8505_soc = {
	.gpio_ranges = ab8505_pinranges,
	.gpio_num_ranges = ARRAY_SIZE(ab8505_pinranges),
	.pins = ab8505_pins,
	.npins = ARRAY_SIZE(ab8505_pins),
	.functions = ab8505_functions,
	.nfunctions = ARRAY_SIZE(ab8505_functions),
	.groups = ab8505_groups,
	.ngroups = ARRAY_SIZE(ab8505_groups),
	.alternate_functions = ab8505_alternate_functions,
	.gpio_irq_cluster = ab8505_gpio_irq_cluster,
	.ngpio_irq_cluster = ARRAY_SIZE(ab8505_gpio_irq_cluster),
	/*
	 * NOTE(review): the IRQ base offsets reference AB8500 GPIO6 even
	 * though the lowest IRQ-capable AB8505 GPIO is 10 — this looks like
	 * the common abx500 base used by the core's offset arithmetic, but
	 * confirm against the irq handling in pinctrl-abx500.c.
	 */
	.irq_gpio_rising_offset = AB8500_INT_GPIO6R,
	.irq_gpio_falling_offset = AB8500_INT_GPIO6F,
	.irq_gpio_factor = 1,
};
375
/* Hand the AB8505 pin/function description over to the abx500 core driver */
void
abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
{
	*soc = &ab8505_soc;
}
diff --git a/drivers/pinctrl/pinctrl-ab8540.c b/drivers/pinctrl/pinctrl-ab8540.c
new file mode 100644
index 000000000000..8ee1e8d95f65
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab8540.c
@@ -0,0 +1,407 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/gpio.h>
13#include <linux/pinctrl/pinctrl.h>
14#include <linux/mfd/abx500/ab8500.h>
15#include "pinctrl-abx500.h"
16
/* All the pins that can be used for GPIO and some other functions */
/*
 * Pins are named after their BGA ball; the ABX500_GPIO() offset is the
 * GPIO number used throughout this file.  Gaps in the numbering ("hole")
 * are GPIO numbers that do not exist on the AB8540.
 */
#define ABX500_GPIO(offset)		(offset)

#define AB8540_PIN_J16		ABX500_GPIO(1)
#define AB8540_PIN_D17		ABX500_GPIO(2)
#define AB8540_PIN_C12		ABX500_GPIO(3)
#define AB8540_PIN_G12		ABX500_GPIO(4)
/* hole */
#define AB8540_PIN_D16		ABX500_GPIO(14)
#define AB8540_PIN_F15		ABX500_GPIO(15)
#define AB8540_PIN_J8		ABX500_GPIO(16)
#define AB8540_PIN_K16		ABX500_GPIO(17)
#define AB8540_PIN_G15		ABX500_GPIO(18)
#define AB8540_PIN_F17		ABX500_GPIO(19)
#define AB8540_PIN_E17		ABX500_GPIO(20)
/* hole */
#define AB8540_PIN_AA16		ABX500_GPIO(27)
#define AB8540_PIN_W18		ABX500_GPIO(28)
#define AB8540_PIN_Y15		ABX500_GPIO(29)
#define AB8540_PIN_W16		ABX500_GPIO(30)
#define AB8540_PIN_V15		ABX500_GPIO(31)
#define AB8540_PIN_W17		ABX500_GPIO(32)
/* hole */
#define AB8540_PIN_D12		ABX500_GPIO(42)
#define AB8540_PIN_P4		ABX500_GPIO(43)
#define AB8540_PIN_AB1		ABX500_GPIO(44)
#define AB8540_PIN_K7		ABX500_GPIO(45)
#define AB8540_PIN_L7		ABX500_GPIO(46)
#define AB8540_PIN_G10		ABX500_GPIO(47)
#define AB8540_PIN_K12		ABX500_GPIO(48)
/* hole */
#define AB8540_PIN_N8		ABX500_GPIO(51)
#define AB8540_PIN_P12		ABX500_GPIO(52)
#define AB8540_PIN_K8		ABX500_GPIO(53)
#define AB8540_PIN_J11		ABX500_GPIO(54)
#define AB8540_PIN_AC2		ABX500_GPIO(55)
#define AB8540_PIN_AB2		ABX500_GPIO(56)

/* indicates the highest GPIO number */
#define AB8540_GPIO_MAX_NUMBER	56
57
/*
 * The names of the pins are denoted by GPIO number and ball name, even
 * though they can be used for other things than GPIO, this is the first
 * column in the table of the data sheet and often used on schematics and
 * such.
 */
static const struct pinctrl_pin_desc ab8540_pins[] = {
	PINCTRL_PIN(AB8540_PIN_J16, "GPIO1_J16"),
	PINCTRL_PIN(AB8540_PIN_D17, "GPIO2_D17"),
	PINCTRL_PIN(AB8540_PIN_C12, "GPIO3_C12"),
	PINCTRL_PIN(AB8540_PIN_G12, "GPIO4_G12"),
	/* hole */
	PINCTRL_PIN(AB8540_PIN_D16, "GPIO14_D16"),
	PINCTRL_PIN(AB8540_PIN_F15, "GPIO15_F15"),
	PINCTRL_PIN(AB8540_PIN_J8, "GPIO16_J8"),
	PINCTRL_PIN(AB8540_PIN_K16, "GPIO17_K16"),
	PINCTRL_PIN(AB8540_PIN_G15, "GPIO18_G15"),
	PINCTRL_PIN(AB8540_PIN_F17, "GPIO19_F17"),
	PINCTRL_PIN(AB8540_PIN_E17, "GPIO20_E17"),
	/* hole */
	PINCTRL_PIN(AB8540_PIN_AA16, "GPIO27_AA16"),
	PINCTRL_PIN(AB8540_PIN_W18, "GPIO28_W18"),
	PINCTRL_PIN(AB8540_PIN_Y15, "GPIO29_Y15"),
	PINCTRL_PIN(AB8540_PIN_W16, "GPIO30_W16"),
	PINCTRL_PIN(AB8540_PIN_V15, "GPIO31_V15"),
	PINCTRL_PIN(AB8540_PIN_W17, "GPIO32_W17"),
	/* hole */
	PINCTRL_PIN(AB8540_PIN_D12, "GPIO42_D12"),
	PINCTRL_PIN(AB8540_PIN_P4, "GPIO43_P4"),
	PINCTRL_PIN(AB8540_PIN_AB1, "GPIO44_AB1"),
	PINCTRL_PIN(AB8540_PIN_K7, "GPIO45_K7"),
	PINCTRL_PIN(AB8540_PIN_L7, "GPIO46_L7"),
	PINCTRL_PIN(AB8540_PIN_G10, "GPIO47_G10"),
	PINCTRL_PIN(AB8540_PIN_K12, "GPIO48_K12"),
	/* hole */
	PINCTRL_PIN(AB8540_PIN_N8, "GPIO51_N8"),
	PINCTRL_PIN(AB8540_PIN_P12, "GPIO52_P12"),
	PINCTRL_PIN(AB8540_PIN_K8, "GPIO53_K8"),
	PINCTRL_PIN(AB8540_PIN_J11, "GPIO54_J11"),
	PINCTRL_PIN(AB8540_PIN_AC2, "GPIO55_AC2"),
	PINCTRL_PIN(AB8540_PIN_AB2, "GPIO56_AB2"),
};
100
/*
 * Maps local GPIO offsets to local pin numbers
 */
/*
 * Each entry appears to be (first GPIO, number of consecutive pins,
 * altsetting used for GPIO mode); the counts match the "hole" layout of
 * the pin defines above (1-4, 14-20, 27-32, 42-48, 51-56).
 */
static const struct abx500_pinrange ab8540_pinranges[] = {
	ABX500_PINRANGE(1, 4, ABX500_ALT_A),
	ABX500_PINRANGE(14, 7, ABX500_ALT_A),
	ABX500_PINRANGE(27, 6, ABX500_ALT_A),
	ABX500_PINRANGE(42, 7, ABX500_ALT_A),
	ABX500_PINRANGE(51, 6, ABX500_ALT_A),
};
111
/*
 * Read the pin group names like this:
 * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
 *
 * The groups are arranged as sets per altfunction column, so we can
 * mux in one group at a time by selecting the same altfunction for them
 * all. When functions require pins on different altfunctions, you need
 * to combine several groups.
 */

/* default column */
static const unsigned sysclkreq2_d_1_pins[] = { AB8540_PIN_J16 };
static const unsigned sysclkreq3_d_1_pins[] = { AB8540_PIN_D17 };
static const unsigned sysclkreq4_d_1_pins[] = { AB8540_PIN_C12 };
static const unsigned sysclkreq6_d_1_pins[] = { AB8540_PIN_G12 };
static const unsigned pwmout1_d_1_pins[] = { AB8540_PIN_D16 };
static const unsigned pwmout2_d_1_pins[] = { AB8540_PIN_F15 };
static const unsigned pwmout3_d_1_pins[] = { AB8540_PIN_J8 };

/* audio data interface 1 */
static const unsigned adi1_d_1_pins[] = { AB8540_PIN_K16, AB8540_PIN_G15,
					AB8540_PIN_F17, AB8540_PIN_E17 };
/* Digital microphone 1 and 2 */
static const unsigned dmic12_d_1_pins[] = { AB8540_PIN_AA16, AB8540_PIN_W18 };
/* Digital microphone 3 and 4 */
static const unsigned dmic34_d_1_pins[] = { AB8540_PIN_Y15, AB8540_PIN_W16 };
/* Digital microphone 5 and 6 */
static const unsigned dmic56_d_1_pins[] = { AB8540_PIN_V15, AB8540_PIN_W17 };
static const unsigned sysclkreq5_d_1_pins[] = { AB8540_PIN_D12 };
static const unsigned batremn_d_1_pins[] = { AB8540_PIN_P4 };
static const unsigned service_d_1_pins[] = { AB8540_PIN_AB1 };
static const unsigned pwrctrl0_d_1_pins[] = { AB8540_PIN_K7 };
static const unsigned pwrctrl1_d_1_pins[] = { AB8540_PIN_L7 };
static const unsigned pwmextvibra1_d_1_pins[] = { AB8540_PIN_G10 };
static const unsigned pwmextvibra2_d_1_pins[] = { AB8540_PIN_K12 };
static const unsigned gpio1_vbat_d_1_pins[] = { AB8540_PIN_N8 };
static const unsigned gpio2_vbat_d_1_pins[] = { AB8540_PIN_P12 };
static const unsigned gpio3_vbat_d_1_pins[] = { AB8540_PIN_K8 };
static const unsigned gpio4_vbat_d_1_pins[] = { AB8540_PIN_J11 };
/* PDM clock and data share one group */
static const unsigned pdmclkdat_d_1_pins[] = { AB8540_PIN_AC2, AB8540_PIN_AB2 };
152
/* Altfunction A column: each GPIO-capable pin as a single-pin GPIO group */
static const unsigned gpio1_a_1_pins[] = { AB8540_PIN_J16 };
static const unsigned gpio2_a_1_pins[] = { AB8540_PIN_D17 };
static const unsigned gpio3_a_1_pins[] = { AB8540_PIN_C12 };
static const unsigned gpio4_a_1_pins[] = { AB8540_PIN_G12 };
static const unsigned gpio14_a_1_pins[] = { AB8540_PIN_D16 };
static const unsigned gpio15_a_1_pins[] = { AB8540_PIN_F15 };
static const unsigned gpio16_a_1_pins[] = { AB8540_PIN_J8 };
static const unsigned gpio17_a_1_pins[] = { AB8540_PIN_K16 };
static const unsigned gpio18_a_1_pins[] = { AB8540_PIN_G15 };
static const unsigned gpio19_a_1_pins[] = { AB8540_PIN_F17 };
static const unsigned gpio20_a_1_pins[] = { AB8540_PIN_E17 };
static const unsigned gpio27_a_1_pins[] = { AB8540_PIN_AA16 };
static const unsigned gpio28_a_1_pins[] = { AB8540_PIN_W18 };
static const unsigned gpio29_a_1_pins[] = { AB8540_PIN_Y15 };
static const unsigned gpio30_a_1_pins[] = { AB8540_PIN_W16 };
static const unsigned gpio31_a_1_pins[] = { AB8540_PIN_V15 };
static const unsigned gpio32_a_1_pins[] = { AB8540_PIN_W17 };
static const unsigned gpio42_a_1_pins[] = { AB8540_PIN_D12 };
static const unsigned gpio43_a_1_pins[] = { AB8540_PIN_P4 };
static const unsigned gpio44_a_1_pins[] = { AB8540_PIN_AB1 };
static const unsigned gpio45_a_1_pins[] = { AB8540_PIN_K7 };
static const unsigned gpio46_a_1_pins[] = { AB8540_PIN_L7 };
static const unsigned gpio47_a_1_pins[] = { AB8540_PIN_G10 };
static const unsigned gpio48_a_1_pins[] = { AB8540_PIN_K12 };
static const unsigned gpio51_a_1_pins[] = { AB8540_PIN_N8 };
static const unsigned gpio52_a_1_pins[] = { AB8540_PIN_P12 };
static const unsigned gpio53_a_1_pins[] = { AB8540_PIN_K8 };
static const unsigned gpio54_a_1_pins[] = { AB8540_PIN_J11 };
static const unsigned gpio55_a_1_pins[] = { AB8540_PIN_AC2 };
static const unsigned gpio56_a_1_pins[] = { AB8540_PIN_AB2 };
184
/* Builds an abx500_pingroup from a *_pins array and its altsetting */
#define AB8540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,		\
			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }

static const struct abx500_pingroup ab8540_groups[] = {
	/* default column */
	AB8540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwrctrl0_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwrctrl1_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwmextvibra1_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pwmextvibra2_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(gpio1_vbat_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(gpio2_vbat_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(gpio3_vbat_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(gpio4_vbat_d_1, ABX500_DEFAULT),
	AB8540_PIN_GROUP(pdmclkdat_d_1, ABX500_DEFAULT),
	/* Altfunction A column */
	AB8540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio43_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio44_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio45_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio46_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio47_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio48_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio54_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio55_a_1, ABX500_ALT_A),
	AB8540_PIN_GROUP(gpio56_a_1, ABX500_ALT_A),
};
245
/* We use this macro to define the groups applicable to a function */
#define AB8540_FUNC_GROUPS(a, b...)	\
static const char * const a##_groups[] = { b };

/* Group names below must match the .name fields in ab8540_groups[] */
AB8540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
		"sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1");
AB8540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
		"gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
		"gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio27_a_1",
		"gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
		"gpio32_a_1", "gpio42_a_1", "gpio43_a_1", "gpio44_a_1",
		"gpio45_a_1", "gpio46_a_1", "gpio47_a_1", "gpio48_a_1",
		"gpio51_a_1", "gpio52_a_1", "gpio53_a_1", "gpio54_a_1",
		"gpio55_a_1", "gpio56_a_1");
AB8540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
AB8540_FUNC_GROUPS(adi1, "adi1_d_1");
AB8540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
AB8540_FUNC_GROUPS(batremn, "batremn_d_1");
AB8540_FUNC_GROUPS(service, "service_d_1");
AB8540_FUNC_GROUPS(pwrctrl, "pwrctrl0_d_1", "pwrctrl1_d_1");
AB8540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_d_1", "pwmextvibra2_d_1");
AB8540_FUNC_GROUPS(gpio_vbat, "gpio1_vbat_d_1", "gpio2_vbat_d_1",
		"gpio3_vbat_d_1", "gpio4_vbat_d_1");
AB8540_FUNC_GROUPS(pdm, "pdmclkdat_d_1");
270
/* Builds an abx500_function entry from a *_groups array defined above */
#define FUNCTION(fname)					\
	{						\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}

static const struct abx500_function ab8540_functions[] = {
	FUNCTION(sysclkreq),
	FUNCTION(gpio),
	FUNCTION(pwmout),
	FUNCTION(adi1),
	FUNCTION(dmic),
	FUNCTION(batremn),
	FUNCTION(service),
	FUNCTION(pwrctrl),
	FUNCTION(pwmextvibra),
	FUNCTION(gpio_vbat),
	FUNCTION(pdm),
};
291
/*
 * this table translates what is in the AB8540 specification regarding the
 * balls alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
 * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
 * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
 * AB8540 only supports DEFAULT and ALTA functions, so the ALTERNATFUNC
 * register is not used (all bit1/bit2 entries are UNUSED).
 *
 */

struct alternate_functions ab8540_alternate_functions[AB8540_GPIO_MAX_NUMBER + 1] = {
	/* GPIOSEL1 - bit 4-7 reserved */
	ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
	ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
	ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
	ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
	ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
	/* GPIOSEL2 - bit 0-4 reserved */
	ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
	ALTERNATE_FUNCTIONS(10, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO10 */
	ALTERNATE_FUNCTIONS(11, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO11 */
	ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
	ALTERNATE_FUNCTIONS(13, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO13 */
	ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
	/* GPIOSEL3 - bit 4-7 reserved */
	ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(18, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(19, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(20, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21 */
	ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22 */
	ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23 */
	ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24 */
	/* GPIOSEL4 - bit 0-1 reserved */
	ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25 */
	ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
	ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
	/* GPIOSEL5 - bit 0-7 reserved */
	ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
	ALTERNATE_FUNCTIONS(34, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO34 */
	ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
	ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
	ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
	ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
	ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
	ALTERNATE_FUNCTIONS(40, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO40 */
	/* GPIOSEL6 - bit 0 reserved */
	ALTERNATE_FUNCTIONS(41, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO41 */
	ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(43, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO43, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(44, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO44, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(45, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO45, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(46, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO46, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(47, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO47, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(48, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO48, altA controlled by bit 7 */
	/* GPIOSEL7 - bit 0-1 reserved */
	ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
	ALTERNATE_FUNCTIONS(50, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO50 */
	ALTERNATE_FUNCTIONS(51, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(54, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(55, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO55, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(56, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO56, altA controlled by bit 7 */
};
368
/*
 * Pull-up/pull-down control range: the four VBAT GPIOs (pins 51..54).
 * NOTE(review): struct pullud is declared in pinctrl-abx500.h, which is
 * not visible here — confirm first_pin/last_pin are inclusive bounds.
 */
static struct pullud ab8540_pullud = {
	.first_pin = 51,	/* GPIO1_VBAT */
	.last_pin = 54,		/* GPIO4_VBAT */
};
373
/*
 * For AB8540 Only some GPIOs are interrupt capable:
 *	GPIO43 to GPIO44
 *	GPIO51 to GPIO54
 */
struct abx500_gpio_irq_cluster ab8540_gpio_irq_cluster[] = {
	GPIO_IRQ_CLUSTER(43, 43, AB8540_INT_GPIO43F),
	GPIO_IRQ_CLUSTER(44, 44, AB8540_INT_GPIO44F),
	/*
	 * NOTE(review): this anchor uses an AB9540_INT_* define while the
	 * two clusters above use AB8540_INT_*; verify AB9540_INT_GPIO51R
	 * is really the intended IRQ offset for the AB8540 GPIO51-54
	 * cluster (possible copy/paste from pinctrl-ab9540.c).
	 */
	GPIO_IRQ_CLUSTER(51, 54, AB9540_INT_GPIO51R),
};
384
/*
 * Collected pin control description for the AB8540: pin list, GPIO
 * ranges, functions/groups, altfunction register mapping, pull-up/down
 * range and IRQ clusters.  Handed to the abx500 core via
 * abx500_pinctrl_ab8540_init().
 */
static struct abx500_pinctrl_soc_data ab8540_soc = {
	.gpio_ranges = ab8540_pinranges,
	.gpio_num_ranges = ARRAY_SIZE(ab8540_pinranges),
	.pins = ab8540_pins,
	.npins = ARRAY_SIZE(ab8540_pins),
	.functions = ab8540_functions,
	.nfunctions = ARRAY_SIZE(ab8540_functions),
	.groups = ab8540_groups,
	.ngroups = ARRAY_SIZE(ab8540_groups),
	.alternate_functions = ab8540_alternate_functions,
	.pullud = &ab8540_pullud,
	.gpio_irq_cluster = ab8540_gpio_irq_cluster,
	.ngpio_irq_cluster = ARRAY_SIZE(ab8540_gpio_irq_cluster),
	.irq_gpio_rising_offset = AB8540_INT_GPIO43R,
	.irq_gpio_falling_offset = AB8540_INT_GPIO43F,
	/* NOTE(review): spacing between consecutive GPIO IRQs — confirm */
	.irq_gpio_factor = 2,
};
402
403void
404abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
405{
406 *soc = &ab8540_soc;
407}
diff --git a/drivers/pinctrl/pinctrl-ab9540.c b/drivers/pinctrl/pinctrl-ab9540.c
new file mode 100644
index 000000000000..7610bd012b98
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ab9540.c
@@ -0,0 +1,485 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/gpio.h>
13#include <linux/pinctrl/pinctrl.h>
14#include <linux/mfd/abx500/ab8500.h>
15#include "pinctrl-abx500.h"
16
/* All the pins that can be used for GPIO and some other functions */
/*
 * Pins are named after their BGA ball; the ABX500_GPIO() offset is the
 * GPIO number used throughout this file.  Gaps in the numbering ("hole")
 * are GPIO numbers that do not exist on the AB9540.
 */
#define ABX500_GPIO(offset)		(offset)

#define AB9540_PIN_R4		ABX500_GPIO(1)
#define AB9540_PIN_V3		ABX500_GPIO(2)
#define AB9540_PIN_T4		ABX500_GPIO(3)
#define AB9540_PIN_T5		ABX500_GPIO(4)
/* hole */
#define AB9540_PIN_B18		ABX500_GPIO(10)
#define AB9540_PIN_C18		ABX500_GPIO(11)
/* hole */
#define AB9540_PIN_D18		ABX500_GPIO(13)
#define AB9540_PIN_B19		ABX500_GPIO(14)
#define AB9540_PIN_C19		ABX500_GPIO(15)
#define AB9540_PIN_D19		ABX500_GPIO(16)
#define AB9540_PIN_R3		ABX500_GPIO(17)
#define AB9540_PIN_T2		ABX500_GPIO(18)
#define AB9540_PIN_U2		ABX500_GPIO(19)
#define AB9540_PIN_V2		ABX500_GPIO(20)
#define AB9540_PIN_N17		ABX500_GPIO(21)
#define AB9540_PIN_N16		ABX500_GPIO(22)
#define AB9540_PIN_M19		ABX500_GPIO(23)
#define AB9540_PIN_T3		ABX500_GPIO(24)
#define AB9540_PIN_W2		ABX500_GPIO(25)
/* hole */
#define AB9540_PIN_H4		ABX500_GPIO(27)
#define AB9540_PIN_F1		ABX500_GPIO(28)
#define AB9540_PIN_F4		ABX500_GPIO(29)
#define AB9540_PIN_F2		ABX500_GPIO(30)
#define AB9540_PIN_E4		ABX500_GPIO(31)
#define AB9540_PIN_F3		ABX500_GPIO(32)
/* hole */
#define AB9540_PIN_J13		ABX500_GPIO(34)
/* hole */
#define AB9540_PIN_L17		ABX500_GPIO(40)
#define AB9540_PIN_L16		ABX500_GPIO(41)
#define AB9540_PIN_W3		ABX500_GPIO(42)
#define AB9540_PIN_N4		ABX500_GPIO(50)
#define AB9540_PIN_G12		ABX500_GPIO(51)
#define AB9540_PIN_E17		ABX500_GPIO(52)
#define AB9540_PIN_D11		ABX500_GPIO(53)
#define AB9540_PIN_M18		ABX500_GPIO(54)

/* indicates the highest GPIO number */
#define AB9540_GPIO_MAX_NUMBER	54
62
/*
 * The names of the pins are denoted by GPIO number and ball name, even
 * though they can be used for other things than GPIO, this is the first
 * column in the table of the data sheet and often used on schematics and
 * such.
 */
static const struct pinctrl_pin_desc ab9540_pins[] = {
	PINCTRL_PIN(AB9540_PIN_R4, "GPIO1_R4"),
	PINCTRL_PIN(AB9540_PIN_V3, "GPIO2_V3"),
	PINCTRL_PIN(AB9540_PIN_T4, "GPIO3_T4"),
	PINCTRL_PIN(AB9540_PIN_T5, "GPIO4_T5"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_B18, "GPIO10_B18"),
	PINCTRL_PIN(AB9540_PIN_C18, "GPIO11_C18"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_D18, "GPIO13_D18"),
	PINCTRL_PIN(AB9540_PIN_B19, "GPIO14_B19"),
	PINCTRL_PIN(AB9540_PIN_C19, "GPIO15_C19"),
	PINCTRL_PIN(AB9540_PIN_D19, "GPIO16_D19"),
	PINCTRL_PIN(AB9540_PIN_R3, "GPIO17_R3"),
	PINCTRL_PIN(AB9540_PIN_T2, "GPIO18_T2"),
	PINCTRL_PIN(AB9540_PIN_U2, "GPIO19_U2"),
	PINCTRL_PIN(AB9540_PIN_V2, "GPIO20_V2"),
	PINCTRL_PIN(AB9540_PIN_N17, "GPIO21_N17"),
	PINCTRL_PIN(AB9540_PIN_N16, "GPIO22_N16"),
	PINCTRL_PIN(AB9540_PIN_M19, "GPIO23_M19"),
	PINCTRL_PIN(AB9540_PIN_T3, "GPIO24_T3"),
	PINCTRL_PIN(AB9540_PIN_W2, "GPIO25_W2"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_H4, "GPIO27_H4"),
	PINCTRL_PIN(AB9540_PIN_F1, "GPIO28_F1"),
	PINCTRL_PIN(AB9540_PIN_F4, "GPIO29_F4"),
	PINCTRL_PIN(AB9540_PIN_F2, "GPIO30_F2"),
	PINCTRL_PIN(AB9540_PIN_E4, "GPIO31_E4"),
	PINCTRL_PIN(AB9540_PIN_F3, "GPIO32_F3"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_J13, "GPIO34_J13"),
	/* hole */
	PINCTRL_PIN(AB9540_PIN_L17, "GPIO40_L17"),
	PINCTRL_PIN(AB9540_PIN_L16, "GPIO41_L16"),
	PINCTRL_PIN(AB9540_PIN_W3, "GPIO42_W3"),
	PINCTRL_PIN(AB9540_PIN_N4, "GPIO50_N4"),
	PINCTRL_PIN(AB9540_PIN_G12, "GPIO51_G12"),
	PINCTRL_PIN(AB9540_PIN_E17, "GPIO52_E17"),
	PINCTRL_PIN(AB9540_PIN_D11, "GPIO53_D11"),
	/*
	 * NOTE(review): ball M18 is local offset 54 but is labelled GPIO60
	 * here (and as gpio60_d_1 below) — presumably the data-sheet GPIO
	 * number differs from the local offset; confirm against the spec.
	 */
	PINCTRL_PIN(AB9540_PIN_M18, "GPIO60_M18"),
};
110
/*
 * Maps local GPIO offsets to local pin numbers
 */
/*
 * Each entry appears to be (first GPIO, number of consecutive pins,
 * altsetting used for GPIO mode); ABX500_DEFAULT ranges are pins that
 * are GPIO in their default function, ABX500_ALT_A ranges need the
 * altfunction A setting for GPIO mode.
 */
static const struct abx500_pinrange ab9540_pinranges[] = {
	ABX500_PINRANGE(1, 4, ABX500_ALT_A),
	ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
	ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(14, 12, ABX500_ALT_A),
	ABX500_PINRANGE(27, 6, ABX500_ALT_A),
	ABX500_PINRANGE(34, 1, ABX500_ALT_A),
	ABX500_PINRANGE(40, 3, ABX500_ALT_A),
	ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
	ABX500_PINRANGE(51, 3, ABX500_ALT_A),
	ABX500_PINRANGE(54, 1, ABX500_DEFAULT),
};
126
/*
 * Read the pin group names like this:
 * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function
 *
 * The groups are arranged as sets per altfunction column, so we can
 * mux in one group at a time by selecting the same altfunction for them
 * all. When functions require pins on different altfunctions, you need
 * to combine several groups.
 */

/* default column */
static const unsigned sysclkreq2_d_1_pins[] = { AB9540_PIN_R4 };
static const unsigned sysclkreq3_d_1_pins[] = { AB9540_PIN_V3 };
static const unsigned sysclkreq4_d_1_pins[] = { AB9540_PIN_T4 };
static const unsigned sysclkreq6_d_1_pins[] = { AB9540_PIN_T5 };
static const unsigned gpio10_d_1_pins[] = { AB9540_PIN_B18 };
static const unsigned gpio11_d_1_pins[] = { AB9540_PIN_C18 };
static const unsigned gpio13_d_1_pins[] = { AB9540_PIN_D18 };
static const unsigned pwmout1_d_1_pins[] = { AB9540_PIN_B19 };
static const unsigned pwmout2_d_1_pins[] = { AB9540_PIN_C19 };
static const unsigned pwmout3_d_1_pins[] = { AB9540_PIN_D19 };
/* audio data interface 1 */
static const unsigned adi1_d_1_pins[] = { AB9540_PIN_R3, AB9540_PIN_T2,
					AB9540_PIN_U2, AB9540_PIN_V2 };
/* USBUICC */
static const unsigned usbuicc_d_1_pins[] = { AB9540_PIN_N17, AB9540_PIN_N16,
					AB9540_PIN_M19 };
static const unsigned sysclkreq7_d_1_pins[] = { AB9540_PIN_T3 };
static const unsigned sysclkreq8_d_1_pins[] = { AB9540_PIN_W2 };
/* Digital microphone 1 and 2 */
static const unsigned dmic12_d_1_pins[] = { AB9540_PIN_H4, AB9540_PIN_F1 };
/* Digital microphone 3 and 4 */
static const unsigned dmic34_d_1_pins[] = { AB9540_PIN_F4, AB9540_PIN_F2 };
/* Digital microphone 5 and 6 */
static const unsigned dmic56_d_1_pins[] = { AB9540_PIN_E4, AB9540_PIN_F3 };
static const unsigned extcpena_d_1_pins[] = { AB9540_PIN_J13 };
/* modem SDA/SCL */
static const unsigned modsclsda_d_1_pins[] = { AB9540_PIN_L17, AB9540_PIN_L16 };
static const unsigned sysclkreq5_d_1_pins[] = { AB9540_PIN_W3 };
static const unsigned gpio50_d_1_pins[] = { AB9540_PIN_N4 };
static const unsigned batremn_d_1_pins[] = { AB9540_PIN_G12 };
static const unsigned resethw_d_1_pins[] = { AB9540_PIN_E17 };
static const unsigned service_d_1_pins[] = { AB9540_PIN_D11 };
static const unsigned gpio60_d_1_pins[] = { AB9540_PIN_M18 };
171
/* Altfunction A column */
static const unsigned gpio1_a_1_pins[] = { AB9540_PIN_R4 };
static const unsigned gpio2_a_1_pins[] = { AB9540_PIN_V3 };
static const unsigned gpio3_a_1_pins[] = { AB9540_PIN_T4 };
static const unsigned gpio4_a_1_pins[] = { AB9540_PIN_T5 };
static const unsigned hiqclkena_a_1_pins[] = { AB9540_PIN_B18 };
static const unsigned pdmclk_a_1_pins[] = { AB9540_PIN_C18 };
/* UART data spans two pins on altfunction A */
static const unsigned uartdata_a_1_pins[] = { AB9540_PIN_D18, AB9540_PIN_N4 };
static const unsigned gpio14_a_1_pins[] = { AB9540_PIN_B19 };
static const unsigned gpio15_a_1_pins[] = { AB9540_PIN_C19 };
static const unsigned gpio16_a_1_pins[] = { AB9540_PIN_D19 };
static const unsigned gpio17_a_1_pins[] = { AB9540_PIN_R3 };
static const unsigned gpio18_a_1_pins[] = { AB9540_PIN_T2 };
static const unsigned gpio19_a_1_pins[] = { AB9540_PIN_U2 };
static const unsigned gpio20_a_1_pins[] = { AB9540_PIN_V2 };
static const unsigned gpio21_a_1_pins[] = { AB9540_PIN_N17 };
static const unsigned gpio22_a_1_pins[] = { AB9540_PIN_N16 };
static const unsigned gpio23_a_1_pins[] = { AB9540_PIN_M19 };
static const unsigned gpio24_a_1_pins[] = { AB9540_PIN_T3 };
static const unsigned gpio25_a_1_pins[] = { AB9540_PIN_W2 };
static const unsigned gpio27_a_1_pins[] = { AB9540_PIN_H4 };
static const unsigned gpio28_a_1_pins[] = { AB9540_PIN_F1 };
static const unsigned gpio29_a_1_pins[] = { AB9540_PIN_F4 };
static const unsigned gpio30_a_1_pins[] = { AB9540_PIN_F2 };
static const unsigned gpio31_a_1_pins[] = { AB9540_PIN_E4 };
static const unsigned gpio32_a_1_pins[] = { AB9540_PIN_F3 };
static const unsigned gpio34_a_1_pins[] = { AB9540_PIN_J13 };
static const unsigned gpio40_a_1_pins[] = { AB9540_PIN_L17 };
static const unsigned gpio41_a_1_pins[] = { AB9540_PIN_L16 };
static const unsigned gpio42_a_1_pins[] = { AB9540_PIN_W3 };
static const unsigned gpio51_a_1_pins[] = { AB9540_PIN_G12 };
static const unsigned gpio52_a_1_pins[] = { AB9540_PIN_E17 };
static const unsigned gpio53_a_1_pins[] = { AB9540_PIN_D11 };
static const unsigned usbuiccpd_a_1_pins[] = { AB9540_PIN_M18 };

/* Altfunction B column */
static const unsigned pdmdata_b_1_pins[] = { AB9540_PIN_B18 };
static const unsigned pwmextvibra1_b_1_pins[] = { AB9540_PIN_D18 };
static const unsigned pwmextvibra2_b_1_pins[] = { AB9540_PIN_N4 };

/* Altfunction C column */
static const unsigned usbvdat_c_1_pins[] = { AB9540_PIN_D18 };
214
/* Builds an abx500_pingroup from a *_pins array and its altsetting */
#define AB9540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,		\
			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }

static const struct abx500_pingroup ab9540_groups[] = {
	/* default column */
	AB9540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
	AB9540_PIN_GROUP(gpio60_d_1, ABX500_DEFAULT),

	/* Altfunction A column */
	AB9540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(uartdata_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
	AB9540_PIN_GROUP(usbuiccpd_a_1, ABX500_ALT_A),

	/* Altfunction B column */
	AB9540_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
	AB9540_PIN_GROUP(pwmextvibra1_b_1, ABX500_ALT_B),
	AB9540_PIN_GROUP(pwmextvibra2_b_1, ABX500_ALT_B),

	/* Altfunction C column */
	AB9540_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
};
289
/*
 * We use this macro to define the groups applicable to a function:
 * AB9540_FUNC_GROUPS(foo, "a", "b") expands to a file-scope string array
 * named foo_groups listing the pin groups selectable for function "foo".
 */
#define AB9540_FUNC_GROUPS(a, b...)	   \
static const char * const a##_groups[] = { b };
293
/* Per-function group name tables (see AB9540_FUNC_GROUPS above) */
AB9540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
		"sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
		"sysclkreq7_d_1", "sysclkreq8_d_1");
AB9540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
		"gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
		"gpio15_a_1", "gpio16_a_1", "gpio17_a_1", "gpio18_a_1",
		"gpio19_a_1", "gpio20_a_1", "gpio21_a_1", "gpio22_a_1",
		"gpio23_a_1", "gpio24_a_1", "gpio25_a_1", "gpio27_a_1",
		"gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
		"gpio32_a_1", "gpio34_a_1", "gpio40_a_1", "gpio41_a_1",
		"gpio42_a_1", "gpio50_d_1", "gpio51_a_1", "gpio52_a_1",
		"gpio53_a_1", "gpio60_d_1");
AB9540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
AB9540_FUNC_GROUPS(adi1, "adi1_d_1");
AB9540_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_a_1");
AB9540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
AB9540_FUNC_GROUPS(extcpena, "extcpena_d_1");
AB9540_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
AB9540_FUNC_GROUPS(batremn, "batremn_d_1");
AB9540_FUNC_GROUPS(resethw, "resethw_d_1");
AB9540_FUNC_GROUPS(service, "service_d_1");
AB9540_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
AB9540_FUNC_GROUPS(pdm, "pdmdata_b_1", "pdmclk_a_1");
AB9540_FUNC_GROUPS(uartdata, "uartdata_a_1");
AB9540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_b_1", "pwmextvibra2_b_1");
AB9540_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
320
/*
 * FUNCTION() - build one abx500_function table entry from a function name.
 * Relies on the <fname>_groups array generated by AB9540_FUNC_GROUPS().
 */
#define FUNCTION(fname)					\
	{						\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}
327
/* All selectable pinmux functions on the AB9540 */
static const struct abx500_function ab9540_functions[] = {
	FUNCTION(sysclkreq),
	FUNCTION(gpio),
	FUNCTION(pwmout),
	FUNCTION(adi1),
	FUNCTION(usbuicc),
	FUNCTION(dmic),
	FUNCTION(extcpena),
	FUNCTION(modsclsda),
	FUNCTION(batremn),
	FUNCTION(resethw),
	FUNCTION(service),
	FUNCTION(hiqclkena),
	FUNCTION(pdm),
	FUNCTION(uartdata),
	FUNCTION(pwmextvibra),
	FUNCTION(usbvdat),
};
346
/*
 * This table translates what is in the AB9540 specification regarding the
 * balls' alternate functions (as for DB: default, ALT_A, ALT_B and ALT_C).
 * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
 * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
 *
 * example:
 *
 * ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2),
 * means that pin AB9540_PIN_D18 (pin 13) supports 4 muxes (default, ALT_A,
 * ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers are used to
 * select the mux. The ALTA, ALTB and ALTC values indicate what to write to
 * the ALTERNATFUNC register. We need to specify these values because the
 * SOC designers didn't apply the same logic for selecting a mux across the
 * ABx500 family.
 *
 * As this pin supports at least the ALT_B mux, the default mux is
 * selected by writing 1 in the GPIOSEL bit:
 *
 *	| GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
 * default	|	1	|	0	|	0
 * alt_A	|	0	|	0	|	1
 * alt_B	|	0	|	0	|	0
 * alt_C	|	0	|	1	|	0
 *
 * ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED),
 * means that pin AB9540_PIN_R4 (pin 1) supports only 2 muxes, so only the
 * GPIOSEL register is used to select the mux. As this pin doesn't support
 * at least the ALT_B mux, the default mux is selected by writing 0 in the
 * GPIOSEL bit:
 *
 *	| GPIOSEL bit=0 | alternatfunc bit2= | alternatfunc bit1=
 * default	|	0	|	0	|	0
 * alt_A	|	1	|	0	|	0
 */
381
/* Indexed by GPIO number; entry 0 is a placeholder since there is no GPIO0 */
struct alternate_functions ab9540alternate_functions[AB9540_GPIO_MAX_NUMBER + 1] = {
	/* GPIOSEL1 - bits 4-7 are reserved */
	ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
	ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
	ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
	ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
	ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
	/* GPIOSEL2 - bits 0 and 3 are reserved */
	ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
	ALTERNATE_FUNCTIONS(10, 1, 0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
	ALTERNATE_FUNCTIONS(11, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
	ALTERNATE_FUNCTIONS(13, 4, 3, 4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
	ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
	/* GPIOSEL3 - bits 1-3 reserved
	 * pins 17 to 20 are a special case: only bit 0 is used to select
	 * the alternate function for these 4 pins.
	 * bits 1 to 3 are reserved
	 */
	ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(18, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(19, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(20, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(21, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(22, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(23, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(24, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
	/* GPIOSEL4 - bit 1 reserved */
	ALTERNATE_FUNCTIONS(25, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
	ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
	ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
	ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
	/* GPIOSEL5 - bit 0, 2-6 are reserved */
	ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
	ALTERNATE_FUNCTIONS(34, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
	ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
	ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
	ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
	ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
	ALTERNATE_FUNCTIONS(40, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
	/* GPIOSEL6 - bits 2-7 are reserved */
	ALTERNATE_FUNCTIONS(41, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
	ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
	ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43 */
	ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44 */
	ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45 */
	ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46 */
	ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47 */
	ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48 */
	/*
	 * GPIOSEL7 - bit 0 and 6-7 are reserved
	 * special case with GPIO60, which is located at offset 5 of gpiosel7
	 * don't know why it has been called GPIO60 in the AB9540 datasheet,
	 * GPIO54 would be logical..., so at SOC point of view we consider
	 * GPIO60 = GPIO54
	 */
	ALTERNATE_FUNCTIONS(49, 0, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
	ALTERNATE_FUNCTIONS(50, 1, 2, UNUSED, 1, 0, 0), /* GPIO50, altA and altB controlled by bit 1 */
	ALTERNATE_FUNCTIONS(51, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
	ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
	ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
	ALTERNATE_FUNCTIONS(54, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54 = GPIO60, altA controlled by bit 5 */
};
456
/*
 * Contiguous clusters of GPIO interrupts: a GPIO n in [start, end] maps
 * to hwirq (n - start + to_irq); see abx500_gpio_to_irq() in
 * pinctrl-abx500.c for the translation.
 */
struct abx500_gpio_irq_cluster ab9540_gpio_irq_cluster[] = {
	GPIO_IRQ_CLUSTER(10, 13, AB8500_INT_GPIO10R),
	GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
	GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
	GPIO_IRQ_CLUSTER(50, 54, AB9540_INT_GPIO50R),
};
463
/* Complete AB9540 SoC description handed over to the abx500 core driver */
static struct abx500_pinctrl_soc_data ab9540_soc = {
	.gpio_ranges = ab9540_pinranges,
	.gpio_num_ranges = ARRAY_SIZE(ab9540_pinranges),
	.pins = ab9540_pins,
	.npins = ARRAY_SIZE(ab9540_pins),
	.functions = ab9540_functions,
	.nfunctions = ARRAY_SIZE(ab9540_functions),
	.groups = ab9540_groups,
	.ngroups = ARRAY_SIZE(ab9540_groups),
	.alternate_functions = ab9540alternate_functions,
	.gpio_irq_cluster = ab9540_gpio_irq_cluster,
	.ngpio_irq_cluster = ARRAY_SIZE(ab9540_gpio_irq_cluster),
	.irq_gpio_rising_offset = AB8500_INT_GPIO6R,
	.irq_gpio_falling_offset = AB8500_INT_GPIO6F,
	.irq_gpio_factor = 1,
};
480
481void
482abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
483{
484 *soc = &ab9540_soc;
485}
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
new file mode 100644
index 000000000000..caecdd373061
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -0,0 +1,1012 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2013
3 *
4 * Author: Patrice Chotard <patrice.chotard@st.com>
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/of.h>
18#include <linux/of_device.h>
19#include <linux/platform_device.h>
20#include <linux/gpio.h>
21#include <linux/irq.h>
22#include <linux/irqdomain.h>
23#include <linux/interrupt.h>
24#include <linux/bitops.h>
25#include <linux/mfd/abx500.h>
26#include <linux/mfd/abx500/ab8500.h>
27#include <linux/mfd/abx500/ab8500-gpio.h>
28#include <linux/pinctrl/pinctrl.h>
29#include <linux/pinctrl/consumer.h>
30#include <linux/pinctrl/pinmux.h>
31#include <linux/pinctrl/pinconf.h>
32#include <linux/pinctrl/pinconf-generic.h>
33
34#include "pinctrl-abx500.h"
35
36/*
37 * The AB9540 and AB8540 GPIO support are extended versions
38 * of the AB8500 GPIO support.
39 * The AB9540 supports an additional (7th) register so that
40 * more GPIO may be configured and used.
41 * The AB8540 supports 4 new gpios (GPIOx_VBAT) that have
42 * internal pull-up and pull-down capabilities.
43 */
44
/*
 * GPIO registers offset
 * Bank: 0x10
 */
/* Mux selection: GPIO vs. alternate function, one bit per pin */
#define AB8500_GPIO_SEL1_REG	0x00
#define AB8500_GPIO_SEL2_REG	0x01
#define AB8500_GPIO_SEL3_REG	0x02
#define AB8500_GPIO_SEL4_REG	0x03
#define AB8500_GPIO_SEL5_REG	0x04
#define AB8500_GPIO_SEL6_REG	0x05
#define AB9540_GPIO_SEL7_REG	0x06

/* Direction: 1 = output, 0 = input (see abx500_gpio_direction_*) */
#define AB8500_GPIO_DIR1_REG	0x10
#define AB8500_GPIO_DIR2_REG	0x11
#define AB8500_GPIO_DIR3_REG	0x12
#define AB8500_GPIO_DIR4_REG	0x13
#define AB8500_GPIO_DIR5_REG	0x14
#define AB8500_GPIO_DIR6_REG	0x15
#define AB9540_GPIO_DIR7_REG	0x16

/* Output value latch */
#define AB8500_GPIO_OUT1_REG	0x20
#define AB8500_GPIO_OUT2_REG	0x21
#define AB8500_GPIO_OUT3_REG	0x22
#define AB8500_GPIO_OUT4_REG	0x23
#define AB8500_GPIO_OUT5_REG	0x24
#define AB8500_GPIO_OUT6_REG	0x25
#define AB9540_GPIO_OUT7_REG	0x26

/* Pull down control: writing 1 disables the pull down on a pin */
#define AB8500_GPIO_PUD1_REG	0x30
#define AB8500_GPIO_PUD2_REG	0x31
#define AB8500_GPIO_PUD3_REG	0x32
#define AB8500_GPIO_PUD4_REG	0x33
#define AB8500_GPIO_PUD5_REG	0x34
#define AB8500_GPIO_PUD6_REG	0x35
#define AB9540_GPIO_PUD7_REG	0x36

/* Input value (read-only) */
#define AB8500_GPIO_IN1_REG	0x40
#define AB8500_GPIO_IN2_REG	0x41
#define AB8500_GPIO_IN3_REG	0x42
#define AB8500_GPIO_IN4_REG	0x43
#define AB8500_GPIO_IN5_REG	0x44
#define AB8500_GPIO_IN6_REG	0x45
#define AB9540_GPIO_IN7_REG	0x46
#define AB8540_GPIO_VINSEL_REG	0x47
/* AB8540 only: 2 bits per pin for GPIOx_VBAT pull up/down selection */
#define AB8540_GPIO_PULL_UPDOWN_REG	0x48
#define AB8500_GPIO_ALTFUN_REG	0x50
#define AB8540_GPIO_PULL_UPDOWN_MASK	0x03
#define AB8540_GPIO_VINSEL_MASK	0x03
#define AB8540_GPIOX_VBAT_START	51
#define AB8540_GPIOX_VBAT_END	54
95
/**
 * struct abx500_pinctrl - state container for the abx500 pin controller
 * @dev: device used for register access via the abx500 MFD API
 * @pctldev: registered pin controller instance
 * @soc: per-chip description (pins, groups, functions, mux tables)
 * @chip: the embedded gpio_chip
 * @parent: the AB8500 MFD parent, used for its IRQ domain
 * @lock: mutex (NOTE(review): not taken anywhere in the code visible
 *        here — confirm intended use before relying on it)
 * @irq_cluster: table mapping GPIO ranges to hardware IRQ bases
 * @irq_cluster_size: number of entries in @irq_cluster
 */
struct abx500_pinctrl {
	struct device *dev;
	struct pinctrl_dev *pctldev;
	struct abx500_pinctrl_soc_data *soc;
	struct gpio_chip chip;
	struct ab8500 *parent;
	struct mutex lock;
	struct abx500_gpio_irq_cluster *irq_cluster;
	int irq_cluster_size;
};
106
107/**
108 * to_abx500_pinctrl() - get the pointer to abx500_pinctrl
109 * @chip: Member of the structure abx500_pinctrl
110 */
111static inline struct abx500_pinctrl *to_abx500_pinctrl(struct gpio_chip *chip)
112{
113 return container_of(chip, struct abx500_pinctrl, chip);
114}
115
116static int abx500_gpio_get_bit(struct gpio_chip *chip, u8 reg,
117 unsigned offset, bool *bit)
118{
119 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
120 u8 pos = offset % 8;
121 u8 val;
122 int ret;
123
124 reg += offset / 8;
125 ret = abx500_get_register_interruptible(pct->dev,
126 AB8500_MISC, reg, &val);
127
128 *bit = !!(val & BIT(pos));
129
130 if (ret < 0)
131 dev_err(pct->dev,
132 "%s read reg =%x, offset=%x failed\n",
133 __func__, reg, offset);
134
135 return ret;
136}
137
138static int abx500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
139 unsigned offset, int val)
140{
141 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
142 u8 pos = offset % 8;
143 int ret;
144
145 reg += offset / 8;
146 ret = abx500_mask_and_set_register_interruptible(pct->dev,
147 AB8500_MISC, reg, BIT(pos), val << pos);
148 if (ret < 0)
149 dev_err(pct->dev, "%s write failed\n", __func__);
150
151 return ret;
152}
153
154/**
155 * abx500_gpio_get() - Get the particular GPIO value
156 * @chip: Gpio device
157 * @offset: GPIO number to read
158 */
159static int abx500_gpio_get(struct gpio_chip *chip, unsigned offset)
160{
161 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
162 bool bit;
163 int ret;
164
165 ret = abx500_gpio_get_bit(chip, AB8500_GPIO_IN1_REG,
166 offset, &bit);
167 if (ret < 0) {
168 dev_err(pct->dev, "%s failed\n", __func__);
169 return ret;
170 }
171
172 return bit;
173}
174
175static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
176{
177 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
178 int ret;
179
180 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
181 if (ret < 0)
182 dev_err(pct->dev, "%s write failed\n", __func__);
183}
184
185static int abx500_config_pull_updown(struct abx500_pinctrl *pct,
186 int offset, enum abx500_gpio_pull_updown val)
187{
188 u8 pos;
189 int ret;
190 struct pullud *pullud;
191
192 if (!pct->soc->pullud) {
193 dev_err(pct->dev, "%s AB chip doesn't support pull up/down feature",
194 __func__);
195 ret = -EPERM;
196 goto out;
197 }
198
199 pullud = pct->soc->pullud;
200
201 if ((offset < pullud->first_pin)
202 || (offset > pullud->last_pin)) {
203 ret = -EINVAL;
204 goto out;
205 }
206
207 pos = offset << 1;
208
209 ret = abx500_mask_and_set_register_interruptible(pct->dev,
210 AB8500_MISC, AB8540_GPIO_PULL_UPDOWN_REG,
211 AB8540_GPIO_PULL_UPDOWN_MASK << pos, val << pos);
212
213out:
214 if (ret < 0)
215 dev_err(pct->dev, "%s failed (%d)\n", __func__, ret);
216
217 return ret;
218}
219
220static int abx500_gpio_direction_output(struct gpio_chip *chip,
221 unsigned offset,
222 int val)
223{
224 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
225 struct pullud *pullud = pct->soc->pullud;
226 unsigned gpio;
227 int ret;
228
229 /* set direction as output */
230 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
231 if (ret < 0)
232 return ret;
233
234 /* disable pull down */
235 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
236 if (ret < 0)
237 return ret;
238
239 /* if supported, disable both pull down and pull up */
240 gpio = offset + 1;
241 if (pullud && gpio >= pullud->first_pin && gpio <= pullud->last_pin) {
242 ret = abx500_config_pull_updown(pct,
243 gpio,
244 ABX500_GPIO_PULL_NONE);
245 if (ret < 0)
246 return ret;
247 }
248
249 /* set the output as 1 or 0 */
250 return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
251}
252
/* Configure a GPIO as input by clearing its DIR bit. */
static int abx500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	/* set the register as input */
	return abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
}
258
259static int abx500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
260{
261 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
262 /* The AB8500 GPIO numbers are off by one */
263 int gpio = offset + 1;
264 int hwirq;
265 int i;
266
267 for (i = 0; i < pct->irq_cluster_size; i++) {
268 struct abx500_gpio_irq_cluster *cluster =
269 &pct->irq_cluster[i];
270
271 if (gpio >= cluster->start && gpio <= cluster->end) {
272 /*
273 * The ABx500 GPIO's associated IRQs are clustered together
274 * throughout the interrupt numbers at irregular intervals.
275 * To solve this quandry, we have placed the read-in values
276 * into the cluster information table.
277 */
278 hwirq = gpio - cluster->start + cluster->to_irq;
279 return irq_create_mapping(pct->parent->domain, hwirq);
280 }
281 }
282
283 return -EINVAL;
284}
285
286static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
287 unsigned gpio, int alt_setting)
288{
289 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
290 struct alternate_functions af = pct->soc->alternate_functions[gpio];
291 int ret;
292 int val;
293 unsigned offset;
294
295 const char *modes[] = {
296 [ABX500_DEFAULT] = "default",
297 [ABX500_ALT_A] = "altA",
298 [ABX500_ALT_B] = "altB",
299 [ABX500_ALT_C] = "altC",
300 };
301
302 /* sanity check */
303 if (((alt_setting == ABX500_ALT_A) && (af.gpiosel_bit == UNUSED)) ||
304 ((alt_setting == ABX500_ALT_B) && (af.alt_bit1 == UNUSED)) ||
305 ((alt_setting == ABX500_ALT_C) && (af.alt_bit2 == UNUSED))) {
306 dev_dbg(pct->dev, "pin %d doesn't support %s mode\n", gpio,
307 modes[alt_setting]);
308 return -EINVAL;
309 }
310
311 /* on ABx5xx, there is no GPIO0, so adjust the offset */
312 offset = gpio - 1;
313
314 switch (alt_setting) {
315 case ABX500_DEFAULT:
316 /*
317 * for ABx5xx family, default mode is always selected by
318 * writing 0 to GPIOSELx register, except for pins which
319 * support at least ALT_B mode, default mode is selected
320 * by writing 1 to GPIOSELx register
321 */
322 val = 0;
323 if (af.alt_bit1 != UNUSED)
324 val++;
325
326 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
327 offset, val);
328 break;
329
330 case ABX500_ALT_A:
331 /*
332 * for ABx5xx family, alt_a mode is always selected by
333 * writing 1 to GPIOSELx register, except for pins which
334 * support at least ALT_B mode, alt_a mode is selected
335 * by writing 0 to GPIOSELx register and 0 in ALTFUNC
336 * register
337 */
338 if (af.alt_bit1 != UNUSED) {
339 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
340 offset, 0);
341 ret = abx500_gpio_set_bits(chip,
342 AB8500_GPIO_ALTFUN_REG,
343 af.alt_bit1,
344 !!(af.alta_val && BIT(0)));
345 if (af.alt_bit2 != UNUSED)
346 ret = abx500_gpio_set_bits(chip,
347 AB8500_GPIO_ALTFUN_REG,
348 af.alt_bit2,
349 !!(af.alta_val && BIT(1)));
350 } else
351 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
352 offset, 1);
353 break;
354
355 case ABX500_ALT_B:
356 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
357 offset, 0);
358 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
359 af.alt_bit1, !!(af.altb_val && BIT(0)));
360 if (af.alt_bit2 != UNUSED)
361 ret = abx500_gpio_set_bits(chip,
362 AB8500_GPIO_ALTFUN_REG,
363 af.alt_bit2,
364 !!(af.altb_val && BIT(1)));
365 break;
366
367 case ABX500_ALT_C:
368 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
369 offset, 0);
370 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
371 af.alt_bit2, !!(af.altc_val && BIT(0)));
372 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
373 af.alt_bit2, !!(af.altc_val && BIT(1)));
374 break;
375
376 default:
377 dev_dbg(pct->dev, "unknow alt_setting %d\n", alt_setting);
378
379 return -EINVAL;
380 }
381
382 return ret;
383}
384
385static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
386 unsigned gpio)
387{
388 u8 mode;
389 bool bit_mode;
390 bool alt_bit1;
391 bool alt_bit2;
392 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
393 struct alternate_functions af = pct->soc->alternate_functions[gpio];
394 /* on ABx5xx, there is no GPIO0, so adjust the offset */
395 unsigned offset = gpio - 1;
396
397 /*
398 * if gpiosel_bit is set to unused,
399 * it means no GPIO or special case
400 */
401 if (af.gpiosel_bit == UNUSED)
402 return ABX500_DEFAULT;
403
404 /* read GpioSelx register */
405 abx500_gpio_get_bit(chip, AB8500_GPIO_SEL1_REG + (offset / 8),
406 af.gpiosel_bit, &bit_mode);
407 mode = bit_mode;
408
409 /* sanity check */
410 if ((af.alt_bit1 < UNUSED) || (af.alt_bit1 > 7) ||
411 (af.alt_bit2 < UNUSED) || (af.alt_bit2 > 7)) {
412 dev_err(pct->dev,
413 "alt_bitX value not in correct range (-1 to 7)\n");
414 return -EINVAL;
415 }
416
417 /* if alt_bit2 is used, alt_bit1 must be used too */
418 if ((af.alt_bit2 != UNUSED) && (af.alt_bit1 == UNUSED)) {
419 dev_err(pct->dev,
420 "if alt_bit2 is used, alt_bit1 can't be unused\n");
421 return -EINVAL;
422 }
423
424 /* check if pin use AlternateFunction register */
425 if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED))
426 return mode;
427 /*
428 * if pin GPIOSEL bit is set and pin supports alternate function,
429 * it means DEFAULT mode
430 */
431 if (mode)
432 return ABX500_DEFAULT;
433
434 /*
435 * pin use the AlternatFunction register
436 * read alt_bit1 value
437 */
438 abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG,
439 af.alt_bit1, &alt_bit1);
440
441 if (af.alt_bit2 != UNUSED)
442 /* read alt_bit2 value */
443 abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG, af.alt_bit2,
444 &alt_bit2);
445 else
446 alt_bit2 = 0;
447
448 mode = (alt_bit2 << 1) + alt_bit1;
449 if (mode == af.alta_val)
450 return ABX500_ALT_A;
451 else if (mode == af.altb_val)
452 return ABX500_ALT_B;
453 else
454 return ABX500_ALT_C;
455}
456
457#ifdef CONFIG_DEBUG_FS
458
459#include <linux/seq_file.h>
460
/*
 * Print one line of debugfs state for a single pin.
 * @offset is the 1-based ABx500 pin number; @gpio is the global GPIO
 * number used only for display.
 */
static void abx500_gpio_dbg_show_one(struct seq_file *s,
				     struct pinctrl_dev *pctldev,
				     struct gpio_chip *chip,
				     unsigned offset, unsigned gpio)
{
	/* gpiolib bookkeeping uses 0-based offsets, hence offset - 1 */
	const char *label = gpiochip_is_requested(chip, offset - 1);
	u8 gpio_offset = offset - 1;
	int mode = -1;
	bool is_out;
	bool pull;

	const char *modes[] = {
		[ABX500_DEFAULT] = "default",
		[ABX500_ALT_A] = "altA",
		[ABX500_ALT_B] = "altB",
		[ABX500_ALT_C] = "altC",
	};

	/* NOTE(review): read errors here leave is_out/pull unset — best effort */
	abx500_gpio_get_bit(chip, AB8500_GPIO_DIR1_REG, gpio_offset, &is_out);
	abx500_gpio_get_bit(chip, AB8500_GPIO_PUD1_REG, gpio_offset, &pull);

	/* mux mode is only available once the pin controller is registered */
	if (pctldev)
		mode = abx500_get_mode(pctldev, chip, offset);

	seq_printf(s, " gpio-%-3d (%-20.20s) %-3s %-9s %s",
		   gpio, label ?: "(none)",
		   is_out ? "out" : "in ",
		   is_out ?
		   (chip->get
		   ? (chip->get(chip, offset) ? "hi" : "lo")
		   : "?  ")
		   : (pull ? "pull up" : "pull down"),
		   (mode < 0) ? "unknown" : modes[mode]);
}
495
496static void abx500_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
497{
498 unsigned i;
499 unsigned gpio = chip->base;
500 struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
501 struct pinctrl_dev *pctldev = pct->pctldev;
502
503 for (i = 0; i < chip->ngpio; i++, gpio++) {
504 /* On AB8500, there is no GPIO0, the first is the GPIO 1 */
505 abx500_gpio_dbg_show_one(s, pctldev, chip, i + 1, gpio);
506 seq_printf(s, "\n");
507 }
508}
509
510#else
511static inline void abx500_gpio_dbg_show_one(struct seq_file *s,
512 struct pinctrl_dev *pctldev,
513 struct gpio_chip *chip,
514 unsigned offset, unsigned gpio)
515{
516}
517#define abx500_gpio_dbg_show NULL
518#endif
519
520int abx500_gpio_request(struct gpio_chip *chip, unsigned offset)
521{
522 int gpio = chip->base + offset;
523
524 return pinctrl_request_gpio(gpio);
525}
526
527void abx500_gpio_free(struct gpio_chip *chip, unsigned offset)
528{
529 int gpio = chip->base + offset;
530
531 pinctrl_free_gpio(gpio);
532}
533
/* Template gpio_chip; copied into each abx500_pinctrl instance at probe */
static struct gpio_chip abx500gpio_chip = {
	.label			= "abx500-gpio",
	.owner			= THIS_MODULE,
	.request		= abx500_gpio_request,
	.free			= abx500_gpio_free,
	.direction_input	= abx500_gpio_direction_input,
	.get			= abx500_gpio_get,
	.direction_output	= abx500_gpio_direction_output,
	.set			= abx500_gpio_set,
	.to_irq			= abx500_gpio_to_irq,
	.dbg_show		= abx500_gpio_dbg_show,
};
546
547static int abx500_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
548{
549 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
550
551 return pct->soc->nfunctions;
552}
553
554static const char *abx500_pmx_get_func_name(struct pinctrl_dev *pctldev,
555 unsigned function)
556{
557 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
558
559 return pct->soc->functions[function].name;
560}
561
562static int abx500_pmx_get_func_groups(struct pinctrl_dev *pctldev,
563 unsigned function,
564 const char * const **groups,
565 unsigned * const num_groups)
566{
567 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
568
569 *groups = pct->soc->functions[function].groups;
570 *num_groups = pct->soc->functions[function].ngroups;
571
572 return 0;
573}
574
575static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
576 unsigned group)
577{
578 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
579 struct gpio_chip *chip = &pct->chip;
580 const struct abx500_pingroup *g;
581 int i;
582 int ret = 0;
583
584 g = &pct->soc->groups[group];
585 if (g->altsetting < 0)
586 return -EINVAL;
587
588 dev_dbg(pct->dev, "enable group %s, %u pins\n", g->name, g->npins);
589
590 for (i = 0; i < g->npins; i++) {
591 dev_dbg(pct->dev, "setting pin %d to altsetting %d\n",
592 g->pins[i], g->altsetting);
593
594 ret = abx500_set_mode(pctldev, chip, g->pins[i], g->altsetting);
595 }
596
597 return ret;
598}
599
600static void abx500_pmx_disable(struct pinctrl_dev *pctldev,
601 unsigned function, unsigned group)
602{
603 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
604 const struct abx500_pingroup *g;
605
606 g = &pct->soc->groups[group];
607 if (g->altsetting < 0)
608 return;
609
610 /* FIXME: poke out the mux, set the pin to some default state? */
611 dev_dbg(pct->dev, "disable group %s, %u pins\n", g->name, g->npins);
612}
613
614int abx500_gpio_request_enable(struct pinctrl_dev *pctldev,
615 struct pinctrl_gpio_range *range,
616 unsigned offset)
617{
618 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
619 const struct abx500_pinrange *p;
620 int ret;
621 int i;
622
623 /*
624 * Different ranges have different ways to enable GPIO function on a
625 * pin, so refer back to our local range type, where we handily define
626 * what altfunc enables GPIO for a certain pin.
627 */
628 for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
629 p = &pct->soc->gpio_ranges[i];
630 if ((offset >= p->offset) &&
631 (offset < (p->offset + p->npins)))
632 break;
633 }
634
635 if (i == pct->soc->gpio_num_ranges) {
636 dev_err(pct->dev, "%s failed to locate range\n", __func__);
637 return -ENODEV;
638 }
639
640 dev_dbg(pct->dev, "enable GPIO by altfunc %d at gpio %d\n",
641 p->altfunc, offset);
642
643 ret = abx500_set_mode(pct->pctldev, &pct->chip,
644 offset, p->altfunc);
645 if (ret < 0) {
646 dev_err(pct->dev, "%s setting altfunc failed\n", __func__);
647 return ret;
648 }
649
650 return ret;
651}
652
/* pinmux op: nothing to undo when gpiolib releases a pin. */
static void abx500_gpio_disable_free(struct pinctrl_dev *pctldev,
				     struct pinctrl_gpio_range *range,
				     unsigned offset)
{
	/* intentionally empty: the mux is left as-is on free */
}
658
/* pinmux callbacks registered with the pinctrl core */
static struct pinmux_ops abx500_pinmux_ops = {
	.get_functions_count = abx500_pmx_get_funcs_cnt,
	.get_function_name = abx500_pmx_get_func_name,
	.get_function_groups = abx500_pmx_get_func_groups,
	.enable = abx500_pmx_enable,
	.disable = abx500_pmx_disable,
	.gpio_request_enable = abx500_gpio_request_enable,
	.gpio_disable_free = abx500_gpio_disable_free,
};
668
669static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
670{
671 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
672
673 return pct->soc->ngroups;
674}
675
676static const char *abx500_get_group_name(struct pinctrl_dev *pctldev,
677 unsigned selector)
678{
679 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
680
681 return pct->soc->groups[selector].name;
682}
683
684static int abx500_get_group_pins(struct pinctrl_dev *pctldev,
685 unsigned selector,
686 const unsigned **pins,
687 unsigned *num_pins)
688{
689 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
690
691 *pins = pct->soc->groups[selector].pins;
692 *num_pins = pct->soc->groups[selector].npins;
693
694 return 0;
695}
696
/* pinctrl op: per-pin debugfs line, delegates to the gpio dump helper. */
static void abx500_pin_dbg_show(struct pinctrl_dev *pctldev,
				struct seq_file *s, unsigned offset)
{
	struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
	struct gpio_chip *chip = &pct->chip;

	/*
	 * NOTE(review): @offset here appears to be a 1-based pin number
	 * (the helper subtracts 1 internally), hence the "- 1" in the
	 * displayed global GPIO number — confirm against the pinctrl core.
	 */
	abx500_gpio_dbg_show_one(s, pctldev, chip, offset,
				 chip->base + offset - 1);
}
706
/* pinctrl callbacks registered with the pinctrl core */
static struct pinctrl_ops abx500_pinctrl_ops = {
	.get_groups_count = abx500_get_groups_cnt,
	.get_group_name = abx500_get_group_name,
	.get_group_pins = abx500_get_group_pins,
	.pin_dbg_show = abx500_pin_dbg_show,
};
713
/*
 * pinconf .pin_config_get: reading back a pin configuration is not
 * implemented for ABx500; unconditionally reports -ENOSYS.
 */
int abx500_pin_config_get(struct pinctrl_dev *pctldev,
			  unsigned pin,
			  unsigned long *config)
{
	return -ENOSYS;
}
720
721int abx500_pin_config_set(struct pinctrl_dev *pctldev,
722 unsigned pin,
723 unsigned long config)
724{
725 struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
726 struct pullud *pullud = pct->soc->pullud;
727 struct gpio_chip *chip = &pct->chip;
728 unsigned offset;
729 int ret;
730 enum pin_config_param param = pinconf_to_config_param(config);
731 enum pin_config_param argument = pinconf_to_config_argument(config);
732
733 dev_dbg(chip->dev, "pin %d [%#lx]: %s %s\n",
734 pin, config, (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
735 (param == PIN_CONFIG_OUTPUT) ? (argument ? "high" : "low") :
736 (argument ? "pull up" : "pull down"));
737
738 /* on ABx500, there is no GPIO0, so adjust the offset */
739 offset = pin - 1;
740
741 switch (param) {
742 case PIN_CONFIG_BIAS_PULL_DOWN:
743 /*
744 * if argument = 1 set the pull down
745 * else clear the pull down
746 */
747 ret = abx500_gpio_direction_input(chip, offset);
748 /*
749 * Some chips only support pull down, while some actually
750 * support both pull up and pull down. Such chips have
751 * a "pullud" range specified for the pins that support
752 * both features. If the pin is not within that range, we
753 * fall back to the old bit set that only support pull down.
754 */
755 if (pullud &&
756 pin >= pullud->first_pin &&
757 pin <= pullud->last_pin)
758 ret = abx500_config_pull_updown(pct,
759 pin,
760 argument ? ABX500_GPIO_PULL_DOWN : ABX500_GPIO_PULL_NONE);
761 else
762 /* Chip only supports pull down */
763 ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG,
764 offset, argument ? 0 : 1);
765 break;
766
767 case PIN_CONFIG_OUTPUT:
768 ret = abx500_gpio_direction_output(chip, offset, argument);
769
770 break;
771
772 default:
773 dev_err(chip->dev, "illegal configuration requested\n");
774
775 return -EINVAL;
776 }
777
778 return ret;
779}
780
781static struct pinconf_ops abx500_pinconf_ops = {
782 .pin_config_get = abx500_pin_config_get,
783 .pin_config_set = abx500_pin_config_set,
784};
785
/*
 * Pin controller descriptor registered with the pinctrl core.
 * Deliberately NOT const: .pins and .npins are filled in at probe time
 * from the per-SoC data (see abx500_gpio_probe()).
 */
static struct pinctrl_desc abx500_pinctrl_desc = {
	.name = "pinctrl-abx500",
	.pctlops = &abx500_pinctrl_ops,
	.pmxops = &abx500_pinmux_ops,
	.confops = &abx500_pinconf_ops,
	.owner = THIS_MODULE,
};
793
794static int abx500_get_gpio_num(struct abx500_pinctrl_soc_data *soc)
795{
796 unsigned int lowest = 0;
797 unsigned int highest = 0;
798 unsigned int npins = 0;
799 int i;
800
801 /*
802 * Compute number of GPIOs from the last SoC gpio range descriptors
803 * These ranges may include "holes" but the GPIO number space shall
804 * still be homogeneous, so we need to detect and account for any
805 * such holes so that these are included in the number of GPIO pins.
806 */
807 for (i = 0; i < soc->gpio_num_ranges; i++) {
808 unsigned gstart;
809 unsigned gend;
810 const struct abx500_pinrange *p;
811
812 p = &soc->gpio_ranges[i];
813 gstart = p->offset;
814 gend = p->offset + p->npins - 1;
815
816 if (i == 0) {
817 /* First iteration, set start values */
818 lowest = gstart;
819 highest = gend;
820 } else {
821 if (gstart < lowest)
822 lowest = gstart;
823 if (gend > highest)
824 highest = gend;
825 }
826 }
827 /* this gives the absolute number of pins */
828 npins = highest - lowest + 1;
829 return npins;
830}
831
832static const struct of_device_id abx500_gpio_match[] = {
833 { .compatible = "stericsson,ab8500-gpio", .data = (void *)PINCTRL_AB8500, },
834 { .compatible = "stericsson,ab8505-gpio", .data = (void *)PINCTRL_AB8505, },
835 { .compatible = "stericsson,ab8540-gpio", .data = (void *)PINCTRL_AB8540, },
836 { .compatible = "stericsson,ab9540-gpio", .data = (void *)PINCTRL_AB9540, },
837};
838
839static int abx500_gpio_probe(struct platform_device *pdev)
840{
841 struct ab8500_platform_data *abx500_pdata =
842 dev_get_platdata(pdev->dev.parent);
843 struct abx500_gpio_platform_data *pdata = NULL;
844 struct device_node *np = pdev->dev.of_node;
845 struct abx500_pinctrl *pct;
846 const struct platform_device_id *platid = platform_get_device_id(pdev);
847 unsigned int id = -1;
848 int ret, err;
849 int i;
850
851 if (abx500_pdata)
852 pdata = abx500_pdata->gpio;
853 if (!pdata) {
854 if (np) {
855 const struct of_device_id *match;
856
857 match = of_match_device(abx500_gpio_match, &pdev->dev);
858 if (!match)
859 return -ENODEV;
860 id = (unsigned long)match->data;
861 } else {
862 dev_err(&pdev->dev, "gpio dt and platform data missing\n");
863 return -ENODEV;
864 }
865 }
866
867 if (platid)
868 id = platid->driver_data;
869
870 pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
871 GFP_KERNEL);
872 if (pct == NULL) {
873 dev_err(&pdev->dev,
874 "failed to allocate memory for pct\n");
875 return -ENOMEM;
876 }
877
878 pct->dev = &pdev->dev;
879 pct->parent = dev_get_drvdata(pdev->dev.parent);
880 pct->chip = abx500gpio_chip;
881 pct->chip.dev = &pdev->dev;
882 pct->chip.base = pdata->gpio_base;
883 pct->chip.base = (np) ? -1 : pdata->gpio_base;
884
885 /* initialize the lock */
886 mutex_init(&pct->lock);
887
888 /* Poke in other ASIC variants here */
889 switch (id) {
890 case PINCTRL_AB8500:
891 abx500_pinctrl_ab8500_init(&pct->soc);
892 break;
893 case PINCTRL_AB8540:
894 abx500_pinctrl_ab8540_init(&pct->soc);
895 break;
896 case PINCTRL_AB9540:
897 abx500_pinctrl_ab9540_init(&pct->soc);
898 break;
899 case PINCTRL_AB8505:
900 abx500_pinctrl_ab8505_init(&pct->soc);
901 break;
902 default:
903 dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
904 (int) platid->driver_data);
905 mutex_destroy(&pct->lock);
906 return -EINVAL;
907 }
908
909 if (!pct->soc) {
910 dev_err(&pdev->dev, "Invalid SOC data\n");
911 mutex_destroy(&pct->lock);
912 return -EINVAL;
913 }
914
915 pct->chip.ngpio = abx500_get_gpio_num(pct->soc);
916 pct->irq_cluster = pct->soc->gpio_irq_cluster;
917 pct->irq_cluster_size = pct->soc->ngpio_irq_cluster;
918
919 ret = gpiochip_add(&pct->chip);
920 if (ret) {
921 dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
922 mutex_destroy(&pct->lock);
923 return ret;
924 }
925 dev_info(&pdev->dev, "added gpiochip\n");
926
927 abx500_pinctrl_desc.pins = pct->soc->pins;
928 abx500_pinctrl_desc.npins = pct->soc->npins;
929 pct->pctldev = pinctrl_register(&abx500_pinctrl_desc, &pdev->dev, pct);
930 if (!pct->pctldev) {
931 dev_err(&pdev->dev,
932 "could not register abx500 pinctrl driver\n");
933 ret = -EINVAL;
934 goto out_rem_chip;
935 }
936 dev_info(&pdev->dev, "registered pin controller\n");
937
938 /* We will handle a range of GPIO pins */
939 for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
940 const struct abx500_pinrange *p = &pct->soc->gpio_ranges[i];
941
942 ret = gpiochip_add_pin_range(&pct->chip,
943 dev_name(&pdev->dev),
944 p->offset - 1, p->offset, p->npins);
945 if (ret < 0)
946 goto out_rem_chip;
947 }
948
949 platform_set_drvdata(pdev, pct);
950 dev_info(&pdev->dev, "initialized abx500 pinctrl driver\n");
951
952 return 0;
953
954out_rem_chip:
955 err = gpiochip_remove(&pct->chip);
956 if (err)
957 dev_info(&pdev->dev, "failed to remove gpiochip\n");
958
959 mutex_destroy(&pct->lock);
960 return ret;
961}
962
/**
 * abx500_gpio_remove() - remove the ABx500 GPIO/pin controller driver
 * @pdev: platform device being removed
 *
 * Removes the gpiochip registered in abx500_gpio_probe() and destroys
 * the driver mutex.  Returns 0 on success, or the gpiochip_remove()
 * error (in which case the mutex is left intact).
 *
 * NOTE(review): the pin controller registered with pinctrl_register()
 * in probe is never unregistered here -- confirm whether a matching
 * pinctrl_unregister() call is missing.
 */
static int abx500_gpio_remove(struct platform_device *pdev)
{
	struct abx500_pinctrl *pct = platform_get_drvdata(pdev);
	int ret;

	ret = gpiochip_remove(&pct->chip);
	if (ret < 0) {
		dev_err(pct->dev, "unable to remove gpiochip: %d\n",
			ret);
		return ret;
	}

	mutex_destroy(&pct->lock);

	return 0;
}
983
/*
 * Platform device id table, used when the device is instantiated from
 * board platform data; driver_data carries the ASIC variant id.
 */
static const struct platform_device_id abx500_pinctrl_id[] = {
	{ "pinctrl-ab8500", PINCTRL_AB8500 },
	{ "pinctrl-ab8540", PINCTRL_AB8540 },
	{ "pinctrl-ab9540", PINCTRL_AB9540 },
	{ "pinctrl-ab8505", PINCTRL_AB8505 },
	{ },
};
991
/* Platform driver; binds by device name (id_table) or by DT compatible */
static struct platform_driver abx500_gpio_driver = {
	.driver = {
		.name = "abx500-gpio",
		.owner = THIS_MODULE,
		.of_match_table = abx500_gpio_match,
	},
	.probe = abx500_gpio_probe,
	.remove = abx500_gpio_remove,
	.id_table = abx500_pinctrl_id,
};
1002
/*
 * Driver registration.  NOTE(review): registered at core_initcall level
 * rather than module_init -- presumably so the GPIOs are available to
 * other early drivers; confirm the dependency that requires this.
 */
static int __init abx500_gpio_init(void)
{
	return platform_driver_register(&abx500_gpio_driver);
}
core_initcall(abx500_gpio_init);
1008
1009MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
1010MODULE_DESCRIPTION("Driver allows to use AxB5xx unused pins to be used as GPIO");
1011MODULE_ALIAS("platform:abx500-gpio");
1012MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
new file mode 100644
index 000000000000..eeca8f973999
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-abx500.h
@@ -0,0 +1,234 @@
1#ifndef PINCTRL_PINCTRL_ABx5O0_H
2#define PINCTRL_PINCTRL_ABx500_H
3
4/* Package definitions */
5#define PINCTRL_AB8500 0
6#define PINCTRL_AB8540 1
7#define PINCTRL_AB9540 2
8#define PINCTRL_AB8505 3
9
10/* pins alternate function */
11enum abx500_pin_func {
12 ABX500_DEFAULT,
13 ABX500_ALT_A,
14 ABX500_ALT_B,
15 ABX500_ALT_C,
16};
17
18/**
19 * struct abx500_function - ABx500 pinctrl mux function
20 * @name: The name of the function, exported to pinctrl core.
21 * @groups: An array of pin groups that may select this function.
22 * @ngroups: The number of entries in @groups.
23 */
24struct abx500_function {
25 const char *name;
26 const char * const *groups;
27 unsigned ngroups;
28};
29
30/**
31 * struct abx500_pingroup - describes a ABx500 pin group
32 * @name: the name of this specific pin group
33 * @pins: an array of discrete physical pins used in this group, taken
34 * from the driver-local pin enumeration space
35 * @num_pins: the number of pins in this group array, i.e. the number of
36 * elements in .pins so we can iterate over that array
37 * @altsetting: the altsetting to apply to all pins in this group to
38 * configure them to be used by a function
39 */
40struct abx500_pingroup {
41 const char *name;
42 const unsigned int *pins;
43 const unsigned npins;
44 int altsetting;
45};
46
47#define ALTERNATE_FUNCTIONS(pin, sel_bit, alt1, alt2, alta, altb, altc) \
48{ \
49 .pin_number = pin, \
50 .gpiosel_bit = sel_bit, \
51 .alt_bit1 = alt1, \
52 .alt_bit2 = alt2, \
53 .alta_val = alta, \
54 .altb_val = altb, \
55 .altc_val = altc, \
56}
57
58#define UNUSED -1
59/**
60 * struct alternate_functions
61 * @pin_number: The pin number
62 * @gpiosel_bit: Control bit in GPIOSEL register,
63 * @alt_bit1: First AlternateFunction bit used to select the
64 * alternate function
65 * @alt_bit2: Second AlternateFunction bit used to select the
66 * alternate function
67 *
 *
 * The following three fields are needed because the ABx500 SoC family
 * is not consistent about how the altA, altB and altC functions are
 * selected through the alternatfunc register.
72 * @alta_val: value to write in alternatfunc to select altA function
73 * @altb_val: value to write in alternatfunc to select altB function
74 * @altc_val: value to write in alternatfunc to select altC function
75 */
76struct alternate_functions {
77 unsigned pin_number;
78 s8 gpiosel_bit;
79 s8 alt_bit1;
80 s8 alt_bit2;
81 u8 alta_val;
82 u8 altb_val;
83 u8 altc_val;
84};
85
86/**
87 * struct pullud - specific pull up/down feature
 * @first_pin: number of the first pin that supports the specific
 *             pull up/down feature
 * @last_pin: number of the last pin that supports it
91 */
92struct pullud {
93 unsigned first_pin;
94 unsigned last_pin;
95};
96
97#define GPIO_IRQ_CLUSTER(a, b, c) \
98{ \
99 .start = a, \
100 .end = b, \
101 .to_irq = c, \
102}
103
104/**
105 * struct abx500_gpio_irq_cluster - indicates GPIOs which are interrupt
106 * capable
107 * @start: The pin number of the first pin interrupt capable
108 * @end: The pin number of the last pin interrupt capable
109 * @to_irq: The ABx500 GPIO's associated IRQs are clustered
110 * together throughout the interrupt numbers at irregular
111 * intervals. To solve this quandary, we will place the
112 * read-in values into the cluster information table
113 */
114
115struct abx500_gpio_irq_cluster {
116 int start;
117 int end;
118 int to_irq;
119};
120
121/**
122 * struct abx500_pinrange - map pin numbers to GPIO offsets
123 * @offset: offset into the GPIO local numberspace, incidentally
124 * identical to the offset into the local pin numberspace
125 * @npins: number of pins to map from both offsets
126 * @altfunc: altfunc setting to be used to enable GPIO on a pin in
127 * this range (may vary)
128 */
129struct abx500_pinrange {
130 unsigned int offset;
131 unsigned int npins;
132 int altfunc;
133};
134
135#define ABX500_PINRANGE(a, b, c) { .offset = a, .npins = b, .altfunc = c }
136
137/**
138 * struct abx500_pinctrl_soc_data - ABx500 pin controller per-SoC configuration
139 * @gpio_ranges: An array of GPIO ranges for this SoC
140 * @gpio_num_ranges: The number of GPIO ranges for this SoC
141 * @pins: An array describing all pins the pin controller affects.
142 * All pins which are also GPIOs must be listed first within the
143 * array, and be numbered identically to the GPIO controller's
144 * numbering.
145 * @npins: The number of entries in @pins.
146 * @functions: The functions supported on this SoC.
147 * @nfunction: The number of entries in @functions.
148 * @groups: An array describing all pin groups the pin SoC supports.
149 * @ngroups: The number of entries in @groups.
150 * @alternate_functions: array describing pins which supports alternate and
151 * how to set it.
152 * @pullud: array describing pins which supports pull up/down
153 * specific registers.
 * @gpio_irq_cluster: array of interrupt-capable GPIO clusters for this SoC
 * @ngpio_irq_cluster: number of entries in @gpio_irq_cluster
156 * @irq_gpio_rising_offset: Interrupt offset used as base to compute specific
157 * setting strategy of the rising interrupt line
158 * @irq_gpio_falling_offset: Interrupt offset used as base to compute specific
159 * setting strategy of the falling interrupt line
160 * @irq_gpio_factor: Factor used to compute specific setting strategy of
161 * the interrupt line
162 */
163
164struct abx500_pinctrl_soc_data {
165 const struct abx500_pinrange *gpio_ranges;
166 unsigned gpio_num_ranges;
167 const struct pinctrl_pin_desc *pins;
168 unsigned npins;
169 const struct abx500_function *functions;
170 unsigned nfunctions;
171 const struct abx500_pingroup *groups;
172 unsigned ngroups;
173 struct alternate_functions *alternate_functions;
174 struct pullud *pullud;
175 struct abx500_gpio_irq_cluster *gpio_irq_cluster;
176 unsigned ngpio_irq_cluster;
177 int irq_gpio_rising_offset;
178 int irq_gpio_falling_offset;
179 int irq_gpio_factor;
180};
181
182#ifdef CONFIG_PINCTRL_AB8500
183
184void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc);
185
186#else
187
188static inline void
189abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
190{
191}
192
193#endif
194
195#ifdef CONFIG_PINCTRL_AB8540
196
197void abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc);
198
199#else
200
201static inline void
202abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
203{
204}
205
206#endif
207
208#ifdef CONFIG_PINCTRL_AB9540
209
210void abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc);
211
212#else
213
214static inline void
215abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
216{
217}
218
219#endif
220
221#ifdef CONFIG_PINCTRL_AB8505
222
223void abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc);
224
225#else
226
227static inline void
228abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
229{
230}
231
232#endif
233
234#endif /* PINCTRL_PINCTRL_ABx500_H */
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index de05b64f0da6..142729914c34 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -599,7 +599,7 @@ static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offse
599} 599}
600 600
601/* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */ 601/* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
602static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev, 602static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
603 struct device_node *cfg_np, unsigned int **pin_list, 603 struct device_node *cfg_np, unsigned int **pin_list,
604 unsigned int *npins) 604 unsigned int *npins)
605{ 605{
@@ -630,7 +630,7 @@ static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
630 * Parse the information about all the available pin groups and pin functions 630 * Parse the information about all the available pin groups and pin functions
631 * from device node of the pin-controller. 631 * from device node of the pin-controller.
632 */ 632 */
633static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev, 633static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
634 struct exynos5440_pinctrl_priv_data *priv) 634 struct exynos5440_pinctrl_priv_data *priv)
635{ 635{
636 struct device *dev = &pdev->dev; 636 struct device *dev = &pdev->dev;
@@ -723,7 +723,7 @@ static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
723} 723}
724 724
725/* register the pinctrl interface with the pinctrl subsystem */ 725/* register the pinctrl interface with the pinctrl subsystem */
726static int __init exynos5440_pinctrl_register(struct platform_device *pdev, 726static int exynos5440_pinctrl_register(struct platform_device *pdev,
727 struct exynos5440_pinctrl_priv_data *priv) 727 struct exynos5440_pinctrl_priv_data *priv)
728{ 728{
729 struct device *dev = &pdev->dev; 729 struct device *dev = &pdev->dev;
@@ -798,7 +798,7 @@ static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
798} 798}
799 799
800/* register the gpiolib interface with the gpiolib subsystem */ 800/* register the gpiolib interface with the gpiolib subsystem */
801static int __init exynos5440_gpiolib_register(struct platform_device *pdev, 801static int exynos5440_gpiolib_register(struct platform_device *pdev,
802 struct exynos5440_pinctrl_priv_data *priv) 802 struct exynos5440_pinctrl_priv_data *priv)
803{ 803{
804 struct gpio_chip *gc; 804 struct gpio_chip *gc;
@@ -831,7 +831,7 @@ static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
831} 831}
832 832
833/* unregister the gpiolib interface with the gpiolib subsystem */ 833/* unregister the gpiolib interface with the gpiolib subsystem */
834static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev, 834static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
835 struct exynos5440_pinctrl_priv_data *priv) 835 struct exynos5440_pinctrl_priv_data *priv)
836{ 836{
837 int ret = gpiochip_remove(priv->gc); 837 int ret = gpiochip_remove(priv->gc);
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 8ed20e84cb02..4a0d54a08890 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -170,7 +170,7 @@ static const unsigned pins_ntr[] = {GPIO4};
170static const unsigned pins_ntr8k[] = {GPIO5}; 170static const unsigned pins_ntr8k[] = {GPIO5};
171static const unsigned pins_hrst[] = {GPIO6}; 171static const unsigned pins_hrst[] = {GPIO6};
172static const unsigned pins_mdio[] = {GPIO7, GPIO8}; 172static const unsigned pins_mdio[] = {GPIO7, GPIO8};
173static const unsigned pins_bled[] = {GPIO7, GPIO10, GPIO11, 173static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11,
174 GPIO12, GPIO13, GPIO14}; 174 GPIO12, GPIO13, GPIO14};
175static const unsigned pins_asc0[] = {GPIO32, GPIO33}; 175static const unsigned pins_asc0[] = {GPIO32, GPIO33};
176static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36}; 176static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36};
@@ -315,6 +315,37 @@ static int falcon_pinconf_set(struct pinctrl_dev *pctrldev,
315static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev, 315static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev,
316 struct seq_file *s, unsigned offset) 316 struct seq_file *s, unsigned offset)
317{ 317{
318 unsigned long config;
319 struct pin_desc *desc;
320
321 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
322 int port = PORT(offset);
323
324 seq_printf(s, " (port %d) mux %d -- ", port,
325 pad_r32(info->membase[port], LTQ_PADC_MUX(PORT_PIN(offset))));
326
327 config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_PULL, 0);
328 if (!falcon_pinconf_get(pctrldev, offset, &config))
329 seq_printf(s, "pull %d ",
330 (int)LTQ_PINCONF_UNPACK_ARG(config));
331
332 config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_DRIVE_CURRENT, 0);
333 if (!falcon_pinconf_get(pctrldev, offset, &config))
334 seq_printf(s, "drive-current %d ",
335 (int)LTQ_PINCONF_UNPACK_ARG(config));
336
337 config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_SLEW_RATE, 0);
338 if (!falcon_pinconf_get(pctrldev, offset, &config))
339 seq_printf(s, "slew-rate %d ",
340 (int)LTQ_PINCONF_UNPACK_ARG(config));
341
342 desc = pin_desc_get(pctrldev, offset);
343 if (desc) {
344 if (desc->gpio_owner)
345 seq_printf(s, " owner: %s", desc->gpio_owner);
346 } else {
347 seq_printf(s, " not registered");
348 }
318} 349}
319 350
320static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev, 351static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev,
@@ -360,6 +391,8 @@ static const struct ltq_cfg_param falcon_cfg_params[] = {
360static struct ltq_pinmux_info falcon_info = { 391static struct ltq_pinmux_info falcon_info = {
361 .desc = &falcon_pctrl_desc, 392 .desc = &falcon_pctrl_desc,
362 .apply_mux = falcon_mux_apply, 393 .apply_mux = falcon_mux_apply,
394 .params = falcon_cfg_params,
395 .num_params = ARRAY_SIZE(falcon_cfg_params),
363}; 396};
364 397
365 398
@@ -398,6 +431,9 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
398 u32 avail; 431 u32 avail;
399 int pins; 432 int pins;
400 433
434 if (!of_device_is_available(np))
435 continue;
436
401 if (!ppdev) { 437 if (!ppdev) {
402 dev_err(&pdev->dev, "failed to find pad pdev\n"); 438 dev_err(&pdev->dev, "failed to find pad pdev\n");
403 continue; 439 continue;
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index 15f501d89026..a70384611351 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -64,11 +64,13 @@ static void ltq_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
64 seq_printf(s, " %s", dev_name(pctldev->dev)); 64 seq_printf(s, " %s", dev_name(pctldev->dev));
65} 65}
66 66
67static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, 67static void ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
68 struct device_node *np, 68 struct device_node *np,
69 struct pinctrl_map **map) 69 struct pinctrl_map **map)
70{ 70{
71 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev); 71 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
72 struct property *pins = of_find_property(np, "lantiq,pins", NULL);
73 struct property *groups = of_find_property(np, "lantiq,groups", NULL);
72 unsigned long configs[3]; 74 unsigned long configs[3];
73 unsigned num_configs = 0; 75 unsigned num_configs = 0;
74 struct property *prop; 76 struct property *prop;
@@ -76,8 +78,20 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
76 const char *function; 78 const char *function;
77 int ret, i; 79 int ret, i;
78 80
81 if (!pins && !groups) {
82 dev_err(pctldev->dev, "%s defines neither pins nor groups\n",
83 np->name);
84 return;
85 }
86
87 if (pins && groups) {
88 dev_err(pctldev->dev, "%s defines both pins and groups\n",
89 np->name);
90 return;
91 }
92
79 ret = of_property_read_string(np, "lantiq,function", &function); 93 ret = of_property_read_string(np, "lantiq,function", &function);
80 if (!ret) { 94 if (groups && !ret) {
81 of_property_for_each_string(np, "lantiq,groups", prop, group) { 95 of_property_for_each_string(np, "lantiq,groups", prop, group) {
82 (*map)->type = PIN_MAP_TYPE_MUX_GROUP; 96 (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
83 (*map)->name = function; 97 (*map)->name = function;
@@ -85,11 +99,6 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
85 (*map)->data.mux.function = function; 99 (*map)->data.mux.function = function;
86 (*map)++; 100 (*map)++;
87 } 101 }
88 if (of_find_property(np, "lantiq,pins", NULL))
89 dev_err(pctldev->dev,
90 "%s mixes pins and groups settings\n",
91 np->name);
92 return 0;
93 } 102 }
94 103
95 for (i = 0; i < info->num_params; i++) { 104 for (i = 0; i < info->num_params; i++) {
@@ -103,7 +112,7 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
103 } 112 }
104 113
105 if (!num_configs) 114 if (!num_configs)
106 return -EINVAL; 115 return;
107 116
108 of_property_for_each_string(np, "lantiq,pins", prop, pin) { 117 of_property_for_each_string(np, "lantiq,pins", prop, pin) {
109 (*map)->data.configs.configs = kmemdup(configs, 118 (*map)->data.configs.configs = kmemdup(configs,
@@ -115,7 +124,16 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
115 (*map)->data.configs.num_configs = num_configs; 124 (*map)->data.configs.num_configs = num_configs;
116 (*map)++; 125 (*map)++;
117 } 126 }
118 return 0; 127 of_property_for_each_string(np, "lantiq,groups", prop, group) {
128 (*map)->data.configs.configs = kmemdup(configs,
129 num_configs * sizeof(unsigned long),
130 GFP_KERNEL);
131 (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
132 (*map)->name = group;
133 (*map)->data.configs.group_or_pin = group;
134 (*map)->data.configs.num_configs = num_configs;
135 (*map)++;
136 }
119} 137}
120 138
121static int ltq_pinctrl_dt_subnode_size(struct device_node *np) 139static int ltq_pinctrl_dt_subnode_size(struct device_node *np)
@@ -135,23 +153,19 @@ static int ltq_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
135{ 153{
136 struct pinctrl_map *tmp; 154 struct pinctrl_map *tmp;
137 struct device_node *np; 155 struct device_node *np;
138 int ret; 156 int max_maps = 0;
139 157
140 *num_maps = 0;
141 for_each_child_of_node(np_config, np) 158 for_each_child_of_node(np_config, np)
142 *num_maps += ltq_pinctrl_dt_subnode_size(np); 159 max_maps += ltq_pinctrl_dt_subnode_size(np);
143 *map = kzalloc(*num_maps * sizeof(struct pinctrl_map), GFP_KERNEL); 160 *map = kzalloc(max_maps * sizeof(struct pinctrl_map) * 2, GFP_KERNEL);
144 if (!*map) 161 if (!*map)
145 return -ENOMEM; 162 return -ENOMEM;
146 tmp = *map; 163 tmp = *map;
147 164
148 for_each_child_of_node(np_config, np) { 165 for_each_child_of_node(np_config, np)
149 ret = ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp); 166 ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
150 if (ret < 0) { 167 *num_maps = ((int)(tmp - *map));
151 ltq_pinctrl_dt_free_map(pctldev, *map, *num_maps); 168
152 return ret;
153 }
154 }
155 return 0; 169 return 0;
156} 170}
157 171
@@ -280,7 +294,7 @@ static int ltq_pmx_gpio_request_enable(struct pinctrl_dev *pctrldev,
280 unsigned pin) 294 unsigned pin)
281{ 295{
282 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); 296 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
283 int mfp = match_mfp(info, pin + (range->id * 32)); 297 int mfp = match_mfp(info, pin);
284 int pin_func; 298 int pin_func;
285 299
286 if (mfp < 0) { 300 if (mfp < 0) {
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index 4419d32a0ade..6d07f0238532 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -34,6 +34,7 @@ enum ltq_pinconf_param {
34 LTQ_PINCONF_PARAM_OPEN_DRAIN, 34 LTQ_PINCONF_PARAM_OPEN_DRAIN,
35 LTQ_PINCONF_PARAM_DRIVE_CURRENT, 35 LTQ_PINCONF_PARAM_DRIVE_CURRENT,
36 LTQ_PINCONF_PARAM_SLEW_RATE, 36 LTQ_PINCONF_PARAM_SLEW_RATE,
37 LTQ_PINCONF_PARAM_OUTPUT,
37}; 38};
38 39
39struct ltq_cfg_param { 40struct ltq_cfg_param {
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index dd227d21dcf2..23af9f1f9c35 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -146,7 +146,7 @@ free:
146static void mxs_dt_free_map(struct pinctrl_dev *pctldev, 146static void mxs_dt_free_map(struct pinctrl_dev *pctldev,
147 struct pinctrl_map *map, unsigned num_maps) 147 struct pinctrl_map *map, unsigned num_maps)
148{ 148{
149 int i; 149 u32 i;
150 150
151 for (i = 0; i < num_maps; i++) { 151 for (i = 0; i < num_maps; i++) {
152 if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) 152 if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
@@ -203,7 +203,7 @@ static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
203 void __iomem *reg; 203 void __iomem *reg;
204 u8 bank, shift; 204 u8 bank, shift;
205 u16 pin; 205 u16 pin;
206 int i; 206 u32 i;
207 207
208 for (i = 0; i < g->npins; i++) { 208 for (i = 0; i < g->npins; i++) {
209 bank = PINID_TO_BANK(g->pins[i]); 209 bank = PINID_TO_BANK(g->pins[i]);
@@ -256,7 +256,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
256 void __iomem *reg; 256 void __iomem *reg;
257 u8 ma, vol, pull, bank, shift; 257 u8 ma, vol, pull, bank, shift;
258 u16 pin; 258 u16 pin;
259 int i; 259 u32 i;
260 260
261 ma = CONFIG_TO_MA(config); 261 ma = CONFIG_TO_MA(config);
262 vol = CONFIG_TO_VOL(config); 262 vol = CONFIG_TO_VOL(config);
@@ -345,8 +345,7 @@ static int mxs_pinctrl_parse_group(struct platform_device *pdev,
345 const char *propname = "fsl,pinmux-ids"; 345 const char *propname = "fsl,pinmux-ids";
346 char *group; 346 char *group;
347 int length = strlen(np->name) + SUFFIX_LEN; 347 int length = strlen(np->name) + SUFFIX_LEN;
348 int i; 348 u32 val, i;
349 u32 val;
350 349
351 group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL); 350 group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
352 if (!group) 351 if (!group)
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 1bb16ffb4e41..de9e8519b803 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -25,6 +25,8 @@
25#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28#include <linux/of_address.h>
29#include <linux/pinctrl/machine.h>
28#include <linux/pinctrl/pinctrl.h> 30#include <linux/pinctrl/pinctrl.h>
29#include <linux/pinctrl/pinmux.h> 31#include <linux/pinctrl/pinmux.h>
30#include <linux/pinctrl/pinconf.h> 32#include <linux/pinctrl/pinconf.h>
@@ -32,8 +34,8 @@
32#include <linux/pinctrl/consumer.h> 34#include <linux/pinctrl/consumer.h>
33#include <linux/platform_data/pinctrl-nomadik.h> 35#include <linux/platform_data/pinctrl-nomadik.h>
34#include <asm/mach/irq.h> 36#include <asm/mach/irq.h>
35#include <mach/irqs.h>
36#include "pinctrl-nomadik.h" 37#include "pinctrl-nomadik.h"
38#include "core.h"
37 39
38/* 40/*
39 * The GPIO module in the Nomadik family of Systems-on-Chip is an 41 * The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -216,7 +218,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
216 u32 falling = nmk_chip->fimsc & BIT(offset); 218 u32 falling = nmk_chip->fimsc & BIT(offset);
217 u32 rising = nmk_chip->rimsc & BIT(offset); 219 u32 rising = nmk_chip->rimsc & BIT(offset);
218 int gpio = nmk_chip->chip.base + offset; 220 int gpio = nmk_chip->chip.base + offset;
219 int irq = NOMADIK_GPIO_TO_IRQ(gpio); 221 int irq = irq_find_mapping(nmk_chip->domain, offset);
220 struct irq_data *d = irq_get_irq_data(irq); 222 struct irq_data *d = irq_get_irq_data(irq);
221 223
222 if (!rising && !falling) 224 if (!rising && !falling)
@@ -676,7 +678,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)
676} 678}
677EXPORT_SYMBOL(nmk_gpio_set_mode); 679EXPORT_SYMBOL(nmk_gpio_set_mode);
678 680
679static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) 681static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
680{ 682{
681 int i; 683 int i;
682 u16 reg; 684 u16 reg;
@@ -1341,8 +1343,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
1341 1343
1342 if (of_property_read_u32(np, "gpio-bank", &dev->id)) { 1344 if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
1343 dev_err(&dev->dev, "gpio-bank property not found\n"); 1345 dev_err(&dev->dev, "gpio-bank property not found\n");
1344 ret = -EINVAL; 1346 return -EINVAL;
1345 goto out;
1346 } 1347 }
1347 1348
1348 pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP; 1349 pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
@@ -1350,41 +1351,29 @@ static int nmk_gpio_probe(struct platform_device *dev)
1350 } 1351 }
1351 1352
1352 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 1353 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1353 if (!res) { 1354 if (!res)
1354 ret = -ENOENT; 1355 return -ENOENT;
1355 goto out;
1356 }
1357 1356
1358 irq = platform_get_irq(dev, 0); 1357 irq = platform_get_irq(dev, 0);
1359 if (irq < 0) { 1358 if (irq < 0)
1360 ret = irq; 1359 return irq;
1361 goto out;
1362 }
1363 1360
1364 secondary_irq = platform_get_irq(dev, 1); 1361 secondary_irq = platform_get_irq(dev, 1);
1365 if (secondary_irq >= 0 && !pdata->get_secondary_status) { 1362 if (secondary_irq >= 0 && !pdata->get_secondary_status)
1366 ret = -EINVAL; 1363 return -EINVAL;
1367 goto out;
1368 }
1369 1364
1370 base = devm_request_and_ioremap(&dev->dev, res); 1365 base = devm_request_and_ioremap(&dev->dev, res);
1371 if (!base) { 1366 if (!base)
1372 ret = -ENOMEM; 1367 return -ENOMEM;
1373 goto out;
1374 }
1375 1368
1376 clk = devm_clk_get(&dev->dev, NULL); 1369 clk = devm_clk_get(&dev->dev, NULL);
1377 if (IS_ERR(clk)) { 1370 if (IS_ERR(clk))
1378 ret = PTR_ERR(clk); 1371 return PTR_ERR(clk);
1379 goto out;
1380 }
1381 clk_prepare(clk); 1372 clk_prepare(clk);
1382 1373
1383 nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL); 1374 nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
1384 if (!nmk_chip) { 1375 if (!nmk_chip)
1385 ret = -ENOMEM; 1376 return -ENOMEM;
1386 goto out;
1387 }
1388 1377
1389 /* 1378 /*
1390 * The virt address in nmk_chip->addr is in the nomadik register space, 1379 * The virt address in nmk_chip->addr is in the nomadik register space,
@@ -1418,7 +1407,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
1418 1407
1419 ret = gpiochip_add(&nmk_chip->chip); 1408 ret = gpiochip_add(&nmk_chip->chip);
1420 if (ret) 1409 if (ret)
1421 goto out; 1410 return ret;
1422 1411
1423 BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips)); 1412 BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
1424 1413
@@ -1427,14 +1416,15 @@ static int nmk_gpio_probe(struct platform_device *dev)
1427 platform_set_drvdata(dev, nmk_chip); 1416 platform_set_drvdata(dev, nmk_chip);
1428 1417
1429 if (!np) 1418 if (!np)
1430 irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio); 1419 irq_start = pdata->first_irq;
1431 nmk_chip->domain = irq_domain_add_simple(np, 1420 nmk_chip->domain = irq_domain_add_simple(np,
1432 NMK_GPIO_PER_CHIP, irq_start, 1421 NMK_GPIO_PER_CHIP, irq_start,
1433 &nmk_gpio_irq_simple_ops, nmk_chip); 1422 &nmk_gpio_irq_simple_ops, nmk_chip);
1434 if (!nmk_chip->domain) { 1423 if (!nmk_chip->domain) {
1435 dev_err(&dev->dev, "failed to create irqdomain\n"); 1424 dev_err(&dev->dev, "failed to create irqdomain\n");
1436 ret = -ENOSYS; 1425 /* Just do this, no matter if it fails */
1437 goto out; 1426 ret = gpiochip_remove(&nmk_chip->chip);
1427 return -ENOSYS;
1438 } 1428 }
1439 1429
1440 nmk_gpio_init_irq(nmk_chip); 1430 nmk_gpio_init_irq(nmk_chip);
@@ -1442,12 +1432,6 @@ static int nmk_gpio_probe(struct platform_device *dev)
1442 dev_info(&dev->dev, "at address %p\n", nmk_chip->addr); 1432 dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
1443 1433
1444 return 0; 1434 return 0;
1445
1446out:
1447 dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
1448 pdata->first_gpio, pdata->first_gpio+31);
1449
1450 return ret;
1451} 1435}
1452 1436
1453static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev) 1437static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
@@ -1508,11 +1492,285 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
1508 nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset); 1492 nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
1509} 1493}
1510 1494
1495static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
1496 struct pinctrl_map *map, unsigned num_maps)
1497{
1498 int i;
1499
1500 for (i = 0; i < num_maps; i++)
1501 if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
1502 kfree(map[i].data.configs.configs);
1503 kfree(map);
1504}
1505
1506static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps,
1507 unsigned *num_maps, unsigned reserve)
1508{
1509 unsigned old_num = *reserved_maps;
1510 unsigned new_num = *num_maps + reserve;
1511 struct pinctrl_map *new_map;
1512
1513 if (old_num >= new_num)
1514 return 0;
1515
1516 new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
1517 if (!new_map)
1518 return -ENOMEM;
1519
1520 memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
1521
1522 *map = new_map;
1523 *reserved_maps = new_num;
1524
1525 return 0;
1526}
1527
1528static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
1529 unsigned *num_maps, const char *group,
1530 const char *function)
1531{
1532 if (*num_maps == *reserved_maps)
1533 return -ENOSPC;
1534
1535 (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
1536 (*map)[*num_maps].data.mux.group = group;
1537 (*map)[*num_maps].data.mux.function = function;
1538 (*num_maps)++;
1539
1540 return 0;
1541}
1542
1543static int nmk_dt_add_map_configs(struct pinctrl_map **map,
1544 unsigned *reserved_maps,
1545 unsigned *num_maps, const char *group,
1546 unsigned long *configs, unsigned num_configs)
1547{
1548 unsigned long *dup_configs;
1549
1550 if (*num_maps == *reserved_maps)
1551 return -ENOSPC;
1552
1553 dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
1554 GFP_KERNEL);
1555 if (!dup_configs)
1556 return -ENOMEM;
1557
1558 (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN;
1559
1560 (*map)[*num_maps].data.configs.group_or_pin = group;
1561 (*map)[*num_maps].data.configs.configs = dup_configs;
1562 (*map)[*num_maps].data.configs.num_configs = num_configs;
1563 (*num_maps)++;
1564
1565 return 0;
1566}
1567
1568#define NMK_CONFIG_PIN(x,y) { .property = x, .config = y, }
1569#define NMK_CONFIG_PIN_ARRAY(x,y) { .property = x, .choice = y, \
1570 .size = ARRAY_SIZE(y), }
1571
1572static const unsigned long nmk_pin_input_modes[] = {
1573 PIN_INPUT_NOPULL,
1574 PIN_INPUT_PULLUP,
1575 PIN_INPUT_PULLDOWN,
1576};
1577
1578static const unsigned long nmk_pin_output_modes[] = {
1579 PIN_OUTPUT_LOW,
1580 PIN_OUTPUT_HIGH,
1581 PIN_DIR_OUTPUT,
1582};
1583
1584static const unsigned long nmk_pin_sleep_modes[] = {
1585 PIN_SLEEPMODE_DISABLED,
1586 PIN_SLEEPMODE_ENABLED,
1587};
1588
1589static const unsigned long nmk_pin_sleep_input_modes[] = {
1590 PIN_SLPM_INPUT_NOPULL,
1591 PIN_SLPM_INPUT_PULLUP,
1592 PIN_SLPM_INPUT_PULLDOWN,
1593 PIN_SLPM_DIR_INPUT,
1594};
1595
1596static const unsigned long nmk_pin_sleep_output_modes[] = {
1597 PIN_SLPM_OUTPUT_LOW,
1598 PIN_SLPM_OUTPUT_HIGH,
1599 PIN_SLPM_DIR_OUTPUT,
1600};
1601
1602static const unsigned long nmk_pin_sleep_wakeup_modes[] = {
1603 PIN_SLPM_WAKEUP_DISABLE,
1604 PIN_SLPM_WAKEUP_ENABLE,
1605};
1606
1607static const unsigned long nmk_pin_gpio_modes[] = {
1608 PIN_GPIOMODE_DISABLED,
1609 PIN_GPIOMODE_ENABLED,
1610};
1611
1612static const unsigned long nmk_pin_sleep_pdis_modes[] = {
1613 PIN_SLPM_PDIS_DISABLED,
1614 PIN_SLPM_PDIS_ENABLED,
1615};
1616
1617struct nmk_cfg_param {
1618 const char *property;
1619 unsigned long config;
1620 const unsigned long *choice;
1621 int size;
1622};
1623
1624static const struct nmk_cfg_param nmk_cfg_params[] = {
1625 NMK_CONFIG_PIN_ARRAY("ste,input", nmk_pin_input_modes),
1626 NMK_CONFIG_PIN_ARRAY("ste,output", nmk_pin_output_modes),
1627 NMK_CONFIG_PIN_ARRAY("ste,sleep", nmk_pin_sleep_modes),
1628 NMK_CONFIG_PIN_ARRAY("ste,sleep-input", nmk_pin_sleep_input_modes),
1629 NMK_CONFIG_PIN_ARRAY("ste,sleep-output", nmk_pin_sleep_output_modes),
1630 NMK_CONFIG_PIN_ARRAY("ste,sleep-wakeup", nmk_pin_sleep_wakeup_modes),
1631 NMK_CONFIG_PIN_ARRAY("ste,gpio", nmk_pin_gpio_modes),
1632 NMK_CONFIG_PIN_ARRAY("ste,sleep-pull-disable", nmk_pin_sleep_pdis_modes),
1633};
1634
1635static int nmk_dt_pin_config(int index, int val, unsigned long *config)
1636{
1637 int ret = 0;
1638
1639 if (nmk_cfg_params[index].choice == NULL)
1640 *config = nmk_cfg_params[index].config;
1641 else {
1642 /* test if out of range */
1643 if (val < nmk_cfg_params[index].size) {
1644 *config = nmk_cfg_params[index].config |
1645 nmk_cfg_params[index].choice[val];
1646 }
1647 }
1648 return ret;
1649}
1650
1651static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name)
1652{
1653 int i, pin_number;
1654 struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
1655
1656 if (sscanf((char *)pin_name, "GPIO%d", &pin_number) == 1)
1657 for (i = 0; i < npct->soc->npins; i++)
1658 if (npct->soc->pins[i].number == pin_number)
1659 return npct->soc->pins[i].name;
1660 return NULL;
1661}
1662
1663static bool nmk_pinctrl_dt_get_config(struct device_node *np,
1664 unsigned long *configs)
1665{
1666 bool has_config = 0;
1667 unsigned long cfg = 0;
1668 int i, val, ret;
1669
1670 for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) {
1671 ret = of_property_read_u32(np,
1672 nmk_cfg_params[i].property, &val);
1673 if (ret != -EINVAL) {
1674 if (nmk_dt_pin_config(i, val, &cfg) == 0) {
1675 *configs |= cfg;
1676 has_config = 1;
1677 }
1678 }
1679 }
1680
1681 return has_config;
1682}
1683
1684int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
1685 struct device_node *np,
1686 struct pinctrl_map **map,
1687 unsigned *reserved_maps,
1688 unsigned *num_maps)
1689{
1690 int ret;
1691 const char *function = NULL;
1692 unsigned long configs = 0;
1693 bool has_config = 0;
1694 unsigned reserve = 0;
1695 struct property *prop;
1696 const char *group, *gpio_name;
1697 struct device_node *np_config;
1698
1699 ret = of_property_read_string(np, "ste,function", &function);
1700 if (ret >= 0)
1701 reserve = 1;
1702
1703 has_config = nmk_pinctrl_dt_get_config(np, &configs);
1704
1705 np_config = of_parse_phandle(np, "ste,config", 0);
1706 if (np_config)
1707 has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
1708
1709 ret = of_property_count_strings(np, "ste,pins");
1710 if (ret < 0)
1711 goto exit;
1712
1713 if (has_config)
1714 reserve++;
1715
1716 reserve *= ret;
1717
1718 ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve);
1719 if (ret < 0)
1720 goto exit;
1721
1722 of_property_for_each_string(np, "ste,pins", prop, group) {
1723 if (function) {
1724 ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
1725 group, function);
1726 if (ret < 0)
1727 goto exit;
1728 }
1729 if (has_config) {
1730 gpio_name = nmk_find_pin_name(pctldev, group);
1731
1732 ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps,
1733 gpio_name, &configs, 1);
1734 if (ret < 0)
1735 goto exit;
1736 }
1737
1738 }
1739exit:
1740 return ret;
1741}
1742
1743int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
1744 struct device_node *np_config,
1745 struct pinctrl_map **map, unsigned *num_maps)
1746{
1747 unsigned reserved_maps;
1748 struct device_node *np;
1749 int ret;
1750
1751 reserved_maps = 0;
1752 *map = NULL;
1753 *num_maps = 0;
1754
1755 for_each_child_of_node(np_config, np) {
1756 ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
1757 &reserved_maps, num_maps);
1758 if (ret < 0) {
1759 nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps);
1760 return ret;
1761 }
1762 }
1763
1764 return 0;
1765}
1766
1511static struct pinctrl_ops nmk_pinctrl_ops = { 1767static struct pinctrl_ops nmk_pinctrl_ops = {
1512 .get_groups_count = nmk_get_groups_cnt, 1768 .get_groups_count = nmk_get_groups_cnt,
1513 .get_group_name = nmk_get_group_name, 1769 .get_group_name = nmk_get_group_name,
1514 .get_group_pins = nmk_get_group_pins, 1770 .get_group_pins = nmk_get_group_pins,
1515 .pin_dbg_show = nmk_pin_dbg_show, 1771 .pin_dbg_show = nmk_pin_dbg_show,
1772 .dt_node_to_map = nmk_pinctrl_dt_node_to_map,
1773 .dt_free_map = nmk_pinctrl_dt_free_map,
1516}; 1774};
1517 1775
1518static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev) 1776static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -1846,16 +2104,39 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
1846 2104
1847static const struct of_device_id nmk_pinctrl_match[] = { 2105static const struct of_device_id nmk_pinctrl_match[] = {
1848 { 2106 {
1849 .compatible = "stericsson,nmk_pinctrl", 2107 .compatible = "stericsson,nmk-pinctrl",
1850 .data = (void *)PINCTRL_NMK_DB8500, 2108 .data = (void *)PINCTRL_NMK_DB8500,
1851 }, 2109 },
1852 {}, 2110 {},
1853}; 2111};
1854 2112
2113static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
2114{
2115 struct nmk_pinctrl *npct;
2116
2117 npct = platform_get_drvdata(pdev);
2118 if (!npct)
2119 return -EINVAL;
2120
2121 return pinctrl_force_sleep(npct->pctl);
2122}
2123
2124static int nmk_pinctrl_resume(struct platform_device *pdev)
2125{
2126 struct nmk_pinctrl *npct;
2127
2128 npct = platform_get_drvdata(pdev);
2129 if (!npct)
2130 return -EINVAL;
2131
2132 return pinctrl_force_default(npct->pctl);
2133}
2134
1855static int nmk_pinctrl_probe(struct platform_device *pdev) 2135static int nmk_pinctrl_probe(struct platform_device *pdev)
1856{ 2136{
1857 const struct platform_device_id *platid = platform_get_device_id(pdev); 2137 const struct platform_device_id *platid = platform_get_device_id(pdev);
1858 struct device_node *np = pdev->dev.of_node; 2138 struct device_node *np = pdev->dev.of_node;
2139 struct device_node *prcm_np;
1859 struct nmk_pinctrl *npct; 2140 struct nmk_pinctrl *npct;
1860 struct resource *res; 2141 struct resource *res;
1861 unsigned int version = 0; 2142 unsigned int version = 0;
@@ -1884,21 +2165,26 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
1884 if (version == PINCTRL_NMK_DB8540) 2165 if (version == PINCTRL_NMK_DB8540)
1885 nmk_pinctrl_db8540_init(&npct->soc); 2166 nmk_pinctrl_db8540_init(&npct->soc);
1886 2167
2168 if (np) {
2169 prcm_np = of_parse_phandle(np, "prcm", 0);
2170 if (prcm_np)
2171 npct->prcm_base = of_iomap(prcm_np, 0);
2172 }
2173
2174 /* Allow platform passed information to over-write DT. */
1887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1888 if (res) { 2176 if (res)
1889 npct->prcm_base = devm_ioremap(&pdev->dev, res->start, 2177 npct->prcm_base = devm_ioremap(&pdev->dev, res->start,
1890 resource_size(res)); 2178 resource_size(res));
1891 if (!npct->prcm_base) { 2179 if (!npct->prcm_base) {
1892 dev_err(&pdev->dev, 2180 if (version == PINCTRL_NMK_STN8815) {
1893 "failed to ioremap PRCM registers\n"); 2181 dev_info(&pdev->dev,
1894 return -ENOMEM; 2182 "No PRCM base, "
2183 "assuming no ALT-Cx control is available\n");
2184 } else {
2185 dev_err(&pdev->dev, "missing PRCM base address\n");
2186 return -EINVAL;
1895 } 2187 }
1896 } else if (version == PINCTRL_NMK_STN8815) {
1897 dev_info(&pdev->dev,
1898 "No PRCM base, assume no ALT-Cx control is available\n");
1899 } else {
1900 dev_err(&pdev->dev, "missing PRCM base address\n");
1901 return -EINVAL;
1902 } 2188 }
1903 2189
1904 /* 2190 /*
@@ -1963,6 +2249,10 @@ static struct platform_driver nmk_pinctrl_driver = {
1963 }, 2249 },
1964 .probe = nmk_pinctrl_probe, 2250 .probe = nmk_pinctrl_probe,
1965 .id_table = nmk_pinctrl_id, 2251 .id_table = nmk_pinctrl_id,
2252#ifdef CONFIG_PM
2253 .suspend = nmk_pinctrl_suspend,
2254 .resume = nmk_pinctrl_resume,
2255#endif
1966}; 2256};
1967 2257
1968static int __init nmk_gpio_init(void) 2258static int __init nmk_gpio_init(void)
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index fd7b24cd8908..5c20ed056054 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -716,7 +716,6 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
716 } 716 }
717 ctrldesc->pins = pindesc; 717 ctrldesc->pins = pindesc;
718 ctrldesc->npins = drvdata->ctrl->nr_pins; 718 ctrldesc->npins = drvdata->ctrl->nr_pins;
719 ctrldesc->npins = drvdata->ctrl->nr_pins;
720 719
721 /* dynamically populate the pin number and pin name for pindesc */ 720 /* dynamically populate the pin number and pin name for pindesc */
722 for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++) 721 for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index f6a360b86eb6..5c32e880bcb2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -30,7 +30,6 @@
30#define PCS_MUX_BITS_NAME "pinctrl-single,bits" 30#define PCS_MUX_BITS_NAME "pinctrl-single,bits"
31#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 1) 31#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 1)
32#define PCS_OFF_DISABLED ~0U 32#define PCS_OFF_DISABLED ~0U
33#define PCS_MAX_GPIO_VALUES 2
34 33
35/** 34/**
36 * struct pcs_pingroup - pingroups for a function 35 * struct pcs_pingroup - pingroups for a function
@@ -78,16 +77,6 @@ struct pcs_function {
78}; 77};
79 78
80/** 79/**
81 * struct pcs_gpio_range - pinctrl gpio range
82 * @range: subrange of the GPIO number space
83 * @gpio_func: gpio function value in the pinmux register
84 */
85struct pcs_gpio_range {
86 struct pinctrl_gpio_range range;
87 int gpio_func;
88};
89
90/**
91 * struct pcs_data - wrapper for data needed by pinctrl framework 80 * struct pcs_data - wrapper for data needed by pinctrl framework
92 * @pa: pindesc array 81 * @pa: pindesc array
93 * @cur: index to current element 82 * @cur: index to current element
@@ -414,26 +403,9 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
414} 403}
415 404
416static int pcs_request_gpio(struct pinctrl_dev *pctldev, 405static int pcs_request_gpio(struct pinctrl_dev *pctldev,
417 struct pinctrl_gpio_range *range, unsigned pin) 406 struct pinctrl_gpio_range *range, unsigned offset)
418{ 407{
419 struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); 408 return -ENOTSUPP;
420 struct pcs_gpio_range *gpio = NULL;
421 int end, mux_bytes;
422 unsigned data;
423
424 gpio = container_of(range, struct pcs_gpio_range, range);
425 end = range->pin_base + range->npins - 1;
426 if (pin < range->pin_base || pin > end) {
427 dev_err(pctldev->dev,
428 "pin %d isn't in the range of %d to %d\n",
429 pin, range->pin_base, end);
430 return -EINVAL;
431 }
432 mux_bytes = pcs->width / BITS_PER_BYTE;
433 data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask;
434 data |= gpio->gpio_func;
435 pcs->write(data, pcs->base + pin * mux_bytes);
436 return 0;
437} 409}
438 410
439static struct pinmux_ops pcs_pinmux_ops = { 411static struct pinmux_ops pcs_pinmux_ops = {
@@ -907,49 +879,6 @@ static void pcs_free_resources(struct pcs_device *pcs)
907 879
908static struct of_device_id pcs_of_match[]; 880static struct of_device_id pcs_of_match[];
909 881
910static int pcs_add_gpio_range(struct device_node *node, struct pcs_device *pcs)
911{
912 struct pcs_gpio_range *gpio;
913 struct device_node *child;
914 struct resource r;
915 const char name[] = "pinctrl-single";
916 u32 gpiores[PCS_MAX_GPIO_VALUES];
917 int ret, i = 0, mux_bytes = 0;
918
919 for_each_child_of_node(node, child) {
920 ret = of_address_to_resource(child, 0, &r);
921 if (ret < 0)
922 continue;
923 memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES);
924 ret = of_property_read_u32_array(child, "pinctrl-single,gpio",
925 gpiores, PCS_MAX_GPIO_VALUES);
926 if (ret < 0)
927 continue;
928 gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL);
929 if (!gpio) {
930 dev_err(pcs->dev, "failed to allocate pcs gpio\n");
931 return -ENOMEM;
932 }
933 gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name),
934 GFP_KERNEL);
935 if (!gpio->range.name) {
936 dev_err(pcs->dev, "failed to allocate range name\n");
937 return -ENOMEM;
938 }
939 memcpy((char *)gpio->range.name, name, sizeof(name));
940
941 gpio->range.id = i++;
942 gpio->range.base = gpiores[0];
943 gpio->gpio_func = gpiores[1];
944 mux_bytes = pcs->width / BITS_PER_BYTE;
945 gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes;
946 gpio->range.npins = (r.end - r.start) / mux_bytes + 1;
947
948 pinctrl_add_gpio_range(pcs->pctl, &gpio->range);
949 }
950 return 0;
951}
952
953static int pcs_probe(struct platform_device *pdev) 882static int pcs_probe(struct platform_device *pdev)
954{ 883{
955 struct device_node *np = pdev->dev.of_node; 884 struct device_node *np = pdev->dev.of_node;
@@ -1046,10 +975,6 @@ static int pcs_probe(struct platform_device *pdev)
1046 goto free; 975 goto free;
1047 } 976 }
1048 977
1049 ret = pcs_add_gpio_range(np, pcs);
1050 if (ret < 0)
1051 goto free;
1052
1053 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 978 dev_info(pcs->dev, "%i pins at pa %p size %u\n",
1054 pcs->desc.npins, pcs->base, pcs->size); 979 pcs->desc.npins, pcs->base, pcs->size);
1055 980
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 498b2ba905de..d02498b30c6e 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
1246 return of_iomap(np, 0); 1246 return of_iomap(np, 0);
1247} 1247}
1248 1248
1249static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
1250 const struct of_phandle_args *gpiospec,
1251 u32 *flags)
1252{
1253 if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
1254 return -EINVAL;
1255
1256 if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
1257 return -EINVAL;
1258
1259 if (flags)
1260 *flags = gpiospec->args[1];
1261
1262 return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
1263}
1264
1249static int sirfsoc_pinmux_probe(struct platform_device *pdev) 1265static int sirfsoc_pinmux_probe(struct platform_device *pdev)
1250{ 1266{
1251 int ret; 1267 int ret;
@@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)
1736 bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; 1752 bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
1737 bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); 1753 bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
1738 bank->chip.gc.of_node = np; 1754 bank->chip.gc.of_node = np;
1755 bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
1756 bank->chip.gc.of_gpio_n_cells = 2;
1739 bank->chip.regs = regs; 1757 bank->chip.regs = regs;
1740 bank->id = i; 1758 bank->id = i;
1741 bank->is_marco = is_marco; 1759 bank->is_marco = is_marco;
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
new file mode 100644
index 000000000000..80b11e3415bc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -0,0 +1,1505 @@
1/*
2 * Allwinner A1X SoCs pinctrl driver.
3 *
4 * Copyright (C) 2012 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/io.h>
14#include <linux/gpio.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/of_device.h>
19#include <linux/pinctrl/consumer.h>
20#include <linux/pinctrl/machine.h>
21#include <linux/pinctrl/pinctrl.h>
22#include <linux/pinctrl/pinconf-generic.h>
23#include <linux/pinctrl/pinmux.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27#include "core.h"
28#include "pinctrl-sunxi.h"
29
30static const struct sunxi_desc_pin sun4i_a10_pins[] = {
31 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
32 SUNXI_FUNCTION(0x0, "gpio_in"),
33 SUNXI_FUNCTION(0x1, "gpio_out")),
34 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA1,
35 SUNXI_FUNCTION(0x0, "gpio_in"),
36 SUNXI_FUNCTION(0x1, "gpio_out")),
37 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA2,
38 SUNXI_FUNCTION(0x0, "gpio_in"),
39 SUNXI_FUNCTION(0x1, "gpio_out")),
40 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA3,
41 SUNXI_FUNCTION(0x0, "gpio_in"),
42 SUNXI_FUNCTION(0x1, "gpio_out")),
43 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA4,
44 SUNXI_FUNCTION(0x0, "gpio_in"),
45 SUNXI_FUNCTION(0x1, "gpio_out")),
46 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA5,
47 SUNXI_FUNCTION(0x0, "gpio_in"),
48 SUNXI_FUNCTION(0x1, "gpio_out")),
49 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA6,
50 SUNXI_FUNCTION(0x0, "gpio_in"),
51 SUNXI_FUNCTION(0x1, "gpio_out")),
52 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA7,
53 SUNXI_FUNCTION(0x0, "gpio_in"),
54 SUNXI_FUNCTION(0x1, "gpio_out")),
55 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA8,
56 SUNXI_FUNCTION(0x0, "gpio_in"),
57 SUNXI_FUNCTION(0x1, "gpio_out")),
58 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA9,
59 SUNXI_FUNCTION(0x0, "gpio_in"),
60 SUNXI_FUNCTION(0x1, "gpio_out")),
61 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA10,
62 SUNXI_FUNCTION(0x0, "gpio_in"),
63 SUNXI_FUNCTION(0x1, "gpio_out"),
64 SUNXI_FUNCTION(0x4, "uart1")), /* TX */
65 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA11,
66 SUNXI_FUNCTION(0x0, "gpio_in"),
67 SUNXI_FUNCTION(0x1, "gpio_out"),
68 SUNXI_FUNCTION(0x4, "uart1")), /* RX */
69 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA12,
70 SUNXI_FUNCTION(0x0, "gpio_in"),
71 SUNXI_FUNCTION(0x1, "gpio_out"),
72 SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
73 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA13,
74 SUNXI_FUNCTION(0x0, "gpio_in"),
75 SUNXI_FUNCTION(0x1, "gpio_out"),
76 SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
77 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA14,
78 SUNXI_FUNCTION(0x0, "gpio_in"),
79 SUNXI_FUNCTION(0x1, "gpio_out"),
80 SUNXI_FUNCTION(0x4, "uart1")), /* DTR */
81 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA15,
82 SUNXI_FUNCTION(0x0, "gpio_in"),
83 SUNXI_FUNCTION(0x1, "gpio_out"),
84 SUNXI_FUNCTION(0x4, "uart1")), /* DSR */
85 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA16,
86 SUNXI_FUNCTION(0x0, "gpio_in"),
87 SUNXI_FUNCTION(0x1, "gpio_out"),
88 SUNXI_FUNCTION(0x4, "uart1")), /* DCD */
89 SUNXI_PIN(SUNXI_PINCTRL_PIN_PA17,
90 SUNXI_FUNCTION(0x0, "gpio_in"),
91 SUNXI_FUNCTION(0x1, "gpio_out"),
92 SUNXI_FUNCTION(0x4, "uart1")), /* RING */
93 /* Hole */
94 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
95 SUNXI_FUNCTION(0x0, "gpio_in"),
96 SUNXI_FUNCTION(0x1, "gpio_out")),
97 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
98 SUNXI_FUNCTION(0x0, "gpio_in"),
99 SUNXI_FUNCTION(0x1, "gpio_out")),
100 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
101 SUNXI_FUNCTION(0x0, "gpio_in"),
102 SUNXI_FUNCTION(0x1, "gpio_out")),
103 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
104 SUNXI_FUNCTION(0x0, "gpio_in"),
105 SUNXI_FUNCTION(0x1, "gpio_out")),
106 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
107 SUNXI_FUNCTION(0x0, "gpio_in"),
108 SUNXI_FUNCTION(0x1, "gpio_out")),
109 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB5,
110 SUNXI_FUNCTION(0x0, "gpio_in"),
111 SUNXI_FUNCTION(0x1, "gpio_out")),
112 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB6,
113 SUNXI_FUNCTION(0x0, "gpio_in"),
114 SUNXI_FUNCTION(0x1, "gpio_out")),
115 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB7,
116 SUNXI_FUNCTION(0x0, "gpio_in"),
117 SUNXI_FUNCTION(0x1, "gpio_out")),
118 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB8,
119 SUNXI_FUNCTION(0x0, "gpio_in"),
120 SUNXI_FUNCTION(0x1, "gpio_out")),
121 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB9,
122 SUNXI_FUNCTION(0x0, "gpio_in"),
123 SUNXI_FUNCTION(0x1, "gpio_out")),
124 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
125 SUNXI_FUNCTION(0x0, "gpio_in"),
126 SUNXI_FUNCTION(0x1, "gpio_out")),
127 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB11,
128 SUNXI_FUNCTION(0x0, "gpio_in"),
129 SUNXI_FUNCTION(0x1, "gpio_out")),
130 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB12,
131 SUNXI_FUNCTION(0x0, "gpio_in"),
132 SUNXI_FUNCTION(0x1, "gpio_out")),
133 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB13,
134 SUNXI_FUNCTION(0x0, "gpio_in"),
135 SUNXI_FUNCTION(0x1, "gpio_out")),
136 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB14,
137 SUNXI_FUNCTION(0x0, "gpio_in"),
138 SUNXI_FUNCTION(0x1, "gpio_out")),
139 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
140 SUNXI_FUNCTION(0x0, "gpio_in"),
141 SUNXI_FUNCTION(0x1, "gpio_out")),
142 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
143 SUNXI_FUNCTION(0x0, "gpio_in"),
144 SUNXI_FUNCTION(0x1, "gpio_out")),
145 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
146 SUNXI_FUNCTION(0x0, "gpio_in"),
147 SUNXI_FUNCTION(0x1, "gpio_out")),
148 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
149 SUNXI_FUNCTION(0x0, "gpio_in"),
150 SUNXI_FUNCTION(0x1, "gpio_out")),
151 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB19,
152 SUNXI_FUNCTION(0x0, "gpio_in"),
153 SUNXI_FUNCTION(0x1, "gpio_out")),
154 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB20,
155 SUNXI_FUNCTION(0x0, "gpio_in"),
156 SUNXI_FUNCTION(0x1, "gpio_out")),
157 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB21,
158 SUNXI_FUNCTION(0x0, "gpio_in"),
159 SUNXI_FUNCTION(0x1, "gpio_out")),
160 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB22,
161 SUNXI_FUNCTION(0x0, "gpio_in"),
162 SUNXI_FUNCTION(0x1, "gpio_out"),
163 SUNXI_FUNCTION(0x2, "uart0")), /* TX */
164 SUNXI_PIN(SUNXI_PINCTRL_PIN_PB23,
165 SUNXI_FUNCTION(0x0, "gpio_in"),
166 SUNXI_FUNCTION(0x1, "gpio_out"),
167 SUNXI_FUNCTION(0x2, "uart0")), /* RX */
168 /* Hole */
169 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
170 SUNXI_FUNCTION(0x0, "gpio_in"),
171 SUNXI_FUNCTION(0x1, "gpio_out")),
172 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
173 SUNXI_FUNCTION(0x0, "gpio_in"),
174 SUNXI_FUNCTION(0x1, "gpio_out")),
175 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
176 SUNXI_FUNCTION(0x0, "gpio_in"),
177 SUNXI_FUNCTION(0x1, "gpio_out")),
178 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
179 SUNXI_FUNCTION(0x0, "gpio_in"),
180 SUNXI_FUNCTION(0x1, "gpio_out")),
181 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
182 SUNXI_FUNCTION(0x0, "gpio_in"),
183 SUNXI_FUNCTION(0x1, "gpio_out")),
184 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
185 SUNXI_FUNCTION(0x0, "gpio_in"),
186 SUNXI_FUNCTION(0x1, "gpio_out")),
187 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
188 SUNXI_FUNCTION(0x0, "gpio_in"),
189 SUNXI_FUNCTION(0x1, "gpio_out")),
190 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
191 SUNXI_FUNCTION(0x0, "gpio_in"),
192 SUNXI_FUNCTION(0x1, "gpio_out")),
193 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
194 SUNXI_FUNCTION(0x0, "gpio_in"),
195 SUNXI_FUNCTION(0x1, "gpio_out")),
196 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
197 SUNXI_FUNCTION(0x0, "gpio_in"),
198 SUNXI_FUNCTION(0x1, "gpio_out")),
199 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
200 SUNXI_FUNCTION(0x0, "gpio_in"),
201 SUNXI_FUNCTION(0x1, "gpio_out")),
202 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
203 SUNXI_FUNCTION(0x0, "gpio_in"),
204 SUNXI_FUNCTION(0x1, "gpio_out")),
205 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
206 SUNXI_FUNCTION(0x0, "gpio_in"),
207 SUNXI_FUNCTION(0x1, "gpio_out")),
208 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
209 SUNXI_FUNCTION(0x0, "gpio_in"),
210 SUNXI_FUNCTION(0x1, "gpio_out")),
211 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
212 SUNXI_FUNCTION(0x0, "gpio_in"),
213 SUNXI_FUNCTION(0x1, "gpio_out")),
214 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
215 SUNXI_FUNCTION(0x0, "gpio_in"),
216 SUNXI_FUNCTION(0x1, "gpio_out")),
217 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC16,
218 SUNXI_FUNCTION(0x0, "gpio_in"),
219 SUNXI_FUNCTION(0x1, "gpio_out")),
220 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC17,
221 SUNXI_FUNCTION(0x0, "gpio_in"),
222 SUNXI_FUNCTION(0x1, "gpio_out")),
223 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC18,
224 SUNXI_FUNCTION(0x0, "gpio_in"),
225 SUNXI_FUNCTION(0x1, "gpio_out")),
226 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
227 SUNXI_FUNCTION(0x0, "gpio_in"),
228 SUNXI_FUNCTION(0x1, "gpio_out")),
229 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC20,
230 SUNXI_FUNCTION(0x0, "gpio_in"),
231 SUNXI_FUNCTION(0x1, "gpio_out")),
232 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC21,
233 SUNXI_FUNCTION(0x0, "gpio_in"),
234 SUNXI_FUNCTION(0x1, "gpio_out")),
235 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC22,
236 SUNXI_FUNCTION(0x0, "gpio_in"),
237 SUNXI_FUNCTION(0x1, "gpio_out")),
238 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC23,
239 SUNXI_FUNCTION(0x0, "gpio_in"),
240 SUNXI_FUNCTION(0x1, "gpio_out")),
241 SUNXI_PIN(SUNXI_PINCTRL_PIN_PC24,
242 SUNXI_FUNCTION(0x0, "gpio_in"),
243 SUNXI_FUNCTION(0x1, "gpio_out")),
244 /* Hole */
245 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD0,
246 SUNXI_FUNCTION(0x0, "gpio_in"),
247 SUNXI_FUNCTION(0x1, "gpio_out")),
248 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD1,
249 SUNXI_FUNCTION(0x0, "gpio_in"),
250 SUNXI_FUNCTION(0x1, "gpio_out")),
251 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
252 SUNXI_FUNCTION(0x0, "gpio_in"),
253 SUNXI_FUNCTION(0x1, "gpio_out")),
254 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
255 SUNXI_FUNCTION(0x0, "gpio_in"),
256 SUNXI_FUNCTION(0x1, "gpio_out")),
257 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
258 SUNXI_FUNCTION(0x0, "gpio_in"),
259 SUNXI_FUNCTION(0x1, "gpio_out")),
260 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
261 SUNXI_FUNCTION(0x0, "gpio_in"),
262 SUNXI_FUNCTION(0x1, "gpio_out")),
263 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
264 SUNXI_FUNCTION(0x0, "gpio_in"),
265 SUNXI_FUNCTION(0x1, "gpio_out")),
266 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
267 SUNXI_FUNCTION(0x0, "gpio_in"),
268 SUNXI_FUNCTION(0x1, "gpio_out")),
269 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD8,
270 SUNXI_FUNCTION(0x0, "gpio_in"),
271 SUNXI_FUNCTION(0x1, "gpio_out")),
272 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD9,
273 SUNXI_FUNCTION(0x0, "gpio_in"),
274 SUNXI_FUNCTION(0x1, "gpio_out")),
275 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
276 SUNXI_FUNCTION(0x0, "gpio_in"),
277 SUNXI_FUNCTION(0x1, "gpio_out")),
278 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
279 SUNXI_FUNCTION(0x0, "gpio_in"),
280 SUNXI_FUNCTION(0x1, "gpio_out")),
281 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
282 SUNXI_FUNCTION(0x0, "gpio_in"),
283 SUNXI_FUNCTION(0x1, "gpio_out")),
284 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
285 SUNXI_FUNCTION(0x0, "gpio_in"),
286 SUNXI_FUNCTION(0x1, "gpio_out")),
287 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
288 SUNXI_FUNCTION(0x0, "gpio_in"),
289 SUNXI_FUNCTION(0x1, "gpio_out")),
290 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
291 SUNXI_FUNCTION(0x0, "gpio_in"),
292 SUNXI_FUNCTION(0x1, "gpio_out")),
293 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD16,
294 SUNXI_FUNCTION(0x0, "gpio_in"),
295 SUNXI_FUNCTION(0x1, "gpio_out")),
296 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD17,
297 SUNXI_FUNCTION(0x0, "gpio_in"),
298 SUNXI_FUNCTION(0x1, "gpio_out")),
299 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
300 SUNXI_FUNCTION(0x0, "gpio_in"),
301 SUNXI_FUNCTION(0x1, "gpio_out")),
302 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
303 SUNXI_FUNCTION(0x0, "gpio_in"),
304 SUNXI_FUNCTION(0x1, "gpio_out")),
305 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
306 SUNXI_FUNCTION(0x0, "gpio_in"),
307 SUNXI_FUNCTION(0x1, "gpio_out")),
308 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
309 SUNXI_FUNCTION(0x0, "gpio_in"),
310 SUNXI_FUNCTION(0x1, "gpio_out")),
311 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
312 SUNXI_FUNCTION(0x0, "gpio_in"),
313 SUNXI_FUNCTION(0x1, "gpio_out")),
314 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
315 SUNXI_FUNCTION(0x0, "gpio_in"),
316 SUNXI_FUNCTION(0x1, "gpio_out")),
317 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
318 SUNXI_FUNCTION(0x0, "gpio_in"),
319 SUNXI_FUNCTION(0x1, "gpio_out")),
320 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
321 SUNXI_FUNCTION(0x0, "gpio_in"),
322 SUNXI_FUNCTION(0x1, "gpio_out")),
323 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
324 SUNXI_FUNCTION(0x0, "gpio_in"),
325 SUNXI_FUNCTION(0x1, "gpio_out")),
326 SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
327 SUNXI_FUNCTION(0x0, "gpio_in"),
328 SUNXI_FUNCTION(0x1, "gpio_out")),
329 /* Hole */
330 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
331 SUNXI_FUNCTION(0x0, "gpio_in"),
332 SUNXI_FUNCTION(0x1, "gpio_out")),
333 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
334 SUNXI_FUNCTION(0x0, "gpio_in"),
335 SUNXI_FUNCTION(0x1, "gpio_out")),
336 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
337 SUNXI_FUNCTION(0x0, "gpio_in"),
338 SUNXI_FUNCTION(0x1, "gpio_out")),
339 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
340 SUNXI_FUNCTION(0x0, "gpio_in"),
341 SUNXI_FUNCTION(0x1, "gpio_out")),
342 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
343 SUNXI_FUNCTION(0x0, "gpio_in"),
344 SUNXI_FUNCTION(0x1, "gpio_out")),
345 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
346 SUNXI_FUNCTION(0x0, "gpio_in"),
347 SUNXI_FUNCTION(0x1, "gpio_out")),
348 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
349 SUNXI_FUNCTION(0x0, "gpio_in"),
350 SUNXI_FUNCTION(0x1, "gpio_out")),
351 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
352 SUNXI_FUNCTION(0x0, "gpio_in"),
353 SUNXI_FUNCTION(0x1, "gpio_out")),
354 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
355 SUNXI_FUNCTION(0x0, "gpio_in"),
356 SUNXI_FUNCTION(0x1, "gpio_out")),
357 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
358 SUNXI_FUNCTION(0x0, "gpio_in"),
359 SUNXI_FUNCTION(0x1, "gpio_out")),
360 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
361 SUNXI_FUNCTION(0x0, "gpio_in"),
362 SUNXI_FUNCTION(0x1, "gpio_out")),
363 SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
364 SUNXI_FUNCTION(0x0, "gpio_in"),
365 SUNXI_FUNCTION(0x1, "gpio_out")),
366 /* Hole */
367 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
368 SUNXI_FUNCTION(0x0, "gpio_in"),
369 SUNXI_FUNCTION(0x1, "gpio_out")),
370 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
371 SUNXI_FUNCTION(0x0, "gpio_in"),
372 SUNXI_FUNCTION(0x1, "gpio_out")),
373 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
374 SUNXI_FUNCTION(0x0, "gpio_in"),
375 SUNXI_FUNCTION(0x1, "gpio_out"),
376 SUNXI_FUNCTION(0x4, "uart0")), /* TX */
377 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
378 SUNXI_FUNCTION(0x0, "gpio_in"),
379 SUNXI_FUNCTION(0x1, "gpio_out")),
380 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
381 SUNXI_FUNCTION(0x0, "gpio_in"),
382 SUNXI_FUNCTION(0x1, "gpio_out"),
383 SUNXI_FUNCTION(0x4, "uart0")), /* RX */
384 SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
385 SUNXI_FUNCTION(0x0, "gpio_in"),
386 SUNXI_FUNCTION(0x1, "gpio_out")),
387 /* Hole */
388 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
389 SUNXI_FUNCTION(0x0, "gpio_in"),
390 SUNXI_FUNCTION(0x1, "gpio_out")),
391 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
392 SUNXI_FUNCTION(0x0, "gpio_in"),
393 SUNXI_FUNCTION(0x1, "gpio_out")),
394 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
395 SUNXI_FUNCTION(0x0, "gpio_in"),
396 SUNXI_FUNCTION(0x1, "gpio_out")),
397 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
398 SUNXI_FUNCTION(0x0, "gpio_in"),
399 SUNXI_FUNCTION(0x1, "gpio_out")),
400 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
401 SUNXI_FUNCTION(0x0, "gpio_in"),
402 SUNXI_FUNCTION(0x1, "gpio_out")),
403 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG5,
404 SUNXI_FUNCTION(0x0, "gpio_in"),
405 SUNXI_FUNCTION(0x1, "gpio_out")),
406 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG6,
407 SUNXI_FUNCTION(0x0, "gpio_in"),
408 SUNXI_FUNCTION(0x1, "gpio_out")),
409 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG7,
410 SUNXI_FUNCTION(0x0, "gpio_in"),
411 SUNXI_FUNCTION(0x1, "gpio_out")),
412 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG8,
413 SUNXI_FUNCTION(0x0, "gpio_in"),
414 SUNXI_FUNCTION(0x1, "gpio_out")),
415 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
416 SUNXI_FUNCTION(0x0, "gpio_in"),
417 SUNXI_FUNCTION(0x1, "gpio_out")),
418 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
419 SUNXI_FUNCTION(0x0, "gpio_in"),
420 SUNXI_FUNCTION(0x1, "gpio_out")),
421 SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
422 SUNXI_FUNCTION(0x0, "gpio_in"),
423 SUNXI_FUNCTION(0x1, "gpio_out")),
424 /* Hole */
425 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH0,
426 SUNXI_FUNCTION(0x0, "gpio_in"),
427 SUNXI_FUNCTION(0x1, "gpio_out")),
428 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH1,
429 SUNXI_FUNCTION(0x0, "gpio_in"),
430 SUNXI_FUNCTION(0x1, "gpio_out")),
431 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH2,
432 SUNXI_FUNCTION(0x0, "gpio_in"),
433 SUNXI_FUNCTION(0x1, "gpio_out")),
434 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH3,
435 SUNXI_FUNCTION(0x0, "gpio_in"),
436 SUNXI_FUNCTION(0x1, "gpio_out")),
437 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH4,
438 SUNXI_FUNCTION(0x0, "gpio_in"),
439 SUNXI_FUNCTION(0x1, "gpio_out")),
440 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH5,
441 SUNXI_FUNCTION(0x0, "gpio_in"),
442 SUNXI_FUNCTION(0x1, "gpio_out")),
443 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH6,
444 SUNXI_FUNCTION(0x0, "gpio_in"),
445 SUNXI_FUNCTION(0x1, "gpio_out")),
446 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH7,
447 SUNXI_FUNCTION(0x0, "gpio_in"),
448 SUNXI_FUNCTION(0x1, "gpio_out")),
449 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH8,
450 SUNXI_FUNCTION(0x0, "gpio_in"),
451 SUNXI_FUNCTION(0x1, "gpio_out")),
452 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH9,
453 SUNXI_FUNCTION(0x0, "gpio_in"),
454 SUNXI_FUNCTION(0x1, "gpio_out")),
455 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH10,
456 SUNXI_FUNCTION(0x0, "gpio_in"),
457 SUNXI_FUNCTION(0x1, "gpio_out")),
458 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH11,
459 SUNXI_FUNCTION(0x0, "gpio_in"),
460 SUNXI_FUNCTION(0x1, "gpio_out")),
461 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH12,
462 SUNXI_FUNCTION(0x0, "gpio_in"),
463 SUNXI_FUNCTION(0x1, "gpio_out")),
464 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH13,
465 SUNXI_FUNCTION(0x0, "gpio_in"),
466 SUNXI_FUNCTION(0x1, "gpio_out")),
467 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH14,
468 SUNXI_FUNCTION(0x0, "gpio_in"),
469 SUNXI_FUNCTION(0x1, "gpio_out")),
470 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH15,
471 SUNXI_FUNCTION(0x0, "gpio_in"),
472 SUNXI_FUNCTION(0x1, "gpio_out")),
473 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH16,
474 SUNXI_FUNCTION(0x0, "gpio_in"),
475 SUNXI_FUNCTION(0x1, "gpio_out")),
476 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH17,
477 SUNXI_FUNCTION(0x0, "gpio_in"),
478 SUNXI_FUNCTION(0x1, "gpio_out")),
479 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
480 SUNXI_FUNCTION(0x0, "gpio_in"),
481 SUNXI_FUNCTION(0x1, "gpio_out")),
482 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
483 SUNXI_FUNCTION(0x0, "gpio_in"),
484 SUNXI_FUNCTION(0x1, "gpio_out")),
485 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
486 SUNXI_FUNCTION(0x0, "gpio_in"),
487 SUNXI_FUNCTION(0x1, "gpio_out")),
488 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
489 SUNXI_FUNCTION(0x0, "gpio_in"),
490 SUNXI_FUNCTION(0x1, "gpio_out")),
491 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
492 SUNXI_FUNCTION(0x0, "gpio_in"),
493 SUNXI_FUNCTION(0x1, "gpio_out")),
494 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH23,
495 SUNXI_FUNCTION(0x0, "gpio_in"),
496 SUNXI_FUNCTION(0x1, "gpio_out")),
497 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH24,
498 SUNXI_FUNCTION(0x0, "gpio_in"),
499 SUNXI_FUNCTION(0x1, "gpio_out")),
500 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH25,
501 SUNXI_FUNCTION(0x0, "gpio_in"),
502 SUNXI_FUNCTION(0x1, "gpio_out")),
503 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH26,
504 SUNXI_FUNCTION(0x0, "gpio_in"),
505 SUNXI_FUNCTION(0x1, "gpio_out")),
506 SUNXI_PIN(SUNXI_PINCTRL_PIN_PH27,
507 SUNXI_FUNCTION(0x0, "gpio_in"),
508 SUNXI_FUNCTION(0x1, "gpio_out")),
509 /* Hole */
510 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI0,
511 SUNXI_FUNCTION(0x0, "gpio_in"),
512 SUNXI_FUNCTION(0x1, "gpio_out")),
513 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI1,
514 SUNXI_FUNCTION(0x0, "gpio_in"),
515 SUNXI_FUNCTION(0x1, "gpio_out")),
516 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI2,
517 SUNXI_FUNCTION(0x0, "gpio_in"),
518 SUNXI_FUNCTION(0x1, "gpio_out")),
519 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI3,
520 SUNXI_FUNCTION(0x0, "gpio_in"),
521 SUNXI_FUNCTION(0x1, "gpio_out")),
522 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI4,
523 SUNXI_FUNCTION(0x0, "gpio_in"),
524 SUNXI_FUNCTION(0x1, "gpio_out")),
525 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI5,
526 SUNXI_FUNCTION(0x0, "gpio_in"),
527 SUNXI_FUNCTION(0x1, "gpio_out")),
528 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI6,
529 SUNXI_FUNCTION(0x0, "gpio_in"),
530 SUNXI_FUNCTION(0x1, "gpio_out")),
531 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI7,
532 SUNXI_FUNCTION(0x0, "gpio_in"),
533 SUNXI_FUNCTION(0x1, "gpio_out")),
534 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI8,
535 SUNXI_FUNCTION(0x0, "gpio_in"),
536 SUNXI_FUNCTION(0x1, "gpio_out")),
537 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI9,
538 SUNXI_FUNCTION(0x0, "gpio_in"),
539 SUNXI_FUNCTION(0x1, "gpio_out")),
540 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI10,
541 SUNXI_FUNCTION(0x0, "gpio_in"),
542 SUNXI_FUNCTION(0x1, "gpio_out")),
543 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI11,
544 SUNXI_FUNCTION(0x0, "gpio_in"),
545 SUNXI_FUNCTION(0x1, "gpio_out")),
546 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI12,
547 SUNXI_FUNCTION(0x0, "gpio_in"),
548 SUNXI_FUNCTION(0x1, "gpio_out")),
549 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI13,
550 SUNXI_FUNCTION(0x0, "gpio_in"),
551 SUNXI_FUNCTION(0x1, "gpio_out")),
552 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI14,
553 SUNXI_FUNCTION(0x0, "gpio_in"),
554 SUNXI_FUNCTION(0x1, "gpio_out")),
555 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI15,
556 SUNXI_FUNCTION(0x0, "gpio_in"),
557 SUNXI_FUNCTION(0x1, "gpio_out")),
558 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI16,
559 SUNXI_FUNCTION(0x0, "gpio_in"),
560 SUNXI_FUNCTION(0x1, "gpio_out")),
561 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI17,
562 SUNXI_FUNCTION(0x0, "gpio_in"),
563 SUNXI_FUNCTION(0x1, "gpio_out")),
564 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI18,
565 SUNXI_FUNCTION(0x0, "gpio_in"),
566 SUNXI_FUNCTION(0x1, "gpio_out")),
567 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI19,
568 SUNXI_FUNCTION(0x0, "gpio_in"),
569 SUNXI_FUNCTION(0x1, "gpio_out")),
570 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI20,
571 SUNXI_FUNCTION(0x0, "gpio_in"),
572 SUNXI_FUNCTION(0x1, "gpio_out")),
573 SUNXI_PIN(SUNXI_PINCTRL_PIN_PI21,
574 SUNXI_FUNCTION(0x0, "gpio_in"),
575 SUNXI_FUNCTION(0x1, "gpio_out")),
576};
577
/*
 * Pin descriptions for the Allwinner A13 (sun5i).
 *
 * Every listed pin supports plain GPIO input (mux 0x0) and output
 * (mux 0x1).  In addition, PE10/PE11 and PG3/PG4 can be muxed (0x4)
 * to UART1 TX/RX.  "Hole" comments mark gaps in the SoC's pin
 * numbering where no pad exists on this package.
 */
static const struct sunxi_desc_pin sun5i_a13_pins[] = {
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION(0x4, "uart1")),	/* TX */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION(0x4, "uart1")),	/* RX */
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION(0x4, "uart1")),	/* TX */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION(0x4, "uart1")),	/* RX */
	/* Hole */
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
	SUNXI_PIN(SUNXI_PINCTRL_PIN_PG12,
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out")),
};
824
/* Per-SoC pinctrl description for the Allwinner A10 (sun4i). */
static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
	.pins = sun4i_a10_pins,
	.npins = ARRAY_SIZE(sun4i_a10_pins),
};
829
/* Per-SoC pinctrl description for the Allwinner A13 (sun5i). */
static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
	.pins = sun5i_a13_pins,
	.npins = ARRAY_SIZE(sun5i_a13_pins),
};
834
835static struct sunxi_pinctrl_group *
836sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
837{
838 int i;
839
840 for (i = 0; i < pctl->ngroups; i++) {
841 struct sunxi_pinctrl_group *grp = pctl->groups + i;
842
843 if (!strcmp(grp->name, group))
844 return grp;
845 }
846
847 return NULL;
848}
849
850static struct sunxi_pinctrl_function *
851sunxi_pinctrl_find_function_by_name(struct sunxi_pinctrl *pctl,
852 const char *name)
853{
854 struct sunxi_pinctrl_function *func = pctl->functions;
855 int i;
856
857 for (i = 0; i < pctl->nfunctions; i++) {
858 if (!func[i].name)
859 break;
860
861 if (!strcmp(func[i].name, name))
862 return func + i;
863 }
864
865 return NULL;
866}
867
868static struct sunxi_desc_function *
869sunxi_pinctrl_desc_find_function_by_name(struct sunxi_pinctrl *pctl,
870 const char *pin_name,
871 const char *func_name)
872{
873 int i;
874
875 for (i = 0; i < pctl->desc->npins; i++) {
876 const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
877
878 if (!strcmp(pin->pin.name, pin_name)) {
879 struct sunxi_desc_function *func = pin->functions;
880
881 while (func->name) {
882 if (!strcmp(func->name, func_name))
883 return func;
884
885 func++;
886 }
887 }
888 }
889
890 return NULL;
891}
892
893static int sunxi_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
894{
895 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
896
897 return pctl->ngroups;
898}
899
900static const char *sunxi_pctrl_get_group_name(struct pinctrl_dev *pctldev,
901 unsigned group)
902{
903 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
904
905 return pctl->groups[group].name;
906}
907
908static int sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
909 unsigned group,
910 const unsigned **pins,
911 unsigned *num_pins)
912{
913 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
914
915 *pins = (unsigned *)&pctl->groups[group].pin;
916 *num_pins = 1;
917
918 return 0;
919}
920
921static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
922 struct device_node *node,
923 struct pinctrl_map **map,
924 unsigned *num_maps)
925{
926 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
927 unsigned long *pinconfig;
928 struct property *prop;
929 const char *function;
930 const char *group;
931 int ret, nmaps, i = 0;
932 u32 val;
933
934 *map = NULL;
935 *num_maps = 0;
936
937 ret = of_property_read_string(node, "allwinner,function", &function);
938 if (ret) {
939 dev_err(pctl->dev,
940 "missing allwinner,function property in node %s\n",
941 node->name);
942 return -EINVAL;
943 }
944
945 nmaps = of_property_count_strings(node, "allwinner,pins") * 2;
946 if (nmaps < 0) {
947 dev_err(pctl->dev,
948 "missing allwinner,pins property in node %s\n",
949 node->name);
950 return -EINVAL;
951 }
952
953 *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
954 if (!map)
955 return -ENOMEM;
956
957 of_property_for_each_string(node, "allwinner,pins", prop, group) {
958 struct sunxi_pinctrl_group *grp =
959 sunxi_pinctrl_find_group_by_name(pctl, group);
960 int j = 0, configlen = 0;
961
962 if (!grp) {
963 dev_err(pctl->dev, "unknown pin %s", group);
964 continue;
965 }
966
967 if (!sunxi_pinctrl_desc_find_function_by_name(pctl,
968 grp->name,
969 function)) {
970 dev_err(pctl->dev, "unsupported function %s on pin %s",
971 function, group);
972 continue;
973 }
974
975 (*map)[i].type = PIN_MAP_TYPE_MUX_GROUP;
976 (*map)[i].data.mux.group = group;
977 (*map)[i].data.mux.function = function;
978
979 i++;
980
981 (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
982 (*map)[i].data.configs.group_or_pin = group;
983
984 if (of_find_property(node, "allwinner,drive", NULL))
985 configlen++;
986 if (of_find_property(node, "allwinner,pull", NULL))
987 configlen++;
988
989 pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
990
991 if (!of_property_read_u32(node, "allwinner,drive", &val)) {
992 u16 strength = (val + 1) * 10;
993 pinconfig[j++] =
994 pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
995 strength);
996 }
997
998 if (!of_property_read_u32(node, "allwinner,pull", &val)) {
999 enum pin_config_param pull = PIN_CONFIG_END;
1000 if (val == 1)
1001 pull = PIN_CONFIG_BIAS_PULL_UP;
1002 else if (val == 2)
1003 pull = PIN_CONFIG_BIAS_PULL_DOWN;
1004 pinconfig[j++] = pinconf_to_config_packed(pull, 0);
1005 }
1006
1007 (*map)[i].data.configs.configs = pinconfig;
1008 (*map)[i].data.configs.num_configs = configlen;
1009
1010 i++;
1011 }
1012
1013 *num_maps = nmaps;
1014
1015 return 0;
1016}
1017
1018static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev,
1019 struct pinctrl_map *map,
1020 unsigned num_maps)
1021{
1022 int i;
1023
1024 for (i = 0; i < num_maps; i++) {
1025 if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
1026 kfree(map[i].data.configs.configs);
1027 }
1028
1029 kfree(map);
1030}
1031
/* pinctrl core callbacks: DT parsing plus group enumeration. */
static struct pinctrl_ops sunxi_pctrl_ops = {
	.dt_node_to_map		= sunxi_pctrl_dt_node_to_map,
	.dt_free_map		= sunxi_pctrl_dt_free_map,
	.get_groups_count	= sunxi_pctrl_get_groups_count,
	.get_group_name		= sunxi_pctrl_get_group_name,
	.get_group_pins		= sunxi_pctrl_get_group_pins,
};
1039
1040static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
1041 unsigned group,
1042 unsigned long *config)
1043{
1044 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1045
1046 *config = pctl->groups[group].config;
1047
1048 return 0;
1049}
1050
1051static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
1052 unsigned group,
1053 unsigned long config)
1054{
1055 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1056 struct sunxi_pinctrl_group *g = &pctl->groups[group];
1057 u32 val, mask;
1058 u16 strength;
1059 u8 dlevel;
1060
1061 switch (pinconf_to_config_param(config)) {
1062 case PIN_CONFIG_DRIVE_STRENGTH:
1063 strength = pinconf_to_config_argument(config);
1064 if (strength > 40)
1065 return -EINVAL;
1066 /*
1067 * We convert from mA to what the register expects:
1068 * 0: 10mA
1069 * 1: 20mA
1070 * 2: 30mA
1071 * 3: 40mA
1072 */
1073 dlevel = strength / 10 - 1;
1074 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
1075 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
1076 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
1077 pctl->membase + sunxi_dlevel_reg(g->pin));
1078 break;
1079 case PIN_CONFIG_BIAS_PULL_UP:
1080 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
1081 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
1082 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
1083 pctl->membase + sunxi_pull_reg(g->pin));
1084 break;
1085 case PIN_CONFIG_BIAS_PULL_DOWN:
1086 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
1087 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
1088 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
1089 pctl->membase + sunxi_pull_reg(g->pin));
1090 break;
1091 default:
1092 break;
1093 }
1094
1095 /* cache the config value */
1096 g->config = config;
1097
1098 return 0;
1099}
1100
/* pinconf core callbacks: per-group (per-pin) drive/bias configuration. */
static struct pinconf_ops sunxi_pconf_ops = {
	.pin_config_group_get	= sunxi_pconf_group_get,
	.pin_config_group_set	= sunxi_pconf_group_set,
};
1105
1106static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
1107{
1108 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1109
1110 return pctl->nfunctions;
1111}
1112
1113static const char *sunxi_pmx_get_func_name(struct pinctrl_dev *pctldev,
1114 unsigned function)
1115{
1116 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1117
1118 return pctl->functions[function].name;
1119}
1120
1121static int sunxi_pmx_get_func_groups(struct pinctrl_dev *pctldev,
1122 unsigned function,
1123 const char * const **groups,
1124 unsigned * const num_groups)
1125{
1126 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1127
1128 *groups = pctl->functions[function].groups;
1129 *num_groups = pctl->functions[function].ngroups;
1130
1131 return 0;
1132}
1133
1134static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
1135 unsigned pin,
1136 u8 config)
1137{
1138 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1139
1140 u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
1141 u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
1142 writel((val & ~mask) | config << sunxi_mux_offset(pin),
1143 pctl->membase + sunxi_mux_reg(pin));
1144}
1145
1146static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
1147 unsigned function,
1148 unsigned group)
1149{
1150 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1151 struct sunxi_pinctrl_group *g = pctl->groups + group;
1152 struct sunxi_pinctrl_function *func = pctl->functions + function;
1153 struct sunxi_desc_function *desc =
1154 sunxi_pinctrl_desc_find_function_by_name(pctl,
1155 g->name,
1156 func->name);
1157
1158 if (!desc)
1159 return -EINVAL;
1160
1161 sunxi_pmx_set(pctldev, g->pin, desc->muxval);
1162
1163 return 0;
1164}
1165
1166static int
1167sunxi_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
1168 struct pinctrl_gpio_range *range,
1169 unsigned offset,
1170 bool input)
1171{
1172 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
1173 struct sunxi_desc_function *desc;
1174 char pin_name[SUNXI_PIN_NAME_MAX_LEN];
1175 const char *func;
1176 u8 bank, pin;
1177 int ret;
1178
1179 bank = (offset) / PINS_PER_BANK;
1180 pin = (offset) % PINS_PER_BANK;
1181
1182 ret = sprintf(pin_name, "P%c%d", 'A' + bank, pin);
1183 if (!ret)
1184 goto error;
1185
1186 if (input)
1187 func = "gpio_in";
1188 else
1189 func = "gpio_out";
1190
1191 desc = sunxi_pinctrl_desc_find_function_by_name(pctl,
1192 pin_name,
1193 func);
1194 if (!desc) {
1195 ret = -EINVAL;
1196 goto error;
1197 }
1198
1199 sunxi_pmx_set(pctldev, offset, desc->muxval);
1200
1201 ret = 0;
1202
1203error:
1204 return ret;
1205}
1206
/* pinmux core callbacks: function enumeration, muxing, GPIO direction. */
static struct pinmux_ops sunxi_pmx_ops = {
	.get_functions_count	= sunxi_pmx_get_funcs_cnt,
	.get_function_name	= sunxi_pmx_get_func_name,
	.get_function_groups	= sunxi_pmx_get_func_groups,
	.enable			= sunxi_pmx_enable,
	.gpio_set_direction	= sunxi_pmx_gpio_set_direction,
};
1214
/*
 * Pinctrl descriptor template: only the ops are set here.  The
 * remaining fields (.name, .pins, .npins, ...) are presumably filled
 * in from the per-SoC data at probe time — probe is outside this view.
 */
static struct pinctrl_desc sunxi_pctrl_desc = {
	.confops	= &sunxi_pconf_ops,
	.pctlops	= &sunxi_pctrl_ops,
	.pmxops		= &sunxi_pmx_ops,
};
1220
1221static int sunxi_pinctrl_gpio_request(struct gpio_chip *chip, unsigned offset)
1222{
1223 return pinctrl_request_gpio(chip->base + offset);
1224}
1225
1226static void sunxi_pinctrl_gpio_free(struct gpio_chip *chip, unsigned offset)
1227{
1228 pinctrl_free_gpio(chip->base + offset);
1229}
1230
1231static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
1232 unsigned offset)
1233{
1234 return pinctrl_gpio_direction_input(chip->base + offset);
1235}
1236
1237static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
1238{
1239 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
1240
1241 u32 reg = sunxi_data_reg(offset);
1242 u8 index = sunxi_data_offset(offset);
1243 u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
1244
1245 return val;
1246}
1247
1248static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
1249 unsigned offset, int value)
1250{
1251 return pinctrl_gpio_direction_output(chip->base + offset);
1252}
1253
1254static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
1255 unsigned offset, int value)
1256{
1257 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
1258 u32 reg = sunxi_data_reg(offset);
1259 u8 index = sunxi_data_offset(offset);
1260
1261 writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
1262}
1263
1264static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
1265 const struct of_phandle_args *gpiospec,
1266 u32 *flags)
1267{
1268 int pin, base;
1269
1270 base = PINS_PER_BANK * gpiospec->args[0];
1271 pin = base + gpiospec->args[1];
1272
1273 if (pin > (gc->base + gc->ngpio))
1274 return -EINVAL;
1275
1276 if (flags)
1277 *flags = gpiospec->args[2];
1278
1279 return pin;
1280}
1281
/*
 * Template gpio_chip; ngpio, label, dev and base are set per device in
 * sunxi_pinctrl_probe().
 */
static struct gpio_chip sunxi_pinctrl_gpio_chip = {
	.owner = THIS_MODULE,
	.request = sunxi_pinctrl_gpio_request,
	.free = sunxi_pinctrl_gpio_free,
	.direction_input = sunxi_pinctrl_gpio_direction_input,
	.direction_output = sunxi_pinctrl_gpio_direction_output,
	.get = sunxi_pinctrl_gpio_get,
	.set = sunxi_pinctrl_gpio_set,
	.of_xlate = sunxi_pinctrl_gpio_of_xlate,
	.of_gpio_n_cells = 3,	/* bank, pin-in-bank, flags */
	.can_sleep = 0,
};
1294
/* Per-SoC pin tables, selected by the DT compatible string. */
static struct of_device_id sunxi_pinctrl_match[] = {
	{ .compatible = "allwinner,sun4i-a10-pinctrl", .data = (void *)&sun4i_a10_pinctrl_data },
	{ .compatible = "allwinner,sun5i-a13-pinctrl", .data = (void *)&sun5i_a13_pinctrl_data },
	{}
};
MODULE_DEVICE_TABLE(of, sunxi_pinctrl_match);
1301
1302static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
1303 const char *name)
1304{
1305 struct sunxi_pinctrl_function *func = pctl->functions;
1306
1307 while (func->name) {
1308 /* function already there */
1309 if (strcmp(func->name, name) == 0) {
1310 func->ngroups++;
1311 return -EEXIST;
1312 }
1313 func++;
1314 }
1315
1316 func->name = name;
1317 func->ngroups = 1;
1318
1319 pctl->nfunctions++;
1320
1321 return 0;
1322}
1323
1324static int sunxi_pinctrl_build_state(struct platform_device *pdev)
1325{
1326 struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
1327 int i;
1328
1329 pctl->ngroups = pctl->desc->npins;
1330
1331 /* Allocate groups */
1332 pctl->groups = devm_kzalloc(&pdev->dev,
1333 pctl->ngroups * sizeof(*pctl->groups),
1334 GFP_KERNEL);
1335 if (!pctl->groups)
1336 return -ENOMEM;
1337
1338 for (i = 0; i < pctl->desc->npins; i++) {
1339 const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
1340 struct sunxi_pinctrl_group *group = pctl->groups + i;
1341
1342 group->name = pin->pin.name;
1343 group->pin = pin->pin.number;
1344 }
1345
1346 /*
1347 * We suppose that we won't have any more functions than pins,
1348 * we'll reallocate that later anyway
1349 */
1350 pctl->functions = devm_kzalloc(&pdev->dev,
1351 pctl->desc->npins * sizeof(*pctl->functions),
1352 GFP_KERNEL);
1353 if (!pctl->functions)
1354 return -ENOMEM;
1355
1356 /* Count functions and their associated groups */
1357 for (i = 0; i < pctl->desc->npins; i++) {
1358 const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
1359 struct sunxi_desc_function *func = pin->functions;
1360
1361 while (func->name) {
1362 sunxi_pinctrl_add_function(pctl, func->name);
1363 func++;
1364 }
1365 }
1366
1367 pctl->functions = krealloc(pctl->functions,
1368 pctl->nfunctions * sizeof(*pctl->functions),
1369 GFP_KERNEL);
1370
1371 for (i = 0; i < pctl->desc->npins; i++) {
1372 const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
1373 struct sunxi_desc_function *func = pin->functions;
1374
1375 while (func->name) {
1376 struct sunxi_pinctrl_function *func_item;
1377 const char **func_grp;
1378
1379 func_item = sunxi_pinctrl_find_function_by_name(pctl,
1380 func->name);
1381 if (!func_item)
1382 return -EINVAL;
1383
1384 if (!func_item->groups) {
1385 func_item->groups =
1386 devm_kzalloc(&pdev->dev,
1387 func_item->ngroups * sizeof(*func_item->groups),
1388 GFP_KERNEL);
1389 if (!func_item->groups)
1390 return -ENOMEM;
1391 }
1392
1393 func_grp = func_item->groups;
1394 while (*func_grp)
1395 func_grp++;
1396
1397 *func_grp = pin->pin.name;
1398 func++;
1399 }
1400 }
1401
1402 return 0;
1403}
1404
1405static int sunxi_pinctrl_probe(struct platform_device *pdev)
1406{
1407 struct device_node *node = pdev->dev.of_node;
1408 const struct of_device_id *device;
1409 struct pinctrl_pin_desc *pins;
1410 struct sunxi_pinctrl *pctl;
1411 int i, ret, last_pin;
1412
1413 pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
1414 if (!pctl)
1415 return -ENOMEM;
1416 platform_set_drvdata(pdev, pctl);
1417
1418 pctl->membase = of_iomap(node, 0);
1419 if (!pctl->membase)
1420 return -ENOMEM;
1421
1422 device = of_match_device(sunxi_pinctrl_match, &pdev->dev);
1423 if (!device)
1424 return -ENODEV;
1425
1426 pctl->desc = (struct sunxi_pinctrl_desc *)device->data;
1427
1428 ret = sunxi_pinctrl_build_state(pdev);
1429 if (ret) {
1430 dev_err(&pdev->dev, "dt probe failed: %d\n", ret);
1431 return ret;
1432 }
1433
1434 pins = devm_kzalloc(&pdev->dev,
1435 pctl->desc->npins * sizeof(*pins),
1436 GFP_KERNEL);
1437 if (!pins)
1438 return -ENOMEM;
1439
1440 for (i = 0; i < pctl->desc->npins; i++)
1441 pins[i] = pctl->desc->pins[i].pin;
1442
1443 sunxi_pctrl_desc.name = dev_name(&pdev->dev);
1444 sunxi_pctrl_desc.owner = THIS_MODULE;
1445 sunxi_pctrl_desc.pins = pins;
1446 sunxi_pctrl_desc.npins = pctl->desc->npins;
1447 pctl->dev = &pdev->dev;
1448 pctl->pctl_dev = pinctrl_register(&sunxi_pctrl_desc,
1449 &pdev->dev, pctl);
1450 if (!pctl->pctl_dev) {
1451 dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
1452 return -EINVAL;
1453 }
1454
1455 pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
1456 if (!pctl->chip) {
1457 ret = -ENOMEM;
1458 goto pinctrl_error;
1459 }
1460
1461 last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
1462 pctl->chip = &sunxi_pinctrl_gpio_chip;
1463 pctl->chip->ngpio = round_up(last_pin, PINS_PER_BANK);
1464 pctl->chip->label = dev_name(&pdev->dev);
1465 pctl->chip->dev = &pdev->dev;
1466 pctl->chip->base = 0;
1467
1468 ret = gpiochip_add(pctl->chip);
1469 if (ret)
1470 goto pinctrl_error;
1471
1472 for (i = 0; i < pctl->desc->npins; i++) {
1473 const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
1474
1475 ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
1476 pin->pin.number,
1477 pin->pin.number, 1);
1478 if (ret)
1479 goto gpiochip_error;
1480 }
1481
1482 dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
1483
1484 return 0;
1485
1486gpiochip_error:
1487 ret = gpiochip_remove(pctl->chip);
1488pinctrl_error:
1489 pinctrl_unregister(pctl->pctl_dev);
1490 return ret;
1491}
1492
/* Platform driver glue; instantiated from the DT match table above. */
static struct platform_driver sunxi_pinctrl_driver = {
	.probe = sunxi_pinctrl_probe,
	.driver = {
		.name = "sunxi-pinctrl",
		.owner = THIS_MODULE,
		.of_match_table = sunxi_pinctrl_match,
	},
};
module_platform_driver(sunxi_pinctrl_driver);
1502
1503MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1504MODULE_DESCRIPTION("Allwinner A1X pinctrl driver");
1505MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
new file mode 100644
index 000000000000..e921621059ce
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -0,0 +1,478 @@
1/*
2 * Allwinner A1X SoCs pinctrl driver.
3 *
4 * Copyright (C) 2012 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef __PINCTRL_SUNXI_H
14#define __PINCTRL_SUNXI_H
15
16#include <linux/kernel.h>
17
18#define PA_BASE 0
19#define PB_BASE 32
20#define PC_BASE 64
21#define PD_BASE 96
22#define PE_BASE 128
23#define PF_BASE 160
24#define PG_BASE 192
25#define PH_BASE 224
26#define PI_BASE 256
27
28#define SUNXI_PINCTRL_PIN_PA0 PINCTRL_PIN(PA_BASE + 0, "PA0")
29#define SUNXI_PINCTRL_PIN_PA1 PINCTRL_PIN(PA_BASE + 1, "PA1")
30#define SUNXI_PINCTRL_PIN_PA2 PINCTRL_PIN(PA_BASE + 2, "PA2")
31#define SUNXI_PINCTRL_PIN_PA3 PINCTRL_PIN(PA_BASE + 3, "PA3")
32#define SUNXI_PINCTRL_PIN_PA4 PINCTRL_PIN(PA_BASE + 4, "PA4")
33#define SUNXI_PINCTRL_PIN_PA5 PINCTRL_PIN(PA_BASE + 5, "PA5")
34#define SUNXI_PINCTRL_PIN_PA6 PINCTRL_PIN(PA_BASE + 6, "PA6")
35#define SUNXI_PINCTRL_PIN_PA7 PINCTRL_PIN(PA_BASE + 7, "PA7")
36#define SUNXI_PINCTRL_PIN_PA8 PINCTRL_PIN(PA_BASE + 8, "PA8")
37#define SUNXI_PINCTRL_PIN_PA9 PINCTRL_PIN(PA_BASE + 9, "PA9")
38#define SUNXI_PINCTRL_PIN_PA10 PINCTRL_PIN(PA_BASE + 10, "PA10")
39#define SUNXI_PINCTRL_PIN_PA11 PINCTRL_PIN(PA_BASE + 11, "PA11")
40#define SUNXI_PINCTRL_PIN_PA12 PINCTRL_PIN(PA_BASE + 12, "PA12")
41#define SUNXI_PINCTRL_PIN_PA13 PINCTRL_PIN(PA_BASE + 13, "PA13")
42#define SUNXI_PINCTRL_PIN_PA14 PINCTRL_PIN(PA_BASE + 14, "PA14")
43#define SUNXI_PINCTRL_PIN_PA15 PINCTRL_PIN(PA_BASE + 15, "PA15")
44#define SUNXI_PINCTRL_PIN_PA16 PINCTRL_PIN(PA_BASE + 16, "PA16")
45#define SUNXI_PINCTRL_PIN_PA17 PINCTRL_PIN(PA_BASE + 17, "PA17")
46#define SUNXI_PINCTRL_PIN_PA18 PINCTRL_PIN(PA_BASE + 18, "PA18")
47#define SUNXI_PINCTRL_PIN_PA19 PINCTRL_PIN(PA_BASE + 19, "PA19")
48#define SUNXI_PINCTRL_PIN_PA20 PINCTRL_PIN(PA_BASE + 20, "PA20")
49#define SUNXI_PINCTRL_PIN_PA21 PINCTRL_PIN(PA_BASE + 21, "PA21")
50#define SUNXI_PINCTRL_PIN_PA22 PINCTRL_PIN(PA_BASE + 22, "PA22")
51#define SUNXI_PINCTRL_PIN_PA23 PINCTRL_PIN(PA_BASE + 23, "PA23")
52#define SUNXI_PINCTRL_PIN_PA24 PINCTRL_PIN(PA_BASE + 24, "PA24")
53#define SUNXI_PINCTRL_PIN_PA25 PINCTRL_PIN(PA_BASE + 25, "PA25")
54#define SUNXI_PINCTRL_PIN_PA26 PINCTRL_PIN(PA_BASE + 26, "PA26")
55#define SUNXI_PINCTRL_PIN_PA27 PINCTRL_PIN(PA_BASE + 27, "PA27")
56#define SUNXI_PINCTRL_PIN_PA28 PINCTRL_PIN(PA_BASE + 28, "PA28")
57#define SUNXI_PINCTRL_PIN_PA29 PINCTRL_PIN(PA_BASE + 29, "PA29")
58#define SUNXI_PINCTRL_PIN_PA30 PINCTRL_PIN(PA_BASE + 30, "PA30")
59#define SUNXI_PINCTRL_PIN_PA31 PINCTRL_PIN(PA_BASE + 31, "PA31")
60
61#define SUNXI_PINCTRL_PIN_PB0 PINCTRL_PIN(PB_BASE + 0, "PB0")
62#define SUNXI_PINCTRL_PIN_PB1 PINCTRL_PIN(PB_BASE + 1, "PB1")
63#define SUNXI_PINCTRL_PIN_PB2 PINCTRL_PIN(PB_BASE + 2, "PB2")
64#define SUNXI_PINCTRL_PIN_PB3 PINCTRL_PIN(PB_BASE + 3, "PB3")
65#define SUNXI_PINCTRL_PIN_PB4 PINCTRL_PIN(PB_BASE + 4, "PB4")
66#define SUNXI_PINCTRL_PIN_PB5 PINCTRL_PIN(PB_BASE + 5, "PB5")
67#define SUNXI_PINCTRL_PIN_PB6 PINCTRL_PIN(PB_BASE + 6, "PB6")
68#define SUNXI_PINCTRL_PIN_PB7 PINCTRL_PIN(PB_BASE + 7, "PB7")
69#define SUNXI_PINCTRL_PIN_PB8 PINCTRL_PIN(PB_BASE + 8, "PB8")
70#define SUNXI_PINCTRL_PIN_PB9 PINCTRL_PIN(PB_BASE + 9, "PB9")
71#define SUNXI_PINCTRL_PIN_PB10 PINCTRL_PIN(PB_BASE + 10, "PB10")
72#define SUNXI_PINCTRL_PIN_PB11 PINCTRL_PIN(PB_BASE + 11, "PB11")
73#define SUNXI_PINCTRL_PIN_PB12 PINCTRL_PIN(PB_BASE + 12, "PB12")
74#define SUNXI_PINCTRL_PIN_PB13 PINCTRL_PIN(PB_BASE + 13, "PB13")
75#define SUNXI_PINCTRL_PIN_PB14 PINCTRL_PIN(PB_BASE + 14, "PB14")
76#define SUNXI_PINCTRL_PIN_PB15 PINCTRL_PIN(PB_BASE + 15, "PB15")
77#define SUNXI_PINCTRL_PIN_PB16 PINCTRL_PIN(PB_BASE + 16, "PB16")
78#define SUNXI_PINCTRL_PIN_PB17 PINCTRL_PIN(PB_BASE + 17, "PB17")
79#define SUNXI_PINCTRL_PIN_PB18 PINCTRL_PIN(PB_BASE + 18, "PB18")
80#define SUNXI_PINCTRL_PIN_PB19 PINCTRL_PIN(PB_BASE + 19, "PB19")
81#define SUNXI_PINCTRL_PIN_PB20 PINCTRL_PIN(PB_BASE + 20, "PB20")
82#define SUNXI_PINCTRL_PIN_PB21 PINCTRL_PIN(PB_BASE + 21, "PB21")
83#define SUNXI_PINCTRL_PIN_PB22 PINCTRL_PIN(PB_BASE + 22, "PB22")
84#define SUNXI_PINCTRL_PIN_PB23 PINCTRL_PIN(PB_BASE + 23, "PB23")
85#define SUNXI_PINCTRL_PIN_PB24 PINCTRL_PIN(PB_BASE + 24, "PB24")
86#define SUNXI_PINCTRL_PIN_PB25 PINCTRL_PIN(PB_BASE + 25, "PB25")
87#define SUNXI_PINCTRL_PIN_PB26 PINCTRL_PIN(PB_BASE + 26, "PB26")
88#define SUNXI_PINCTRL_PIN_PB27 PINCTRL_PIN(PB_BASE + 27, "PB27")
89#define SUNXI_PINCTRL_PIN_PB28 PINCTRL_PIN(PB_BASE + 28, "PB28")
90#define SUNXI_PINCTRL_PIN_PB29 PINCTRL_PIN(PB_BASE + 29, "PB29")
91#define SUNXI_PINCTRL_PIN_PB30 PINCTRL_PIN(PB_BASE + 30, "PB30")
92#define SUNXI_PINCTRL_PIN_PB31 PINCTRL_PIN(PB_BASE + 31, "PB31")
93
94#define SUNXI_PINCTRL_PIN_PC0 PINCTRL_PIN(PC_BASE + 0, "PC0")
95#define SUNXI_PINCTRL_PIN_PC1 PINCTRL_PIN(PC_BASE + 1, "PC1")
96#define SUNXI_PINCTRL_PIN_PC2 PINCTRL_PIN(PC_BASE + 2, "PC2")
97#define SUNXI_PINCTRL_PIN_PC3 PINCTRL_PIN(PC_BASE + 3, "PC3")
98#define SUNXI_PINCTRL_PIN_PC4 PINCTRL_PIN(PC_BASE + 4, "PC4")
99#define SUNXI_PINCTRL_PIN_PC5 PINCTRL_PIN(PC_BASE + 5, "PC5")
100#define SUNXI_PINCTRL_PIN_PC6 PINCTRL_PIN(PC_BASE + 6, "PC6")
101#define SUNXI_PINCTRL_PIN_PC7 PINCTRL_PIN(PC_BASE + 7, "PC7")
102#define SUNXI_PINCTRL_PIN_PC8 PINCTRL_PIN(PC_BASE + 8, "PC8")
103#define SUNXI_PINCTRL_PIN_PC9 PINCTRL_PIN(PC_BASE + 9, "PC9")
104#define SUNXI_PINCTRL_PIN_PC10 PINCTRL_PIN(PC_BASE + 10, "PC10")
105#define SUNXI_PINCTRL_PIN_PC11 PINCTRL_PIN(PC_BASE + 11, "PC11")
106#define SUNXI_PINCTRL_PIN_PC12 PINCTRL_PIN(PC_BASE + 12, "PC12")
107#define SUNXI_PINCTRL_PIN_PC13 PINCTRL_PIN(PC_BASE + 13, "PC13")
108#define SUNXI_PINCTRL_PIN_PC14 PINCTRL_PIN(PC_BASE + 14, "PC14")
109#define SUNXI_PINCTRL_PIN_PC15 PINCTRL_PIN(PC_BASE + 15, "PC15")
110#define SUNXI_PINCTRL_PIN_PC16 PINCTRL_PIN(PC_BASE + 16, "PC16")
111#define SUNXI_PINCTRL_PIN_PC17 PINCTRL_PIN(PC_BASE + 17, "PC17")
112#define SUNXI_PINCTRL_PIN_PC18 PINCTRL_PIN(PC_BASE + 18, "PC18")
113#define SUNXI_PINCTRL_PIN_PC19 PINCTRL_PIN(PC_BASE + 19, "PC19")
114#define SUNXI_PINCTRL_PIN_PC20 PINCTRL_PIN(PC_BASE + 20, "PC20")
115#define SUNXI_PINCTRL_PIN_PC21 PINCTRL_PIN(PC_BASE + 21, "PC21")
116#define SUNXI_PINCTRL_PIN_PC22 PINCTRL_PIN(PC_BASE + 22, "PC22")
117#define SUNXI_PINCTRL_PIN_PC23 PINCTRL_PIN(PC_BASE + 23, "PC23")
118#define SUNXI_PINCTRL_PIN_PC24 PINCTRL_PIN(PC_BASE + 24, "PC24")
119#define SUNXI_PINCTRL_PIN_PC25 PINCTRL_PIN(PC_BASE + 25, "PC25")
120#define SUNXI_PINCTRL_PIN_PC26 PINCTRL_PIN(PC_BASE + 26, "PC26")
121#define SUNXI_PINCTRL_PIN_PC27 PINCTRL_PIN(PC_BASE + 27, "PC27")
122#define SUNXI_PINCTRL_PIN_PC28 PINCTRL_PIN(PC_BASE + 28, "PC28")
123#define SUNXI_PINCTRL_PIN_PC29 PINCTRL_PIN(PC_BASE + 29, "PC29")
124#define SUNXI_PINCTRL_PIN_PC30 PINCTRL_PIN(PC_BASE + 30, "PC30")
125#define SUNXI_PINCTRL_PIN_PC31 PINCTRL_PIN(PC_BASE + 31, "PC31")
126
127#define SUNXI_PINCTRL_PIN_PD0 PINCTRL_PIN(PD_BASE + 0, "PD0")
128#define SUNXI_PINCTRL_PIN_PD1 PINCTRL_PIN(PD_BASE + 1, "PD1")
129#define SUNXI_PINCTRL_PIN_PD2 PINCTRL_PIN(PD_BASE + 2, "PD2")
130#define SUNXI_PINCTRL_PIN_PD3 PINCTRL_PIN(PD_BASE + 3, "PD3")
131#define SUNXI_PINCTRL_PIN_PD4 PINCTRL_PIN(PD_BASE + 4, "PD4")
132#define SUNXI_PINCTRL_PIN_PD5 PINCTRL_PIN(PD_BASE + 5, "PD5")
133#define SUNXI_PINCTRL_PIN_PD6 PINCTRL_PIN(PD_BASE + 6, "PD6")
134#define SUNXI_PINCTRL_PIN_PD7 PINCTRL_PIN(PD_BASE + 7, "PD7")
135#define SUNXI_PINCTRL_PIN_PD8 PINCTRL_PIN(PD_BASE + 8, "PD8")
136#define SUNXI_PINCTRL_PIN_PD9 PINCTRL_PIN(PD_BASE + 9, "PD9")
137#define SUNXI_PINCTRL_PIN_PD10 PINCTRL_PIN(PD_BASE + 10, "PD10")
138#define SUNXI_PINCTRL_PIN_PD11 PINCTRL_PIN(PD_BASE + 11, "PD11")
139#define SUNXI_PINCTRL_PIN_PD12 PINCTRL_PIN(PD_BASE + 12, "PD12")
140#define SUNXI_PINCTRL_PIN_PD13 PINCTRL_PIN(PD_BASE + 13, "PD13")
141#define SUNXI_PINCTRL_PIN_PD14 PINCTRL_PIN(PD_BASE + 14, "PD14")
142#define SUNXI_PINCTRL_PIN_PD15 PINCTRL_PIN(PD_BASE + 15, "PD15")
143#define SUNXI_PINCTRL_PIN_PD16 PINCTRL_PIN(PD_BASE + 16, "PD16")
144#define SUNXI_PINCTRL_PIN_PD17 PINCTRL_PIN(PD_BASE + 17, "PD17")
145#define SUNXI_PINCTRL_PIN_PD18 PINCTRL_PIN(PD_BASE + 18, "PD18")
146#define SUNXI_PINCTRL_PIN_PD19 PINCTRL_PIN(PD_BASE + 19, "PD19")
147#define SUNXI_PINCTRL_PIN_PD20 PINCTRL_PIN(PD_BASE + 20, "PD20")
148#define SUNXI_PINCTRL_PIN_PD21 PINCTRL_PIN(PD_BASE + 21, "PD21")
149#define SUNXI_PINCTRL_PIN_PD22 PINCTRL_PIN(PD_BASE + 22, "PD22")
150#define SUNXI_PINCTRL_PIN_PD23 PINCTRL_PIN(PD_BASE + 23, "PD23")
151#define SUNXI_PINCTRL_PIN_PD24 PINCTRL_PIN(PD_BASE + 24, "PD24")
152#define SUNXI_PINCTRL_PIN_PD25 PINCTRL_PIN(PD_BASE + 25, "PD25")
153#define SUNXI_PINCTRL_PIN_PD26 PINCTRL_PIN(PD_BASE + 26, "PD26")
154#define SUNXI_PINCTRL_PIN_PD27 PINCTRL_PIN(PD_BASE + 27, "PD27")
155#define SUNXI_PINCTRL_PIN_PD28 PINCTRL_PIN(PD_BASE + 28, "PD28")
156#define SUNXI_PINCTRL_PIN_PD29 PINCTRL_PIN(PD_BASE + 29, "PD29")
157#define SUNXI_PINCTRL_PIN_PD30 PINCTRL_PIN(PD_BASE + 30, "PD30")
158#define SUNXI_PINCTRL_PIN_PD31 PINCTRL_PIN(PD_BASE + 31, "PD31")
159
160#define SUNXI_PINCTRL_PIN_PE0 PINCTRL_PIN(PE_BASE + 0, "PE0")
161#define SUNXI_PINCTRL_PIN_PE1 PINCTRL_PIN(PE_BASE + 1, "PE1")
162#define SUNXI_PINCTRL_PIN_PE2 PINCTRL_PIN(PE_BASE + 2, "PE2")
163#define SUNXI_PINCTRL_PIN_PE3 PINCTRL_PIN(PE_BASE + 3, "PE3")
164#define SUNXI_PINCTRL_PIN_PE4 PINCTRL_PIN(PE_BASE + 4, "PE4")
165#define SUNXI_PINCTRL_PIN_PE5 PINCTRL_PIN(PE_BASE + 5, "PE5")
166#define SUNXI_PINCTRL_PIN_PE6 PINCTRL_PIN(PE_BASE + 6, "PE6")
167#define SUNXI_PINCTRL_PIN_PE7 PINCTRL_PIN(PE_BASE + 7, "PE7")
168#define SUNXI_PINCTRL_PIN_PE8 PINCTRL_PIN(PE_BASE + 8, "PE8")
169#define SUNXI_PINCTRL_PIN_PE9 PINCTRL_PIN(PE_BASE + 9, "PE9")
170#define SUNXI_PINCTRL_PIN_PE10 PINCTRL_PIN(PE_BASE + 10, "PE10")
171#define SUNXI_PINCTRL_PIN_PE11 PINCTRL_PIN(PE_BASE + 11, "PE11")
172#define SUNXI_PINCTRL_PIN_PE12 PINCTRL_PIN(PE_BASE + 12, "PE12")
173#define SUNXI_PINCTRL_PIN_PE13 PINCTRL_PIN(PE_BASE + 13, "PE13")
174#define SUNXI_PINCTRL_PIN_PE14 PINCTRL_PIN(PE_BASE + 14, "PE14")
175#define SUNXI_PINCTRL_PIN_PE15 PINCTRL_PIN(PE_BASE + 15, "PE15")
176#define SUNXI_PINCTRL_PIN_PE16 PINCTRL_PIN(PE_BASE + 16, "PE16")
177#define SUNXI_PINCTRL_PIN_PE17 PINCTRL_PIN(PE_BASE + 17, "PE17")
178#define SUNXI_PINCTRL_PIN_PE18 PINCTRL_PIN(PE_BASE + 18, "PE18")
179#define SUNXI_PINCTRL_PIN_PE19 PINCTRL_PIN(PE_BASE + 19, "PE19")
180#define SUNXI_PINCTRL_PIN_PE20 PINCTRL_PIN(PE_BASE + 20, "PE20")
181#define SUNXI_PINCTRL_PIN_PE21 PINCTRL_PIN(PE_BASE + 21, "PE21")
182#define SUNXI_PINCTRL_PIN_PE22 PINCTRL_PIN(PE_BASE + 22, "PE22")
183#define SUNXI_PINCTRL_PIN_PE23 PINCTRL_PIN(PE_BASE + 23, "PE23")
184#define SUNXI_PINCTRL_PIN_PE24 PINCTRL_PIN(PE_BASE + 24, "PE24")
185#define SUNXI_PINCTRL_PIN_PE25 PINCTRL_PIN(PE_BASE + 25, "PE25")
186#define SUNXI_PINCTRL_PIN_PE26 PINCTRL_PIN(PE_BASE + 26, "PE26")
187#define SUNXI_PINCTRL_PIN_PE27 PINCTRL_PIN(PE_BASE + 27, "PE27")
188#define SUNXI_PINCTRL_PIN_PE28 PINCTRL_PIN(PE_BASE + 28, "PE28")
189#define SUNXI_PINCTRL_PIN_PE29 PINCTRL_PIN(PE_BASE + 29, "PE29")
190#define SUNXI_PINCTRL_PIN_PE30 PINCTRL_PIN(PE_BASE + 30, "PE30")
191#define SUNXI_PINCTRL_PIN_PE31 PINCTRL_PIN(PE_BASE + 31, "PE31")
192
193#define SUNXI_PINCTRL_PIN_PF0 PINCTRL_PIN(PF_BASE + 0, "PF0")
194#define SUNXI_PINCTRL_PIN_PF1 PINCTRL_PIN(PF_BASE + 1, "PF1")
195#define SUNXI_PINCTRL_PIN_PF2 PINCTRL_PIN(PF_BASE + 2, "PF2")
196#define SUNXI_PINCTRL_PIN_PF3 PINCTRL_PIN(PF_BASE + 3, "PF3")
197#define SUNXI_PINCTRL_PIN_PF4 PINCTRL_PIN(PF_BASE + 4, "PF4")
198#define SUNXI_PINCTRL_PIN_PF5 PINCTRL_PIN(PF_BASE + 5, "PF5")
199#define SUNXI_PINCTRL_PIN_PF6 PINCTRL_PIN(PF_BASE + 6, "PF6")
200#define SUNXI_PINCTRL_PIN_PF7 PINCTRL_PIN(PF_BASE + 7, "PF7")
201#define SUNXI_PINCTRL_PIN_PF8 PINCTRL_PIN(PF_BASE + 8, "PF8")
202#define SUNXI_PINCTRL_PIN_PF9 PINCTRL_PIN(PF_BASE + 9, "PF9")
203#define SUNXI_PINCTRL_PIN_PF10 PINCTRL_PIN(PF_BASE + 10, "PF10")
204#define SUNXI_PINCTRL_PIN_PF11 PINCTRL_PIN(PF_BASE + 11, "PF11")
205#define SUNXI_PINCTRL_PIN_PF12 PINCTRL_PIN(PF_BASE + 12, "PF12")
206#define SUNXI_PINCTRL_PIN_PF13 PINCTRL_PIN(PF_BASE + 13, "PF13")
207#define SUNXI_PINCTRL_PIN_PF14 PINCTRL_PIN(PF_BASE + 14, "PF14")
208#define SUNXI_PINCTRL_PIN_PF15 PINCTRL_PIN(PF_BASE + 15, "PF15")
209#define SUNXI_PINCTRL_PIN_PF16 PINCTRL_PIN(PF_BASE + 16, "PF16")
210#define SUNXI_PINCTRL_PIN_PF17 PINCTRL_PIN(PF_BASE + 17, "PF17")
211#define SUNXI_PINCTRL_PIN_PF18 PINCTRL_PIN(PF_BASE + 18, "PF18")
212#define SUNXI_PINCTRL_PIN_PF19 PINCTRL_PIN(PF_BASE + 19, "PF19")
213#define SUNXI_PINCTRL_PIN_PF20 PINCTRL_PIN(PF_BASE + 20, "PF20")
214#define SUNXI_PINCTRL_PIN_PF21 PINCTRL_PIN(PF_BASE + 21, "PF21")
215#define SUNXI_PINCTRL_PIN_PF22 PINCTRL_PIN(PF_BASE + 22, "PF22")
216#define SUNXI_PINCTRL_PIN_PF23 PINCTRL_PIN(PF_BASE + 23, "PF23")
217#define SUNXI_PINCTRL_PIN_PF24 PINCTRL_PIN(PF_BASE + 24, "PF24")
218#define SUNXI_PINCTRL_PIN_PF25 PINCTRL_PIN(PF_BASE + 25, "PF25")
219#define SUNXI_PINCTRL_PIN_PF26 PINCTRL_PIN(PF_BASE + 26, "PF26")
220#define SUNXI_PINCTRL_PIN_PF27 PINCTRL_PIN(PF_BASE + 27, "PF27")
221#define SUNXI_PINCTRL_PIN_PF28 PINCTRL_PIN(PF_BASE + 28, "PF28")
222#define SUNXI_PINCTRL_PIN_PF29 PINCTRL_PIN(PF_BASE + 29, "PF29")
223#define SUNXI_PINCTRL_PIN_PF30 PINCTRL_PIN(PF_BASE + 30, "PF30")
224#define SUNXI_PINCTRL_PIN_PF31 PINCTRL_PIN(PF_BASE + 31, "PF31")
225
226#define SUNXI_PINCTRL_PIN_PG0 PINCTRL_PIN(PG_BASE + 0, "PG0")
227#define SUNXI_PINCTRL_PIN_PG1 PINCTRL_PIN(PG_BASE + 1, "PG1")
228#define SUNXI_PINCTRL_PIN_PG2 PINCTRL_PIN(PG_BASE + 2, "PG2")
229#define SUNXI_PINCTRL_PIN_PG3 PINCTRL_PIN(PG_BASE + 3, "PG3")
230#define SUNXI_PINCTRL_PIN_PG4 PINCTRL_PIN(PG_BASE + 4, "PG4")
231#define SUNXI_PINCTRL_PIN_PG5 PINCTRL_PIN(PG_BASE + 5, "PG5")
232#define SUNXI_PINCTRL_PIN_PG6 PINCTRL_PIN(PG_BASE + 6, "PG6")
233#define SUNXI_PINCTRL_PIN_PG7 PINCTRL_PIN(PG_BASE + 7, "PG7")
234#define SUNXI_PINCTRL_PIN_PG8 PINCTRL_PIN(PG_BASE + 8, "PG8")
235#define SUNXI_PINCTRL_PIN_PG9 PINCTRL_PIN(PG_BASE + 9, "PG9")
236#define SUNXI_PINCTRL_PIN_PG10 PINCTRL_PIN(PG_BASE + 10, "PG10")
237#define SUNXI_PINCTRL_PIN_PG11 PINCTRL_PIN(PG_BASE + 11, "PG11")
238#define SUNXI_PINCTRL_PIN_PG12 PINCTRL_PIN(PG_BASE + 12, "PG12")
239#define SUNXI_PINCTRL_PIN_PG13 PINCTRL_PIN(PG_BASE + 13, "PG13")
240#define SUNXI_PINCTRL_PIN_PG14 PINCTRL_PIN(PG_BASE + 14, "PG14")
241#define SUNXI_PINCTRL_PIN_PG15 PINCTRL_PIN(PG_BASE + 15, "PG15")
242#define SUNXI_PINCTRL_PIN_PG16 PINCTRL_PIN(PG_BASE + 16, "PG16")
243#define SUNXI_PINCTRL_PIN_PG17 PINCTRL_PIN(PG_BASE + 17, "PG17")
244#define SUNXI_PINCTRL_PIN_PG18 PINCTRL_PIN(PG_BASE + 18, "PG18")
245#define SUNXI_PINCTRL_PIN_PG19 PINCTRL_PIN(PG_BASE + 19, "PG19")
246#define SUNXI_PINCTRL_PIN_PG20 PINCTRL_PIN(PG_BASE + 20, "PG20")
247#define SUNXI_PINCTRL_PIN_PG21 PINCTRL_PIN(PG_BASE + 21, "PG21")
248#define SUNXI_PINCTRL_PIN_PG22 PINCTRL_PIN(PG_BASE + 22, "PG22")
249#define SUNXI_PINCTRL_PIN_PG23 PINCTRL_PIN(PG_BASE + 23, "PG23")
250#define SUNXI_PINCTRL_PIN_PG24 PINCTRL_PIN(PG_BASE + 24, "PG24")
251#define SUNXI_PINCTRL_PIN_PG25 PINCTRL_PIN(PG_BASE + 25, "PG25")
252#define SUNXI_PINCTRL_PIN_PG26 PINCTRL_PIN(PG_BASE + 26, "PG26")
253#define SUNXI_PINCTRL_PIN_PG27 PINCTRL_PIN(PG_BASE + 27, "PG27")
254#define SUNXI_PINCTRL_PIN_PG28 PINCTRL_PIN(PG_BASE + 28, "PG28")
255#define SUNXI_PINCTRL_PIN_PG29 PINCTRL_PIN(PG_BASE + 29, "PG29")
256#define SUNXI_PINCTRL_PIN_PG30 PINCTRL_PIN(PG_BASE + 30, "PG30")
257#define SUNXI_PINCTRL_PIN_PG31 PINCTRL_PIN(PG_BASE + 31, "PG31")
258
259#define SUNXI_PINCTRL_PIN_PH0 PINCTRL_PIN(PH_BASE + 0, "PH0")
260#define SUNXI_PINCTRL_PIN_PH1 PINCTRL_PIN(PH_BASE + 1, "PH1")
261#define SUNXI_PINCTRL_PIN_PH2 PINCTRL_PIN(PH_BASE + 2, "PH2")
262#define SUNXI_PINCTRL_PIN_PH3 PINCTRL_PIN(PH_BASE + 3, "PH3")
263#define SUNXI_PINCTRL_PIN_PH4 PINCTRL_PIN(PH_BASE + 4, "PH4")
264#define SUNXI_PINCTRL_PIN_PH5 PINCTRL_PIN(PH_BASE + 5, "PH5")
265#define SUNXI_PINCTRL_PIN_PH6 PINCTRL_PIN(PH_BASE + 6, "PH6")
266#define SUNXI_PINCTRL_PIN_PH7 PINCTRL_PIN(PH_BASE + 7, "PH7")
267#define SUNXI_PINCTRL_PIN_PH8 PINCTRL_PIN(PH_BASE + 8, "PH8")
268#define SUNXI_PINCTRL_PIN_PH9 PINCTRL_PIN(PH_BASE + 9, "PH9")
269#define SUNXI_PINCTRL_PIN_PH10 PINCTRL_PIN(PH_BASE + 10, "PH10")
270#define SUNXI_PINCTRL_PIN_PH11 PINCTRL_PIN(PH_BASE + 11, "PH11")
271#define SUNXI_PINCTRL_PIN_PH12 PINCTRL_PIN(PH_BASE + 12, "PH12")
272#define SUNXI_PINCTRL_PIN_PH13 PINCTRL_PIN(PH_BASE + 13, "PH13")
273#define SUNXI_PINCTRL_PIN_PH14 PINCTRL_PIN(PH_BASE + 14, "PH14")
274#define SUNXI_PINCTRL_PIN_PH15 PINCTRL_PIN(PH_BASE + 15, "PH15")
275#define SUNXI_PINCTRL_PIN_PH16 PINCTRL_PIN(PH_BASE + 16, "PH16")
276#define SUNXI_PINCTRL_PIN_PH17 PINCTRL_PIN(PH_BASE + 17, "PH17")
277#define SUNXI_PINCTRL_PIN_PH18 PINCTRL_PIN(PH_BASE + 18, "PH18")
278#define SUNXI_PINCTRL_PIN_PH19 PINCTRL_PIN(PH_BASE + 19, "PH19")
279#define SUNXI_PINCTRL_PIN_PH20 PINCTRL_PIN(PH_BASE + 20, "PH20")
280#define SUNXI_PINCTRL_PIN_PH21 PINCTRL_PIN(PH_BASE + 21, "PH21")
281#define SUNXI_PINCTRL_PIN_PH22 PINCTRL_PIN(PH_BASE + 22, "PH22")
282#define SUNXI_PINCTRL_PIN_PH23 PINCTRL_PIN(PH_BASE + 23, "PH23")
283#define SUNXI_PINCTRL_PIN_PH24 PINCTRL_PIN(PH_BASE + 24, "PH24")
284#define SUNXI_PINCTRL_PIN_PH25 PINCTRL_PIN(PH_BASE + 25, "PH25")
285#define SUNXI_PINCTRL_PIN_PH26 PINCTRL_PIN(PH_BASE + 26, "PH26")
286#define SUNXI_PINCTRL_PIN_PH27 PINCTRL_PIN(PH_BASE + 27, "PH27")
287#define SUNXI_PINCTRL_PIN_PH28 PINCTRL_PIN(PH_BASE + 28, "PH28")
288#define SUNXI_PINCTRL_PIN_PH29 PINCTRL_PIN(PH_BASE + 29, "PH29")
289#define SUNXI_PINCTRL_PIN_PH30 PINCTRL_PIN(PH_BASE + 30, "PH30")
290#define SUNXI_PINCTRL_PIN_PH31 PINCTRL_PIN(PH_BASE + 31, "PH31")
291
292#define SUNXI_PINCTRL_PIN_PI0 PINCTRL_PIN(PI_BASE + 0, "PI0")
293#define SUNXI_PINCTRL_PIN_PI1 PINCTRL_PIN(PI_BASE + 1, "PI1")
294#define SUNXI_PINCTRL_PIN_PI2 PINCTRL_PIN(PI_BASE + 2, "PI2")
295#define SUNXI_PINCTRL_PIN_PI3 PINCTRL_PIN(PI_BASE + 3, "PI3")
296#define SUNXI_PINCTRL_PIN_PI4 PINCTRL_PIN(PI_BASE + 4, "PI4")
297#define SUNXI_PINCTRL_PIN_PI5 PINCTRL_PIN(PI_BASE + 5, "PI5")
298#define SUNXI_PINCTRL_PIN_PI6 PINCTRL_PIN(PI_BASE + 6, "PI6")
299#define SUNXI_PINCTRL_PIN_PI7 PINCTRL_PIN(PI_BASE + 7, "PI7")
300#define SUNXI_PINCTRL_PIN_PI8 PINCTRL_PIN(PI_BASE + 8, "PI8")
301#define SUNXI_PINCTRL_PIN_PI9 PINCTRL_PIN(PI_BASE + 9, "PI9")
302#define SUNXI_PINCTRL_PIN_PI10 PINCTRL_PIN(PI_BASE + 10, "PI10")
303#define SUNXI_PINCTRL_PIN_PI11 PINCTRL_PIN(PI_BASE + 11, "PI11")
304#define SUNXI_PINCTRL_PIN_PI12 PINCTRL_PIN(PI_BASE + 12, "PI12")
305#define SUNXI_PINCTRL_PIN_PI13 PINCTRL_PIN(PI_BASE + 13, "PI13")
306#define SUNXI_PINCTRL_PIN_PI14 PINCTRL_PIN(PI_BASE + 14, "PI14")
307#define SUNXI_PINCTRL_PIN_PI15 PINCTRL_PIN(PI_BASE + 15, "PI15")
308#define SUNXI_PINCTRL_PIN_PI16 PINCTRL_PIN(PI_BASE + 16, "PI16")
309#define SUNXI_PINCTRL_PIN_PI17 PINCTRL_PIN(PI_BASE + 17, "PI17")
310#define SUNXI_PINCTRL_PIN_PI18 PINCTRL_PIN(PI_BASE + 18, "PI18")
311#define SUNXI_PINCTRL_PIN_PI19 PINCTRL_PIN(PI_BASE + 19, "PI19")
312#define SUNXI_PINCTRL_PIN_PI20 PINCTRL_PIN(PI_BASE + 20, "PI20")
313#define SUNXI_PINCTRL_PIN_PI21 PINCTRL_PIN(PI_BASE + 21, "PI21")
314#define SUNXI_PINCTRL_PIN_PI22 PINCTRL_PIN(PI_BASE + 22, "PI22")
315#define SUNXI_PINCTRL_PIN_PI23 PINCTRL_PIN(PI_BASE + 23, "PI23")
316#define SUNXI_PINCTRL_PIN_PI24 PINCTRL_PIN(PI_BASE + 24, "PI24")
317#define SUNXI_PINCTRL_PIN_PI25 PINCTRL_PIN(PI_BASE + 25, "PI25")
318#define SUNXI_PINCTRL_PIN_PI26 PINCTRL_PIN(PI_BASE + 26, "PI26")
319#define SUNXI_PINCTRL_PIN_PI27 PINCTRL_PIN(PI_BASE + 27, "PI27")
320#define SUNXI_PINCTRL_PIN_PI28 PINCTRL_PIN(PI_BASE + 28, "PI28")
321#define SUNXI_PINCTRL_PIN_PI29 PINCTRL_PIN(PI_BASE + 29, "PI29")
322#define SUNXI_PINCTRL_PIN_PI30 PINCTRL_PIN(PI_BASE + 30, "PI30")
323#define SUNXI_PINCTRL_PIN_PI31 PINCTRL_PIN(PI_BASE + 31, "PI31")
324
/* Longest pin name ("PA0" .. "PI31") including the NUL terminator. */
#define SUNXI_PIN_NAME_MAX_LEN 5

/* Per-bank register block: each bank occupies BANK_MEM_SIZE bytes. */
#define BANK_MEM_SIZE 0x24
#define MUX_REGS_OFFSET 0x0
#define DATA_REGS_OFFSET 0x10
#define DLEVEL_REGS_OFFSET 0x14
#define PULL_REGS_OFFSET 0x1c

/*
 * Field packing: pins per bank, and for each register type how many
 * pins share one 32-bit register, the field width in bits, and the
 * mask for one field.
 */
#define PINS_PER_BANK 32
#define MUX_PINS_PER_REG 8
#define MUX_PINS_BITS 4
#define MUX_PINS_MASK 0x0f
#define DATA_PINS_PER_REG 32
#define DATA_PINS_BITS 1
#define DATA_PINS_MASK 0x01
#define DLEVEL_PINS_PER_REG 16
#define DLEVEL_PINS_BITS 2
#define DLEVEL_PINS_MASK 0x03
#define PULL_PINS_PER_REG 16
#define PULL_PINS_BITS 2
#define PULL_PINS_MASK 0x03
346
/* One selectable function of a pin: its name and its mux register value. */
struct sunxi_desc_function {
	const char *name;
	u8 muxval;
};
351
/* One pin plus its NULL-name terminated array of muxable functions. */
struct sunxi_desc_pin {
	struct pinctrl_pin_desc pin;
	struct sunxi_desc_function *functions;
};
356
/* Per-SoC static description: the pin table and its GPIO ranges. */
struct sunxi_pinctrl_desc {
	const struct sunxi_desc_pin *pins;
	int npins;
	struct pinctrl_gpio_range *ranges;
	int nranges;
};
363
/* A function and the NULL-terminated list of group names that mux it. */
struct sunxi_pinctrl_function {
	const char *name;
	const char **groups;
	unsigned ngroups;
};
369
/* One pinctrl group; in this driver every group is exactly one pin. */
struct sunxi_pinctrl_group {
	const char *name;
	unsigned long config;
	unsigned pin;
};
375
/* Per-device driver state, allocated and filled at probe time. */
struct sunxi_pinctrl {
	void __iomem *membase;			/* mapped PIO registers */
	struct gpio_chip *chip;
	struct sunxi_pinctrl_desc *desc;	/* matched per-SoC pin table */
	struct device *dev;
	struct sunxi_pinctrl_function *functions;
	unsigned nfunctions;
	struct sunxi_pinctrl_group *groups;
	unsigned ngroups;
	struct pinctrl_dev *pctl_dev;
};
387
/*
 * Describe one pin: its pinctrl_pin_desc plus an array of the functions
 * that can be muxed onto it (the variadic SUNXI_FUNCTION() entries),
 * terminated by an all-zero sentinel.
 */
#define SUNXI_PIN(_pin, ...)					\
	{							\
		.pin = _pin,					\
		.functions = (struct sunxi_desc_function[]){	\
			__VA_ARGS__, { } },			\
	}
394
/* One function entry for SUNXI_PIN(): a name and its mux value. */
#define SUNXI_FUNCTION(_val, _name)				\
	{							\
		.name = _name,					\
		.muxval = _val,					\
	}
400
401/*
402 * The sunXi PIO registers are organized as is:
403 * 0x00 - 0x0c Muxing values.
404 * 8 pins per register, each pin having a 4bits value
405 * 0x10 Pin values
406 * 32 bits per register, each pin corresponding to one bit
407 * 0x14 - 0x18 Drive level
408 * 16 pins per register, each pin having a 2bits value
409 * 0x1c - 0x20 Pull-Up values
410 * 16 pins per register, each pin having a 2bits value
411 *
412 * This is for the first bank. Each bank will have the same layout,
413 * with an offset being a multiple of 0x24.
414 *
415 * The following functions calculate from the pin number the register
416 * and the bit offset that we should access.
417 */
418static inline u32 sunxi_mux_reg(u16 pin)
419{
420 u8 bank = pin / PINS_PER_BANK;
421 u32 offset = bank * BANK_MEM_SIZE;
422 offset += MUX_REGS_OFFSET;
423 offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
424 return round_down(offset, 4);
425}
426
427static inline u32 sunxi_mux_offset(u16 pin)
428{
429 u32 pin_num = pin % MUX_PINS_PER_REG;
430 return pin_num * MUX_PINS_BITS;
431}
432
433static inline u32 sunxi_data_reg(u16 pin)
434{
435 u8 bank = pin / PINS_PER_BANK;
436 u32 offset = bank * BANK_MEM_SIZE;
437 offset += DATA_REGS_OFFSET;
438 offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
439 return round_down(offset, 4);
440}
441
442static inline u32 sunxi_data_offset(u16 pin)
443{
444 u32 pin_num = pin % DATA_PINS_PER_REG;
445 return pin_num * DATA_PINS_BITS;
446}
447
448static inline u32 sunxi_dlevel_reg(u16 pin)
449{
450 u8 bank = pin / PINS_PER_BANK;
451 u32 offset = bank * BANK_MEM_SIZE;
452 offset += DLEVEL_REGS_OFFSET;
453 offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
454 return round_down(offset, 4);
455}
456
457static inline u32 sunxi_dlevel_offset(u16 pin)
458{
459 u32 pin_num = pin % DLEVEL_PINS_PER_REG;
460 return pin_num * DLEVEL_PINS_BITS;
461}
462
463static inline u32 sunxi_pull_reg(u16 pin)
464{
465 u8 bank = pin / PINS_PER_BANK;
466 u32 offset = bank * BANK_MEM_SIZE;
467 offset += PULL_REGS_OFFSET;
468 offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
469 return round_down(offset, 4);
470}
471
472static inline u32 sunxi_pull_offset(u16 pin)
473{
474 u32 pin_num = pin % PULL_PINS_PER_REG;
475 return pin_num * PULL_PINS_BITS;
476}
477
478#endif /* __PINCTRL_SUNXI_H */
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index ae1e4bb3259d..f195d77a3572 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -201,6 +201,7 @@ static const struct cfg_param {
201 {"nvidia,open-drain", TEGRA_PINCONF_PARAM_OPEN_DRAIN}, 201 {"nvidia,open-drain", TEGRA_PINCONF_PARAM_OPEN_DRAIN},
202 {"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK}, 202 {"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK},
203 {"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET}, 203 {"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET},
204 {"nvidia,rcv-sel", TEGRA_PINCONF_PARAM_RCV_SEL},
204 {"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE}, 205 {"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
205 {"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT}, 206 {"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT},
206 {"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE}, 207 {"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
@@ -208,6 +209,7 @@ static const struct cfg_param {
208 {"nvidia,pull-up-strength", TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH}, 209 {"nvidia,pull-up-strength", TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH},
209 {"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING}, 210 {"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
210 {"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING}, 211 {"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
212 {"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
211}; 213};
212 214
213static int tegra_pinctrl_dt_subnode_to_map(struct device *dev, 215static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
@@ -450,6 +452,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
450 *bit = g->ioreset_bit; 452 *bit = g->ioreset_bit;
451 *width = 1; 453 *width = 1;
452 break; 454 break;
455 case TEGRA_PINCONF_PARAM_RCV_SEL:
456 *bank = g->rcv_sel_bank;
457 *reg = g->rcv_sel_reg;
458 *bit = g->rcv_sel_bit;
459 *width = 1;
460 break;
453 case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE: 461 case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
454 *bank = g->drv_bank; 462 *bank = g->drv_bank;
455 *reg = g->drv_reg; 463 *reg = g->drv_reg;
@@ -492,6 +500,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
492 *bit = g->slwr_bit; 500 *bit = g->slwr_bit;
493 *width = g->slwr_width; 501 *width = g->slwr_width;
494 break; 502 break;
503 case TEGRA_PINCONF_PARAM_DRIVE_TYPE:
504 *bank = g->drvtype_bank;
505 *reg = g->drvtype_reg;
506 *bit = g->drvtype_bit;
507 *width = 2;
508 break;
495 default: 509 default:
496 dev_err(pmx->dev, "Invalid config param %04x\n", param); 510 dev_err(pmx->dev, "Invalid config param %04x\n", param);
497 return -ENOTSUPP; 511 return -ENOTSUPP;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/pinctrl-tegra.h
index 62e380965c68..817f7061dc4c 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/pinctrl-tegra.h
@@ -30,6 +30,8 @@ enum tegra_pinconf_param {
30 /* argument: Boolean */ 30 /* argument: Boolean */
31 TEGRA_PINCONF_PARAM_IORESET, 31 TEGRA_PINCONF_PARAM_IORESET,
32 /* argument: Boolean */ 32 /* argument: Boolean */
33 TEGRA_PINCONF_PARAM_RCV_SEL,
34 /* argument: Boolean */
33 TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE, 35 TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE,
34 /* argument: Boolean */ 36 /* argument: Boolean */
35 TEGRA_PINCONF_PARAM_SCHMITT, 37 TEGRA_PINCONF_PARAM_SCHMITT,
@@ -43,6 +45,8 @@ enum tegra_pinconf_param {
43 TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING, 45 TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING,
44 /* argument: Integer, range is HW-dependent */ 46 /* argument: Integer, range is HW-dependent */
45 TEGRA_PINCONF_PARAM_SLEW_RATE_RISING, 47 TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
48 /* argument: Integer, range is HW-dependent */
49 TEGRA_PINCONF_PARAM_DRIVE_TYPE,
46}; 50};
47 51
48enum tegra_pinconf_pull { 52enum tegra_pinconf_pull {
@@ -95,6 +99,9 @@ struct tegra_function {
95 * @ioreset_reg: IO reset register offset. -1 if unsupported. 99 * @ioreset_reg: IO reset register offset. -1 if unsupported.
96 * @ioreset_bank: IO reset register bank. 0 if unsupported. 100 * @ioreset_bank: IO reset register bank. 0 if unsupported.
97 * @ioreset_bit: IO reset register bit. 0 if unsupported. 101 * @ioreset_bit: IO reset register bit. 0 if unsupported.
102 * @rcv_sel_reg: Receiver select offset. -1 if unsupported.
103 * @rcv_sel_bank: Receiver select bank. 0 if unsupported.
104 * @rcv_sel_bit: Receiver select bit. 0 if unsupported.
98 * @drv_reg: Drive fields register offset. -1 if unsupported. 105 * @drv_reg: Drive fields register offset. -1 if unsupported.
99 * This register contains the hsm, schmitt, lpmd, drvdn, 106 * This register contains the hsm, schmitt, lpmd, drvdn,
100 * drvup, slwr, and slwf parameters. 107 * drvup, slwr, and slwf parameters.
@@ -110,6 +117,9 @@ struct tegra_function {
110 * @slwr_width: Slew Rising field width. 0 if unsupported. 117 * @slwr_width: Slew Rising field width. 0 if unsupported.
111 * @slwf_bit: Slew Falling register bit. 0 if unsupported. 118 * @slwf_bit: Slew Falling register bit. 0 if unsupported.
112 * @slwf_width: Slew Falling field width. 0 if unsupported. 119 * @slwf_width: Slew Falling field width. 0 if unsupported.
120 * @drvtype_reg: Drive type fields register offset. -1 if unsupported.
121 * @drvtype_bank: Drive type fields register bank. 0 if unsupported.
122 * @drvtype_bit: Drive type register bit. 0 if unsupported.
113 * 123 *
114 * A representation of a group of pins (possibly just one pin) in the Tegra 124 * A representation of a group of pins (possibly just one pin) in the Tegra
115 * pin controller. Each group allows some parameter or parameters to be 125 * pin controller. Each group allows some parameter or parameters to be
@@ -131,15 +141,19 @@ struct tegra_pingroup {
131 s16 odrain_reg; 141 s16 odrain_reg;
132 s16 lock_reg; 142 s16 lock_reg;
133 s16 ioreset_reg; 143 s16 ioreset_reg;
144 s16 rcv_sel_reg;
134 s16 drv_reg; 145 s16 drv_reg;
146 s16 drvtype_reg;
135 u32 mux_bank:2; 147 u32 mux_bank:2;
136 u32 pupd_bank:2; 148 u32 pupd_bank:2;
137 u32 tri_bank:2; 149 u32 tri_bank:2;
138 u32 einput_bank:2; 150 u32 einput_bank:2;
139 u32 odrain_bank:2; 151 u32 odrain_bank:2;
140 u32 ioreset_bank:2; 152 u32 ioreset_bank:2;
153 u32 rcv_sel_bank:2;
141 u32 lock_bank:2; 154 u32 lock_bank:2;
142 u32 drv_bank:2; 155 u32 drv_bank:2;
156 u32 drvtype_bank:2;
143 u32 mux_bit:5; 157 u32 mux_bit:5;
144 u32 pupd_bit:5; 158 u32 pupd_bit:5;
145 u32 tri_bit:5; 159 u32 tri_bit:5;
@@ -147,6 +161,7 @@ struct tegra_pingroup {
147 u32 odrain_bit:5; 161 u32 odrain_bit:5;
148 u32 lock_bit:5; 162 u32 lock_bit:5;
149 u32 ioreset_bit:5; 163 u32 ioreset_bit:5;
164 u32 rcv_sel_bit:5;
150 u32 hsm_bit:5; 165 u32 hsm_bit:5;
151 u32 schmitt_bit:5; 166 u32 schmitt_bit:5;
152 u32 lpmd_bit:5; 167 u32 lpmd_bit:5;
@@ -154,6 +169,7 @@ struct tegra_pingroup {
154 u32 drvup_bit:5; 169 u32 drvup_bit:5;
155 u32 slwr_bit:5; 170 u32 slwr_bit:5;
156 u32 slwf_bit:5; 171 u32 slwf_bit:5;
172 u32 drvtype_bit:5;
157 u32 drvdn_width:6; 173 u32 drvdn_width:6;
158 u32 drvup_width:6; 174 u32 drvup_width:6;
159 u32 slwr_width:6; 175 u32 slwr_width:6;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
new file mode 100644
index 000000000000..622c4854977e
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -0,0 +1,2769 @@
1/*
2 * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <linux/pinctrl/pinctrl.h>
25#include <linux/pinctrl/pinmux.h>
26
27#include "pinctrl-tegra.h"
28
29/*
30 * Most pins affected by the pinmux can also be GPIOs. Define these first.
31 * These must match how the GPIO driver names/numbers its pins.
32 */
33#define _GPIO(offset) (offset)
34
35#define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0)
36#define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1)
37#define TEGRA_PIN_DAP2_FS_PA2 _GPIO(2)
38#define TEGRA_PIN_DAP2_SCLK_PA3 _GPIO(3)
39#define TEGRA_PIN_DAP2_DIN_PA4 _GPIO(4)
40#define TEGRA_PIN_DAP2_DOUT_PA5 _GPIO(5)
41#define TEGRA_PIN_SDMMC3_CLK_PA6 _GPIO(6)
42#define TEGRA_PIN_SDMMC3_CMD_PA7 _GPIO(7)
43#define TEGRA_PIN_GMI_A17_PB0 _GPIO(8)
44#define TEGRA_PIN_GMI_A18_PB1 _GPIO(9)
45#define TEGRA_PIN_SDMMC3_DAT3_PB4 _GPIO(12)
46#define TEGRA_PIN_SDMMC3_DAT2_PB5 _GPIO(13)
47#define TEGRA_PIN_SDMMC3_DAT1_PB6 _GPIO(14)
48#define TEGRA_PIN_SDMMC3_DAT0_PB7 _GPIO(15)
49#define TEGRA_PIN_UART3_RTS_N_PC0 _GPIO(16)
50#define TEGRA_PIN_UART2_TXD_PC2 _GPIO(18)
51#define TEGRA_PIN_UART2_RXD_PC3 _GPIO(19)
52#define TEGRA_PIN_GEN1_I2C_SCL_PC4 _GPIO(20)
53#define TEGRA_PIN_GEN1_I2C_SDA_PC5 _GPIO(21)
54#define TEGRA_PIN_GMI_WP_N_PC7 _GPIO(23)
55#define TEGRA_PIN_GMI_AD0_PG0 _GPIO(48)
56#define TEGRA_PIN_GMI_AD1_PG1 _GPIO(49)
57#define TEGRA_PIN_GMI_AD2_PG2 _GPIO(50)
58#define TEGRA_PIN_GMI_AD3_PG3 _GPIO(51)
59#define TEGRA_PIN_GMI_AD4_PG4 _GPIO(52)
60#define TEGRA_PIN_GMI_AD5_PG5 _GPIO(53)
61#define TEGRA_PIN_GMI_AD6_PG6 _GPIO(54)
62#define TEGRA_PIN_GMI_AD7_PG7 _GPIO(55)
63#define TEGRA_PIN_GMI_AD8_PH0 _GPIO(56)
64#define TEGRA_PIN_GMI_AD9_PH1 _GPIO(57)
65#define TEGRA_PIN_GMI_AD10_PH2 _GPIO(58)
66#define TEGRA_PIN_GMI_AD11_PH3 _GPIO(59)
67#define TEGRA_PIN_GMI_AD12_PH4 _GPIO(60)
68#define TEGRA_PIN_GMI_AD13_PH5 _GPIO(61)
69#define TEGRA_PIN_GMI_AD14_PH6 _GPIO(62)
70#define TEGRA_PIN_GMI_AD15_PH7 _GPIO(63)
71#define TEGRA_PIN_GMI_WR_N_PI0 _GPIO(64)
72#define TEGRA_PIN_GMI_OE_N_PI1 _GPIO(65)
73#define TEGRA_PIN_GMI_CS6_N_PI3 _GPIO(67)
74#define TEGRA_PIN_GMI_RST_N_PI4 _GPIO(68)
75#define TEGRA_PIN_GMI_IORDY_PI5 _GPIO(69)
76#define TEGRA_PIN_GMI_CS7_N_PI6 _GPIO(70)
77#define TEGRA_PIN_GMI_WAIT_PI7 _GPIO(71)
78#define TEGRA_PIN_GMI_CS0_N_PJ0 _GPIO(72)
79#define TEGRA_PIN_GMI_CS1_N_PJ2 _GPIO(74)
80#define TEGRA_PIN_GMI_DQS_P_PJ3 _GPIO(75)
81#define TEGRA_PIN_UART2_CTS_N_PJ5 _GPIO(77)
82#define TEGRA_PIN_UART2_RTS_N_PJ6 _GPIO(78)
83#define TEGRA_PIN_GMI_A16_PJ7 _GPIO(79)
84#define TEGRA_PIN_GMI_ADV_N_PK0 _GPIO(80)
85#define TEGRA_PIN_GMI_CLK_PK1 _GPIO(81)
86#define TEGRA_PIN_GMI_CS4_N_PK2 _GPIO(82)
87#define TEGRA_PIN_GMI_CS2_N_PK3 _GPIO(83)
88#define TEGRA_PIN_GMI_CS3_N_PK4 _GPIO(84)
89#define TEGRA_PIN_SPDIF_OUT_PK5 _GPIO(85)
90#define TEGRA_PIN_SPDIF_IN_PK6 _GPIO(86)
91#define TEGRA_PIN_GMI_A19_PK7 _GPIO(87)
92#define TEGRA_PIN_DAP1_FS_PN0 _GPIO(104)
93#define TEGRA_PIN_DAP1_DIN_PN1 _GPIO(105)
94#define TEGRA_PIN_DAP1_DOUT_PN2 _GPIO(106)
95#define TEGRA_PIN_DAP1_SCLK_PN3 _GPIO(107)
96#define TEGRA_PIN_USB_VBUS_EN0_PN4 _GPIO(108)
97#define TEGRA_PIN_USB_VBUS_EN1_PN5 _GPIO(109)
98#define TEGRA_PIN_HDMI_INT_PN7 _GPIO(111)
99#define TEGRA_PIN_ULPI_DATA7_PO0 _GPIO(112)
100#define TEGRA_PIN_ULPI_DATA0_PO1 _GPIO(113)
101#define TEGRA_PIN_ULPI_DATA1_PO2 _GPIO(114)
102#define TEGRA_PIN_ULPI_DATA2_PO3 _GPIO(115)
103#define TEGRA_PIN_ULPI_DATA3_PO4 _GPIO(116)
104#define TEGRA_PIN_ULPI_DATA4_PO5 _GPIO(117)
105#define TEGRA_PIN_ULPI_DATA5_PO6 _GPIO(118)
106#define TEGRA_PIN_ULPI_DATA6_PO7 _GPIO(119)
107#define TEGRA_PIN_DAP3_FS_PP0 _GPIO(120)
108#define TEGRA_PIN_DAP3_DIN_PP1 _GPIO(121)
109#define TEGRA_PIN_DAP3_DOUT_PP2 _GPIO(122)
110#define TEGRA_PIN_DAP3_SCLK_PP3 _GPIO(123)
111#define TEGRA_PIN_DAP4_FS_PP4 _GPIO(124)
112#define TEGRA_PIN_DAP4_DIN_PP5 _GPIO(125)
113#define TEGRA_PIN_DAP4_DOUT_PP6 _GPIO(126)
114#define TEGRA_PIN_DAP4_SCLK_PP7 _GPIO(127)
115#define TEGRA_PIN_KB_COL0_PQ0 _GPIO(128)
116#define TEGRA_PIN_KB_COL1_PQ1 _GPIO(129)
117#define TEGRA_PIN_KB_COL2_PQ2 _GPIO(130)
118#define TEGRA_PIN_KB_COL3_PQ3 _GPIO(131)
119#define TEGRA_PIN_KB_COL4_PQ4 _GPIO(132)
120#define TEGRA_PIN_KB_COL5_PQ5 _GPIO(133)
121#define TEGRA_PIN_KB_COL6_PQ6 _GPIO(134)
122#define TEGRA_PIN_KB_COL7_PQ7 _GPIO(135)
123#define TEGRA_PIN_KB_ROW0_PR0 _GPIO(136)
124#define TEGRA_PIN_KB_ROW1_PR1 _GPIO(137)
125#define TEGRA_PIN_KB_ROW2_PR2 _GPIO(138)
126#define TEGRA_PIN_KB_ROW3_PR3 _GPIO(139)
127#define TEGRA_PIN_KB_ROW4_PR4 _GPIO(140)
128#define TEGRA_PIN_KB_ROW5_PR5 _GPIO(141)
129#define TEGRA_PIN_KB_ROW6_PR6 _GPIO(142)
130#define TEGRA_PIN_KB_ROW7_PR7 _GPIO(143)
131#define TEGRA_PIN_KB_ROW8_PS0 _GPIO(144)
132#define TEGRA_PIN_KB_ROW9_PS1 _GPIO(145)
133#define TEGRA_PIN_KB_ROW10_PS2 _GPIO(146)
134#define TEGRA_PIN_GEN2_I2C_SCL_PT5 _GPIO(157)
135#define TEGRA_PIN_GEN2_I2C_SDA_PT6 _GPIO(158)
136#define TEGRA_PIN_SDMMC4_CMD_PT7 _GPIO(159)
137#define TEGRA_PIN_PU0 _GPIO(160)
138#define TEGRA_PIN_PU1 _GPIO(161)
139#define TEGRA_PIN_PU2 _GPIO(162)
140#define TEGRA_PIN_PU3 _GPIO(163)
141#define TEGRA_PIN_PU4 _GPIO(164)
142#define TEGRA_PIN_PU5 _GPIO(165)
143#define TEGRA_PIN_PU6 _GPIO(166)
144#define TEGRA_PIN_PV0 _GPIO(168)
145#define TEGRA_PIN_PV1 _GPIO(169)
146#define TEGRA_PIN_SDMMC3_CD_N_PV2 _GPIO(170)
147#define TEGRA_PIN_SDMMC1_WP_N_PV3 _GPIO(171)
148#define TEGRA_PIN_DDC_SCL_PV4 _GPIO(172)
149#define TEGRA_PIN_DDC_SDA_PV5 _GPIO(173)
150#define TEGRA_PIN_GPIO_W2_AUD_PW2 _GPIO(178)
151#define TEGRA_PIN_GPIO_W3_AUD_PW3 _GPIO(179)
152#define TEGRA_PIN_CLK1_OUT_PW4 _GPIO(180)
153#define TEGRA_PIN_CLK2_OUT_PW5 _GPIO(181)
154#define TEGRA_PIN_UART3_TXD_PW6 _GPIO(182)
155#define TEGRA_PIN_UART3_RXD_PW7 _GPIO(183)
156#define TEGRA_PIN_DVFS_PWM_PX0 _GPIO(184)
157#define TEGRA_PIN_GPIO_X1_AUD_PX1 _GPIO(185)
158#define TEGRA_PIN_DVFS_CLK_PX2 _GPIO(186)
159#define TEGRA_PIN_GPIO_X3_AUD_PX3 _GPIO(187)
160#define TEGRA_PIN_GPIO_X4_AUD_PX4 _GPIO(188)
161#define TEGRA_PIN_GPIO_X5_AUD_PX5 _GPIO(189)
162#define TEGRA_PIN_GPIO_X6_AUD_PX6 _GPIO(190)
163#define TEGRA_PIN_GPIO_X7_AUD_PX7 _GPIO(191)
164#define TEGRA_PIN_ULPI_CLK_PY0 _GPIO(192)
165#define TEGRA_PIN_ULPI_DIR_PY1 _GPIO(193)
166#define TEGRA_PIN_ULPI_NXT_PY2 _GPIO(194)
167#define TEGRA_PIN_ULPI_STP_PY3 _GPIO(195)
168#define TEGRA_PIN_SDMMC1_DAT3_PY4 _GPIO(196)
169#define TEGRA_PIN_SDMMC1_DAT2_PY5 _GPIO(197)
170#define TEGRA_PIN_SDMMC1_DAT1_PY6 _GPIO(198)
171#define TEGRA_PIN_SDMMC1_DAT0_PY7 _GPIO(199)
172#define TEGRA_PIN_SDMMC1_CLK_PZ0 _GPIO(200)
173#define TEGRA_PIN_SDMMC1_CMD_PZ1 _GPIO(201)
174#define TEGRA_PIN_SYS_CLK_REQ_PZ5 _GPIO(205)
175#define TEGRA_PIN_PWR_I2C_SCL_PZ6 _GPIO(206)
176#define TEGRA_PIN_PWR_I2C_SDA_PZ7 _GPIO(207)
177#define TEGRA_PIN_SDMMC4_DAT0_PAA0 _GPIO(208)
178#define TEGRA_PIN_SDMMC4_DAT1_PAA1 _GPIO(209)
179#define TEGRA_PIN_SDMMC4_DAT2_PAA2 _GPIO(210)
180#define TEGRA_PIN_SDMMC4_DAT3_PAA3 _GPIO(211)
181#define TEGRA_PIN_SDMMC4_DAT4_PAA4 _GPIO(212)
182#define TEGRA_PIN_SDMMC4_DAT5_PAA5 _GPIO(213)
183#define TEGRA_PIN_SDMMC4_DAT6_PAA6 _GPIO(214)
184#define TEGRA_PIN_SDMMC4_DAT7_PAA7 _GPIO(215)
185#define TEGRA_PIN_PBB0 _GPIO(216)
186#define TEGRA_PIN_CAM_I2C_SCL_PBB1 _GPIO(217)
187#define TEGRA_PIN_CAM_I2C_SDA_PBB2 _GPIO(218)
188#define TEGRA_PIN_PBB3 _GPIO(219)
189#define TEGRA_PIN_PBB4 _GPIO(220)
190#define TEGRA_PIN_PBB5 _GPIO(221)
191#define TEGRA_PIN_PBB6 _GPIO(222)
192#define TEGRA_PIN_PBB7 _GPIO(223)
193#define TEGRA_PIN_CAM_MCLK_PCC0 _GPIO(224)
194#define TEGRA_PIN_PCC1 _GPIO(225)
195#define TEGRA_PIN_PCC2 _GPIO(226)
196#define TEGRA_PIN_SDMMC4_CLK_PCC4 _GPIO(228)
197#define TEGRA_PIN_CLK2_REQ_PCC5 _GPIO(229)
198#define TEGRA_PIN_CLK3_OUT_PEE0 _GPIO(240)
199#define TEGRA_PIN_CLK3_REQ_PEE1 _GPIO(241)
200#define TEGRA_PIN_CLK1_REQ_PEE2 _GPIO(242)
201#define TEGRA_PIN_HDMI_CEC_PEE3 _GPIO(243)
202#define TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4 _GPIO(244)
203#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 _GPIO(245)
204
205/* All non-GPIO pins follow */
206#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
207#define _PIN(offset) (NUM_GPIOS + (offset))
208
209/* Non-GPIO pins */
210#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
211#define TEGRA_PIN_CPU_PWR_REQ _PIN(1)
212#define TEGRA_PIN_PWR_INT_N _PIN(2)
213#define TEGRA_PIN_RESET_OUT_N _PIN(3)
214#define TEGRA_PIN_OWR _PIN(4)
215
216static const struct pinctrl_pin_desc tegra114_pins[] = {
217 PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
218 PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
219 PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
220 PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PA3, "DAP2_SCLK PA3"),
221 PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PA4, "DAP2_DIN PA4"),
222 PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PA5, "DAP2_DOUT PA5"),
223 PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PA6, "SDMMC3_CLK PA6"),
224 PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PA7, "SDMMC3_CMD PA7"),
225 PINCTRL_PIN(TEGRA_PIN_GMI_A17_PB0, "GMI_A17 PB0"),
226 PINCTRL_PIN(TEGRA_PIN_GMI_A18_PB1, "GMI_A18 PB1"),
227 PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PB4, "SDMMC3_DAT3 PB4"),
228 PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PB5, "SDMMC3_DAT2 PB5"),
229 PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PB6, "SDMMC3_DAT1 PB6"),
230 PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PB7, "SDMMC3_DAT0 PB7"),
231 PINCTRL_PIN(TEGRA_PIN_UART3_RTS_N_PC0, "UART3_RTS_N PC0"),
232 PINCTRL_PIN(TEGRA_PIN_UART2_TXD_PC2, "UART2_TXD PC2"),
233 PINCTRL_PIN(TEGRA_PIN_UART2_RXD_PC3, "UART2_RXD PC3"),
234 PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC4, "GEN1_I2C_SCL PC4"),
235 PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC5, "GEN1_I2C_SDA PC5"),
236 PINCTRL_PIN(TEGRA_PIN_GMI_WP_N_PC7, "GMI_WP_N PC7"),
237 PINCTRL_PIN(TEGRA_PIN_GMI_AD0_PG0, "GMI_AD0 PG0"),
238 PINCTRL_PIN(TEGRA_PIN_GMI_AD1_PG1, "GMI_AD1 PG1"),
239 PINCTRL_PIN(TEGRA_PIN_GMI_AD2_PG2, "GMI_AD2 PG2"),
240 PINCTRL_PIN(TEGRA_PIN_GMI_AD3_PG3, "GMI_AD3 PG3"),
241 PINCTRL_PIN(TEGRA_PIN_GMI_AD4_PG4, "GMI_AD4 PG4"),
242 PINCTRL_PIN(TEGRA_PIN_GMI_AD5_PG5, "GMI_AD5 PG5"),
243 PINCTRL_PIN(TEGRA_PIN_GMI_AD6_PG6, "GMI_AD6 PG6"),
244 PINCTRL_PIN(TEGRA_PIN_GMI_AD7_PG7, "GMI_AD7 PG7"),
245 PINCTRL_PIN(TEGRA_PIN_GMI_AD8_PH0, "GMI_AD8 PH0"),
246 PINCTRL_PIN(TEGRA_PIN_GMI_AD9_PH1, "GMI_AD9 PH1"),
247 PINCTRL_PIN(TEGRA_PIN_GMI_AD10_PH2, "GMI_AD10 PH2"),
248 PINCTRL_PIN(TEGRA_PIN_GMI_AD11_PH3, "GMI_AD11 PH3"),
249 PINCTRL_PIN(TEGRA_PIN_GMI_AD12_PH4, "GMI_AD12 PH4"),
250 PINCTRL_PIN(TEGRA_PIN_GMI_AD13_PH5, "GMI_AD13 PH5"),
251 PINCTRL_PIN(TEGRA_PIN_GMI_AD14_PH6, "GMI_AD14 PH6"),
252 PINCTRL_PIN(TEGRA_PIN_GMI_AD15_PH7, "GMI_AD15 PH7"),
253 PINCTRL_PIN(TEGRA_PIN_GMI_WR_N_PI0, "GMI_WR_N PI0"),
254 PINCTRL_PIN(TEGRA_PIN_GMI_OE_N_PI1, "GMI_OE_N PI1"),
255 PINCTRL_PIN(TEGRA_PIN_GMI_CS6_N_PI3, "GMI_CS6_N PI3"),
256 PINCTRL_PIN(TEGRA_PIN_GMI_RST_N_PI4, "GMI_RST_N PI4"),
257 PINCTRL_PIN(TEGRA_PIN_GMI_IORDY_PI5, "GMI_IORDY PI5"),
258 PINCTRL_PIN(TEGRA_PIN_GMI_CS7_N_PI6, "GMI_CS7_N PI6"),
259 PINCTRL_PIN(TEGRA_PIN_GMI_WAIT_PI7, "GMI_WAIT PI7"),
260 PINCTRL_PIN(TEGRA_PIN_GMI_CS0_N_PJ0, "GMI_CS0_N PJ0"),
261 PINCTRL_PIN(TEGRA_PIN_GMI_CS1_N_PJ2, "GMI_CS1_N PJ2"),
262 PINCTRL_PIN(TEGRA_PIN_GMI_DQS_P_PJ3, "GMI_DQS_P PJ3"),
263 PINCTRL_PIN(TEGRA_PIN_UART2_CTS_N_PJ5, "UART2_CTS_N PJ5"),
264 PINCTRL_PIN(TEGRA_PIN_UART2_RTS_N_PJ6, "UART2_RTS_N PJ6"),
265 PINCTRL_PIN(TEGRA_PIN_GMI_A16_PJ7, "GMI_A16 PJ7"),
266 PINCTRL_PIN(TEGRA_PIN_GMI_ADV_N_PK0, "GMI_ADV_N PK0"),
267 PINCTRL_PIN(TEGRA_PIN_GMI_CLK_PK1, "GMI_CLK PK1"),
268 PINCTRL_PIN(TEGRA_PIN_GMI_CS4_N_PK2, "GMI_CS4_N PK2"),
269 PINCTRL_PIN(TEGRA_PIN_GMI_CS2_N_PK3, "GMI_CS2_N PK3"),
270 PINCTRL_PIN(TEGRA_PIN_GMI_CS3_N_PK4, "GMI_CS3_N PK4"),
271 PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PK5, "SPDIF_OUT PK5"),
272 PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PK6, "SPDIF_IN PK6"),
273 PINCTRL_PIN(TEGRA_PIN_GMI_A19_PK7, "GMI_A19 PK7"),
274 PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PN0, "DAP1_FS PN0"),
275 PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PN1, "DAP1_DIN PN1"),
276 PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PN2, "DAP1_DOUT PN2"),
277 PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PN3, "DAP1_SCLK PN3"),
278 PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PN4, "USB_VBUS_EN0 PN4"),
279 PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PN5, "USB_VBUS_EN1 PN5"),
280 PINCTRL_PIN(TEGRA_PIN_HDMI_INT_PN7, "HDMI_INT PN7"),
281 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA7_PO0, "ULPI_DATA7 PO0"),
282 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA0_PO1, "ULPI_DATA0 PO1"),
283 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA1_PO2, "ULPI_DATA1 PO2"),
284 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA2_PO3, "ULPI_DATA2 PO3"),
285 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA3_PO4, "ULPI_DATA3 PO4"),
286 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA4_PO5, "ULPI_DATA4 PO5"),
287 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA5_PO6, "ULPI_DATA5 PO6"),
288 PINCTRL_PIN(TEGRA_PIN_ULPI_DATA6_PO7, "ULPI_DATA6 PO7"),
289 PINCTRL_PIN(TEGRA_PIN_DAP3_FS_PP0, "DAP3_FS PP0"),
290 PINCTRL_PIN(TEGRA_PIN_DAP3_DIN_PP1, "DAP3_DIN PP1"),
291 PINCTRL_PIN(TEGRA_PIN_DAP3_DOUT_PP2, "DAP3_DOUT PP2"),
292 PINCTRL_PIN(TEGRA_PIN_DAP3_SCLK_PP3, "DAP3_SCLK PP3"),
293 PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PP4, "DAP4_FS PP4"),
294 PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PP5, "DAP4_DIN PP5"),
295 PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PP6, "DAP4_DOUT PP6"),
296 PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PP7, "DAP4_SCLK PP7"),
297 PINCTRL_PIN(TEGRA_PIN_KB_COL0_PQ0, "KB_COL0 PQ0"),
298 PINCTRL_PIN(TEGRA_PIN_KB_COL1_PQ1, "KB_COL1 PQ1"),
299 PINCTRL_PIN(TEGRA_PIN_KB_COL2_PQ2, "KB_COL2 PQ2"),
300 PINCTRL_PIN(TEGRA_PIN_KB_COL3_PQ3, "KB_COL3 PQ3"),
301 PINCTRL_PIN(TEGRA_PIN_KB_COL4_PQ4, "KB_COL4 PQ4"),
302 PINCTRL_PIN(TEGRA_PIN_KB_COL5_PQ5, "KB_COL5 PQ5"),
303 PINCTRL_PIN(TEGRA_PIN_KB_COL6_PQ6, "KB_COL6 PQ6"),
304 PINCTRL_PIN(TEGRA_PIN_KB_COL7_PQ7, "KB_COL7 PQ7"),
305 PINCTRL_PIN(TEGRA_PIN_KB_ROW0_PR0, "KB_ROW0 PR0"),
306 PINCTRL_PIN(TEGRA_PIN_KB_ROW1_PR1, "KB_ROW1 PR1"),
307 PINCTRL_PIN(TEGRA_PIN_KB_ROW2_PR2, "KB_ROW2 PR2"),
308 PINCTRL_PIN(TEGRA_PIN_KB_ROW3_PR3, "KB_ROW3 PR3"),
309 PINCTRL_PIN(TEGRA_PIN_KB_ROW4_PR4, "KB_ROW4 PR4"),
310 PINCTRL_PIN(TEGRA_PIN_KB_ROW5_PR5, "KB_ROW5 PR5"),
311 PINCTRL_PIN(TEGRA_PIN_KB_ROW6_PR6, "KB_ROW6 PR6"),
312 PINCTRL_PIN(TEGRA_PIN_KB_ROW7_PR7, "KB_ROW7 PR7"),
313 PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
314 PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
315 PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
316 PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
317 PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
318 PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
319 PINCTRL_PIN(TEGRA_PIN_PU0, "PU0"),
320 PINCTRL_PIN(TEGRA_PIN_PU1, "PU1"),
321 PINCTRL_PIN(TEGRA_PIN_PU2, "PU2"),
322 PINCTRL_PIN(TEGRA_PIN_PU3, "PU3"),
323 PINCTRL_PIN(TEGRA_PIN_PU4, "PU4"),
324 PINCTRL_PIN(TEGRA_PIN_PU5, "PU5"),
325 PINCTRL_PIN(TEGRA_PIN_PU6, "PU6"),
326 PINCTRL_PIN(TEGRA_PIN_PV0, "PV0"),
327 PINCTRL_PIN(TEGRA_PIN_PV1, "PV1"),
328 PINCTRL_PIN(TEGRA_PIN_SDMMC3_CD_N_PV2, "SDMMC3_CD_N PV2"),
329 PINCTRL_PIN(TEGRA_PIN_SDMMC1_WP_N_PV3, "SDMMC1_WP_N PV3"),
330 PINCTRL_PIN(TEGRA_PIN_DDC_SCL_PV4, "DDC_SCL PV4"),
331 PINCTRL_PIN(TEGRA_PIN_DDC_SDA_PV5, "DDC_SDA PV5"),
332 PINCTRL_PIN(TEGRA_PIN_GPIO_W2_AUD_PW2, "GPIO_W2_AUD PW2"),
333 PINCTRL_PIN(TEGRA_PIN_GPIO_W3_AUD_PW3, "GPIO_W3_AUD PW3"),
334 PINCTRL_PIN(TEGRA_PIN_CLK1_OUT_PW4, "CLK1_OUT PW4"),
335 PINCTRL_PIN(TEGRA_PIN_CLK2_OUT_PW5, "CLK2_OUT PW5"),
336 PINCTRL_PIN(TEGRA_PIN_UART3_TXD_PW6, "UART3_TXD PW6"),
337 PINCTRL_PIN(TEGRA_PIN_UART3_RXD_PW7, "UART3_RXD PW7"),
338 PINCTRL_PIN(TEGRA_PIN_DVFS_PWM_PX0, "DVFS_PWM PX0"),
339 PINCTRL_PIN(TEGRA_PIN_GPIO_X1_AUD_PX1, "GPIO_X1_AUD PX1"),
340 PINCTRL_PIN(TEGRA_PIN_DVFS_CLK_PX2, "DVFS_CLK PX2"),
341 PINCTRL_PIN(TEGRA_PIN_GPIO_X3_AUD_PX3, "GPIO_X3_AUD PX3"),
342 PINCTRL_PIN(TEGRA_PIN_GPIO_X4_AUD_PX4, "GPIO_X4_AUD PX4"),
343 PINCTRL_PIN(TEGRA_PIN_GPIO_X5_AUD_PX5, "GPIO_X5_AUD PX5"),
344 PINCTRL_PIN(TEGRA_PIN_GPIO_X6_AUD_PX6, "GPIO_X6_AUD PX6"),
345 PINCTRL_PIN(TEGRA_PIN_GPIO_X7_AUD_PX7, "GPIO_X7_AUD PX7"),
346 PINCTRL_PIN(TEGRA_PIN_ULPI_CLK_PY0, "ULPI_CLK PY0"),
347 PINCTRL_PIN(TEGRA_PIN_ULPI_DIR_PY1, "ULPI_DIR PY1"),
348 PINCTRL_PIN(TEGRA_PIN_ULPI_NXT_PY2, "ULPI_NXT PY2"),
349 PINCTRL_PIN(TEGRA_PIN_ULPI_STP_PY3, "ULPI_STP PY3"),
350 PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PY4, "SDMMC1_DAT3 PY4"),
351 PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PY5, "SDMMC1_DAT2 PY5"),
352 PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PY6, "SDMMC1_DAT1 PY6"),
353 PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PY7, "SDMMC1_DAT0 PY7"),
354 PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PZ0, "SDMMC1_CLK PZ0"),
355 PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PZ1, "SDMMC1_CMD PZ1"),
356 PINCTRL_PIN(TEGRA_PIN_SYS_CLK_REQ_PZ5, "SYS_CLK_REQ PZ5"),
357 PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PZ6, "PWR_I2C_SCL PZ6"),
358 PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PZ7, "PWR_I2C_SDA PZ7"),
359 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT0_PAA0, "SDMMC4_DAT0 PAA0"),
360 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT1_PAA1, "SDMMC4_DAT1 PAA1"),
361 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT2_PAA2, "SDMMC4_DAT2 PAA2"),
362 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT3_PAA3, "SDMMC4_DAT3 PAA3"),
363 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT4_PAA4, "SDMMC4_DAT4 PAA4"),
364 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT5_PAA5, "SDMMC4_DAT5 PAA5"),
365 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT6_PAA6, "SDMMC4_DAT6 PAA6"),
366 PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT7_PAA7, "SDMMC4_DAT7 PAA7"),
367 PINCTRL_PIN(TEGRA_PIN_PBB0, "PBB0"),
368 PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PBB1, "CAM_I2C_SCL PBB1"),
369 PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PBB2, "CAM_I2C_SDA PBB2"),
370 PINCTRL_PIN(TEGRA_PIN_PBB3, "PBB3"),
371 PINCTRL_PIN(TEGRA_PIN_PBB4, "PBB4"),
372 PINCTRL_PIN(TEGRA_PIN_PBB5, "PBB5"),
373 PINCTRL_PIN(TEGRA_PIN_PBB6, "PBB6"),
374 PINCTRL_PIN(TEGRA_PIN_PBB7, "PBB7"),
375 PINCTRL_PIN(TEGRA_PIN_CAM_MCLK_PCC0, "CAM_MCLK PCC0"),
376 PINCTRL_PIN(TEGRA_PIN_PCC1, "PCC1"),
377 PINCTRL_PIN(TEGRA_PIN_PCC2, "PCC2"),
378 PINCTRL_PIN(TEGRA_PIN_SDMMC4_CLK_PCC4, "SDMMC4_CLK PCC4"),
379 PINCTRL_PIN(TEGRA_PIN_CLK2_REQ_PCC5, "CLK2_REQ PCC5"),
380 PINCTRL_PIN(TEGRA_PIN_CLK3_OUT_PEE0, "CLK3_OUT PEE0"),
381 PINCTRL_PIN(TEGRA_PIN_CLK3_REQ_PEE1, "CLK3_REQ PEE1"),
382 PINCTRL_PIN(TEGRA_PIN_CLK1_REQ_PEE2, "CLK1_REQ PEE2"),
383 PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
384 PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
385 PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
386 PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
387 PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
388 PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
389 PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
390 PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
391};
392
393static const unsigned clk_32k_out_pa0_pins[] = {
394 TEGRA_PIN_CLK_32K_OUT_PA0,
395};
396
397static const unsigned uart3_cts_n_pa1_pins[] = {
398 TEGRA_PIN_UART3_CTS_N_PA1,
399};
400
401static const unsigned dap2_fs_pa2_pins[] = {
402 TEGRA_PIN_DAP2_FS_PA2,
403};
404
405static const unsigned dap2_sclk_pa3_pins[] = {
406 TEGRA_PIN_DAP2_SCLK_PA3,
407};
408
409static const unsigned dap2_din_pa4_pins[] = {
410 TEGRA_PIN_DAP2_DIN_PA4,
411};
412
413static const unsigned dap2_dout_pa5_pins[] = {
414 TEGRA_PIN_DAP2_DOUT_PA5,
415};
416
417static const unsigned sdmmc3_clk_pa6_pins[] = {
418 TEGRA_PIN_SDMMC3_CLK_PA6,
419};
420
421static const unsigned sdmmc3_cmd_pa7_pins[] = {
422 TEGRA_PIN_SDMMC3_CMD_PA7,
423};
424
425static const unsigned gmi_a17_pb0_pins[] = {
426 TEGRA_PIN_GMI_A17_PB0,
427};
428
429static const unsigned gmi_a18_pb1_pins[] = {
430 TEGRA_PIN_GMI_A18_PB1,
431};
432
433static const unsigned sdmmc3_dat3_pb4_pins[] = {
434 TEGRA_PIN_SDMMC3_DAT3_PB4,
435};
436
437static const unsigned sdmmc3_dat2_pb5_pins[] = {
438 TEGRA_PIN_SDMMC3_DAT2_PB5,
439};
440
441static const unsigned sdmmc3_dat1_pb6_pins[] = {
442 TEGRA_PIN_SDMMC3_DAT1_PB6,
443};
444
445static const unsigned sdmmc3_dat0_pb7_pins[] = {
446 TEGRA_PIN_SDMMC3_DAT0_PB7,
447};
448
449static const unsigned uart3_rts_n_pc0_pins[] = {
450 TEGRA_PIN_UART3_RTS_N_PC0,
451};
452
453static const unsigned uart2_txd_pc2_pins[] = {
454 TEGRA_PIN_UART2_TXD_PC2,
455};
456
457static const unsigned uart2_rxd_pc3_pins[] = {
458 TEGRA_PIN_UART2_RXD_PC3,
459};
460
461static const unsigned gen1_i2c_scl_pc4_pins[] = {
462 TEGRA_PIN_GEN1_I2C_SCL_PC4,
463};
464
465static const unsigned gen1_i2c_sda_pc5_pins[] = {
466 TEGRA_PIN_GEN1_I2C_SDA_PC5,
467};
468
469static const unsigned gmi_wp_n_pc7_pins[] = {
470 TEGRA_PIN_GMI_WP_N_PC7,
471};
472
473static const unsigned gmi_ad0_pg0_pins[] = {
474 TEGRA_PIN_GMI_AD0_PG0,
475};
476
477static const unsigned gmi_ad1_pg1_pins[] = {
478 TEGRA_PIN_GMI_AD1_PG1,
479};
480
481static const unsigned gmi_ad2_pg2_pins[] = {
482 TEGRA_PIN_GMI_AD2_PG2,
483};
484
485static const unsigned gmi_ad3_pg3_pins[] = {
486 TEGRA_PIN_GMI_AD3_PG3,
487};
488
489static const unsigned gmi_ad4_pg4_pins[] = {
490 TEGRA_PIN_GMI_AD4_PG4,
491};
492
493static const unsigned gmi_ad5_pg5_pins[] = {
494 TEGRA_PIN_GMI_AD5_PG5,
495};
496
497static const unsigned gmi_ad6_pg6_pins[] = {
498 TEGRA_PIN_GMI_AD6_PG6,
499};
500
501static const unsigned gmi_ad7_pg7_pins[] = {
502 TEGRA_PIN_GMI_AD7_PG7,
503};
504
505static const unsigned gmi_ad8_ph0_pins[] = {
506 TEGRA_PIN_GMI_AD8_PH0,
507};
508
509static const unsigned gmi_ad9_ph1_pins[] = {
510 TEGRA_PIN_GMI_AD9_PH1,
511};
512
513static const unsigned gmi_ad10_ph2_pins[] = {
514 TEGRA_PIN_GMI_AD10_PH2,
515};
516
517static const unsigned gmi_ad11_ph3_pins[] = {
518 TEGRA_PIN_GMI_AD11_PH3,
519};
520
521static const unsigned gmi_ad12_ph4_pins[] = {
522 TEGRA_PIN_GMI_AD12_PH4,
523};
524
525static const unsigned gmi_ad13_ph5_pins[] = {
526 TEGRA_PIN_GMI_AD13_PH5,
527};
528
529static const unsigned gmi_ad14_ph6_pins[] = {
530 TEGRA_PIN_GMI_AD14_PH6,
531};
532
533static const unsigned gmi_ad15_ph7_pins[] = {
534 TEGRA_PIN_GMI_AD15_PH7,
535};
536
537static const unsigned gmi_wr_n_pi0_pins[] = {
538 TEGRA_PIN_GMI_WR_N_PI0,
539};
540
541static const unsigned gmi_oe_n_pi1_pins[] = {
542 TEGRA_PIN_GMI_OE_N_PI1,
543};
544
545static const unsigned gmi_cs6_n_pi3_pins[] = {
546 TEGRA_PIN_GMI_CS6_N_PI3,
547};
548
549static const unsigned gmi_rst_n_pi4_pins[] = {
550 TEGRA_PIN_GMI_RST_N_PI4,
551};
552
553static const unsigned gmi_iordy_pi5_pins[] = {
554 TEGRA_PIN_GMI_IORDY_PI5,
555};
556
557static const unsigned gmi_cs7_n_pi6_pins[] = {
558 TEGRA_PIN_GMI_CS7_N_PI6,
559};
560
561static const unsigned gmi_wait_pi7_pins[] = {
562 TEGRA_PIN_GMI_WAIT_PI7,
563};
564
565static const unsigned gmi_cs0_n_pj0_pins[] = {
566 TEGRA_PIN_GMI_CS0_N_PJ0,
567};
568
569static const unsigned gmi_cs1_n_pj2_pins[] = {
570 TEGRA_PIN_GMI_CS1_N_PJ2,
571};
572
573static const unsigned gmi_dqs_p_pj3_pins[] = {
574 TEGRA_PIN_GMI_DQS_P_PJ3,
575};
576
577static const unsigned uart2_cts_n_pj5_pins[] = {
578 TEGRA_PIN_UART2_CTS_N_PJ5,
579};
580
581static const unsigned uart2_rts_n_pj6_pins[] = {
582 TEGRA_PIN_UART2_RTS_N_PJ6,
583};
584
585static const unsigned gmi_a16_pj7_pins[] = {
586 TEGRA_PIN_GMI_A16_PJ7,
587};
588
589static const unsigned gmi_adv_n_pk0_pins[] = {
590 TEGRA_PIN_GMI_ADV_N_PK0,
591};
592
593static const unsigned gmi_clk_pk1_pins[] = {
594 TEGRA_PIN_GMI_CLK_PK1,
595};
596
597static const unsigned gmi_cs4_n_pk2_pins[] = {
598 TEGRA_PIN_GMI_CS4_N_PK2,
599};
600
601static const unsigned gmi_cs2_n_pk3_pins[] = {
602 TEGRA_PIN_GMI_CS2_N_PK3,
603};
604
605static const unsigned gmi_cs3_n_pk4_pins[] = {
606 TEGRA_PIN_GMI_CS3_N_PK4,
607};
608
609static const unsigned spdif_out_pk5_pins[] = {
610 TEGRA_PIN_SPDIF_OUT_PK5,
611};
612
613static const unsigned spdif_in_pk6_pins[] = {
614 TEGRA_PIN_SPDIF_IN_PK6,
615};
616
617static const unsigned gmi_a19_pk7_pins[] = {
618 TEGRA_PIN_GMI_A19_PK7,
619};
620
621static const unsigned dap1_fs_pn0_pins[] = {
622 TEGRA_PIN_DAP1_FS_PN0,
623};
624
625static const unsigned dap1_din_pn1_pins[] = {
626 TEGRA_PIN_DAP1_DIN_PN1,
627};
628
629static const unsigned dap1_dout_pn2_pins[] = {
630 TEGRA_PIN_DAP1_DOUT_PN2,
631};
632
633static const unsigned dap1_sclk_pn3_pins[] = {
634 TEGRA_PIN_DAP1_SCLK_PN3,
635};
636
637static const unsigned usb_vbus_en0_pn4_pins[] = {
638 TEGRA_PIN_USB_VBUS_EN0_PN4,
639};
640
641static const unsigned usb_vbus_en1_pn5_pins[] = {
642 TEGRA_PIN_USB_VBUS_EN1_PN5,
643};
644
645static const unsigned hdmi_int_pn7_pins[] = {
646 TEGRA_PIN_HDMI_INT_PN7,
647};
648
649static const unsigned ulpi_data7_po0_pins[] = {
650 TEGRA_PIN_ULPI_DATA7_PO0,
651};
652
653static const unsigned ulpi_data0_po1_pins[] = {
654 TEGRA_PIN_ULPI_DATA0_PO1,
655};
656
657static const unsigned ulpi_data1_po2_pins[] = {
658 TEGRA_PIN_ULPI_DATA1_PO2,
659};
660
661static const unsigned ulpi_data2_po3_pins[] = {
662 TEGRA_PIN_ULPI_DATA2_PO3,
663};
664
665static const unsigned ulpi_data3_po4_pins[] = {
666 TEGRA_PIN_ULPI_DATA3_PO4,
667};
668
669static const unsigned ulpi_data4_po5_pins[] = {
670 TEGRA_PIN_ULPI_DATA4_PO5,
671};
672
673static const unsigned ulpi_data5_po6_pins[] = {
674 TEGRA_PIN_ULPI_DATA5_PO6,
675};
676
677static const unsigned ulpi_data6_po7_pins[] = {
678 TEGRA_PIN_ULPI_DATA6_PO7,
679};
680
681static const unsigned dap3_fs_pp0_pins[] = {
682 TEGRA_PIN_DAP3_FS_PP0,
683};
684
685static const unsigned dap3_din_pp1_pins[] = {
686 TEGRA_PIN_DAP3_DIN_PP1,
687};
688
689static const unsigned dap3_dout_pp2_pins[] = {
690 TEGRA_PIN_DAP3_DOUT_PP2,
691};
692
693static const unsigned dap3_sclk_pp3_pins[] = {
694 TEGRA_PIN_DAP3_SCLK_PP3,
695};
696
697static const unsigned dap4_fs_pp4_pins[] = {
698 TEGRA_PIN_DAP4_FS_PP4,
699};
700
701static const unsigned dap4_din_pp5_pins[] = {
702 TEGRA_PIN_DAP4_DIN_PP5,
703};
704
705static const unsigned dap4_dout_pp6_pins[] = {
706 TEGRA_PIN_DAP4_DOUT_PP6,
707};
708
709static const unsigned dap4_sclk_pp7_pins[] = {
710 TEGRA_PIN_DAP4_SCLK_PP7,
711};
712
713static const unsigned kb_col0_pq0_pins[] = {
714 TEGRA_PIN_KB_COL0_PQ0,
715};
716
717static const unsigned kb_col1_pq1_pins[] = {
718 TEGRA_PIN_KB_COL1_PQ1,
719};
720
721static const unsigned kb_col2_pq2_pins[] = {
722 TEGRA_PIN_KB_COL2_PQ2,
723};
724
725static const unsigned kb_col3_pq3_pins[] = {
726 TEGRA_PIN_KB_COL3_PQ3,
727};
728
729static const unsigned kb_col4_pq4_pins[] = {
730 TEGRA_PIN_KB_COL4_PQ4,
731};
732
733static const unsigned kb_col5_pq5_pins[] = {
734 TEGRA_PIN_KB_COL5_PQ5,
735};
736
737static const unsigned kb_col6_pq6_pins[] = {
738 TEGRA_PIN_KB_COL6_PQ6,
739};
740
741static const unsigned kb_col7_pq7_pins[] = {
742 TEGRA_PIN_KB_COL7_PQ7,
743};
744
745static const unsigned kb_row0_pr0_pins[] = {
746 TEGRA_PIN_KB_ROW0_PR0,
747};
748
749static const unsigned kb_row1_pr1_pins[] = {
750 TEGRA_PIN_KB_ROW1_PR1,
751};
752
753static const unsigned kb_row2_pr2_pins[] = {
754 TEGRA_PIN_KB_ROW2_PR2,
755};
756
757static const unsigned kb_row3_pr3_pins[] = {
758 TEGRA_PIN_KB_ROW3_PR3,
759};
760
761static const unsigned kb_row4_pr4_pins[] = {
762 TEGRA_PIN_KB_ROW4_PR4,
763};
764
765static const unsigned kb_row5_pr5_pins[] = {
766 TEGRA_PIN_KB_ROW5_PR5,
767};
768
769static const unsigned kb_row6_pr6_pins[] = {
770 TEGRA_PIN_KB_ROW6_PR6,
771};
772
773static const unsigned kb_row7_pr7_pins[] = {
774 TEGRA_PIN_KB_ROW7_PR7,
775};
776
777static const unsigned kb_row8_ps0_pins[] = {
778 TEGRA_PIN_KB_ROW8_PS0,
779};
780
781static const unsigned kb_row9_ps1_pins[] = {
782 TEGRA_PIN_KB_ROW9_PS1,
783};
784
785static const unsigned kb_row10_ps2_pins[] = {
786 TEGRA_PIN_KB_ROW10_PS2,
787};
788
789static const unsigned gen2_i2c_scl_pt5_pins[] = {
790 TEGRA_PIN_GEN2_I2C_SCL_PT5,
791};
792
793static const unsigned gen2_i2c_sda_pt6_pins[] = {
794 TEGRA_PIN_GEN2_I2C_SDA_PT6,
795};
796
797static const unsigned sdmmc4_cmd_pt7_pins[] = {
798 TEGRA_PIN_SDMMC4_CMD_PT7,
799};
800
801static const unsigned pu0_pins[] = {
802 TEGRA_PIN_PU0,
803};
804
805static const unsigned pu1_pins[] = {
806 TEGRA_PIN_PU1,
807};
808
809static const unsigned pu2_pins[] = {
810 TEGRA_PIN_PU2,
811};
812
813static const unsigned pu3_pins[] = {
814 TEGRA_PIN_PU3,
815};
816
817static const unsigned pu4_pins[] = {
818 TEGRA_PIN_PU4,
819};
820
821static const unsigned pu5_pins[] = {
822 TEGRA_PIN_PU5,
823};
824
825static const unsigned pu6_pins[] = {
826 TEGRA_PIN_PU6,
827};
828
829static const unsigned pv0_pins[] = {
830 TEGRA_PIN_PV0,
831};
832
833static const unsigned pv1_pins[] = {
834 TEGRA_PIN_PV1,
835};
836
837static const unsigned sdmmc3_cd_n_pv2_pins[] = {
838 TEGRA_PIN_SDMMC3_CD_N_PV2,
839};
840
841static const unsigned sdmmc1_wp_n_pv3_pins[] = {
842 TEGRA_PIN_SDMMC1_WP_N_PV3,
843};
844
845static const unsigned ddc_scl_pv4_pins[] = {
846 TEGRA_PIN_DDC_SCL_PV4,
847};
848
849static const unsigned ddc_sda_pv5_pins[] = {
850 TEGRA_PIN_DDC_SDA_PV5,
851};
852
853static const unsigned gpio_w2_aud_pw2_pins[] = {
854 TEGRA_PIN_GPIO_W2_AUD_PW2,
855};
856
857static const unsigned gpio_w3_aud_pw3_pins[] = {
858 TEGRA_PIN_GPIO_W3_AUD_PW3,
859};
860
861static const unsigned clk1_out_pw4_pins[] = {
862 TEGRA_PIN_CLK1_OUT_PW4,
863};
864
865static const unsigned clk2_out_pw5_pins[] = {
866 TEGRA_PIN_CLK2_OUT_PW5,
867};
868
869static const unsigned uart3_txd_pw6_pins[] = {
870 TEGRA_PIN_UART3_TXD_PW6,
871};
872
873static const unsigned uart3_rxd_pw7_pins[] = {
874 TEGRA_PIN_UART3_RXD_PW7,
875};
876
877static const unsigned dvfs_pwm_px0_pins[] = {
878 TEGRA_PIN_DVFS_PWM_PX0,
879};
880
881static const unsigned gpio_x1_aud_px1_pins[] = {
882 TEGRA_PIN_GPIO_X1_AUD_PX1,
883};
884
885static const unsigned dvfs_clk_px2_pins[] = {
886 TEGRA_PIN_DVFS_CLK_PX2,
887};
888
889static const unsigned gpio_x3_aud_px3_pins[] = {
890 TEGRA_PIN_GPIO_X3_AUD_PX3,
891};
892
893static const unsigned gpio_x4_aud_px4_pins[] = {
894 TEGRA_PIN_GPIO_X4_AUD_PX4,
895};
896
897static const unsigned gpio_x5_aud_px5_pins[] = {
898 TEGRA_PIN_GPIO_X5_AUD_PX5,
899};
900
901static const unsigned gpio_x6_aud_px6_pins[] = {
902 TEGRA_PIN_GPIO_X6_AUD_PX6,
903};
904
905static const unsigned gpio_x7_aud_px7_pins[] = {
906 TEGRA_PIN_GPIO_X7_AUD_PX7,
907};
908
909static const unsigned ulpi_clk_py0_pins[] = {
910 TEGRA_PIN_ULPI_CLK_PY0,
911};
912
913static const unsigned ulpi_dir_py1_pins[] = {
914 TEGRA_PIN_ULPI_DIR_PY1,
915};
916
917static const unsigned ulpi_nxt_py2_pins[] = {
918 TEGRA_PIN_ULPI_NXT_PY2,
919};
920
921static const unsigned ulpi_stp_py3_pins[] = {
922 TEGRA_PIN_ULPI_STP_PY3,
923};
924
925static const unsigned sdmmc1_dat3_py4_pins[] = {
926 TEGRA_PIN_SDMMC1_DAT3_PY4,
927};
928
929static const unsigned sdmmc1_dat2_py5_pins[] = {
930 TEGRA_PIN_SDMMC1_DAT2_PY5,
931};
932
933static const unsigned sdmmc1_dat1_py6_pins[] = {
934 TEGRA_PIN_SDMMC1_DAT1_PY6,
935};
936
937static const unsigned sdmmc1_dat0_py7_pins[] = {
938 TEGRA_PIN_SDMMC1_DAT0_PY7,
939};
940
941static const unsigned sdmmc1_clk_pz0_pins[] = {
942 TEGRA_PIN_SDMMC1_CLK_PZ0,
943};
944
945static const unsigned sdmmc1_cmd_pz1_pins[] = {
946 TEGRA_PIN_SDMMC1_CMD_PZ1,
947};
948
949static const unsigned sys_clk_req_pz5_pins[] = {
950 TEGRA_PIN_SYS_CLK_REQ_PZ5,
951};
952
953static const unsigned pwr_i2c_scl_pz6_pins[] = {
954 TEGRA_PIN_PWR_I2C_SCL_PZ6,
955};
956
957static const unsigned pwr_i2c_sda_pz7_pins[] = {
958 TEGRA_PIN_PWR_I2C_SDA_PZ7,
959};
960
961static const unsigned sdmmc4_dat0_paa0_pins[] = {
962 TEGRA_PIN_SDMMC4_DAT0_PAA0,
963};
964
965static const unsigned sdmmc4_dat1_paa1_pins[] = {
966 TEGRA_PIN_SDMMC4_DAT1_PAA1,
967};
968
969static const unsigned sdmmc4_dat2_paa2_pins[] = {
970 TEGRA_PIN_SDMMC4_DAT2_PAA2,
971};
972
973static const unsigned sdmmc4_dat3_paa3_pins[] = {
974 TEGRA_PIN_SDMMC4_DAT3_PAA3,
975};
976
977static const unsigned sdmmc4_dat4_paa4_pins[] = {
978 TEGRA_PIN_SDMMC4_DAT4_PAA4,
979};
980
981static const unsigned sdmmc4_dat5_paa5_pins[] = {
982 TEGRA_PIN_SDMMC4_DAT5_PAA5,
983};
984
985static const unsigned sdmmc4_dat6_paa6_pins[] = {
986 TEGRA_PIN_SDMMC4_DAT6_PAA6,
987};
988
989static const unsigned sdmmc4_dat7_paa7_pins[] = {
990 TEGRA_PIN_SDMMC4_DAT7_PAA7,
991};
992
993static const unsigned pbb0_pins[] = {
994 TEGRA_PIN_PBB0,
995};
996
997static const unsigned cam_i2c_scl_pbb1_pins[] = {
998 TEGRA_PIN_CAM_I2C_SCL_PBB1,
999};
1000
1001static const unsigned cam_i2c_sda_pbb2_pins[] = {
1002 TEGRA_PIN_CAM_I2C_SDA_PBB2,
1003};
1004
1005static const unsigned pbb3_pins[] = {
1006 TEGRA_PIN_PBB3,
1007};
1008
1009static const unsigned pbb4_pins[] = {
1010 TEGRA_PIN_PBB4,
1011};
1012
1013static const unsigned pbb5_pins[] = {
1014 TEGRA_PIN_PBB5,
1015};
1016
1017static const unsigned pbb6_pins[] = {
1018 TEGRA_PIN_PBB6,
1019};
1020
1021static const unsigned pbb7_pins[] = {
1022 TEGRA_PIN_PBB7,
1023};
1024
1025static const unsigned cam_mclk_pcc0_pins[] = {
1026 TEGRA_PIN_CAM_MCLK_PCC0,
1027};
1028
1029static const unsigned pcc1_pins[] = {
1030 TEGRA_PIN_PCC1,
1031};
1032
1033static const unsigned pcc2_pins[] = {
1034 TEGRA_PIN_PCC2,
1035};
1036
1037static const unsigned sdmmc4_clk_pcc4_pins[] = {
1038 TEGRA_PIN_SDMMC4_CLK_PCC4,
1039};
1040
1041static const unsigned clk2_req_pcc5_pins[] = {
1042 TEGRA_PIN_CLK2_REQ_PCC5,
1043};
1044
1045static const unsigned clk3_out_pee0_pins[] = {
1046 TEGRA_PIN_CLK3_OUT_PEE0,
1047};
1048
1049static const unsigned clk3_req_pee1_pins[] = {
1050 TEGRA_PIN_CLK3_REQ_PEE1,
1051};
1052
1053static const unsigned clk1_req_pee2_pins[] = {
1054 TEGRA_PIN_CLK1_REQ_PEE2,
1055};
1056
1057static const unsigned hdmi_cec_pee3_pins[] = {
1058 TEGRA_PIN_HDMI_CEC_PEE3,
1059};
1060
1061static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
1062 TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
1063};
1064
1065static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
1066 TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
1067};
1068
1069static const unsigned core_pwr_req_pins[] = {
1070 TEGRA_PIN_CORE_PWR_REQ,
1071};
1072
1073static const unsigned cpu_pwr_req_pins[] = {
1074 TEGRA_PIN_CPU_PWR_REQ,
1075};
1076
1077static const unsigned owr_pins[] = {
1078 TEGRA_PIN_OWR,
1079};
1080
1081static const unsigned pwr_int_n_pins[] = {
1082 TEGRA_PIN_PWR_INT_N,
1083};
1084
1085static const unsigned reset_out_n_pins[] = {
1086 TEGRA_PIN_RESET_OUT_N,
1087};
1088
1089static const unsigned drive_ao1_pins[] = {
1090 TEGRA_PIN_KB_ROW0_PR0,
1091 TEGRA_PIN_KB_ROW1_PR1,
1092 TEGRA_PIN_KB_ROW2_PR2,
1093 TEGRA_PIN_KB_ROW3_PR3,
1094 TEGRA_PIN_KB_ROW4_PR4,
1095 TEGRA_PIN_KB_ROW5_PR5,
1096 TEGRA_PIN_KB_ROW6_PR6,
1097 TEGRA_PIN_KB_ROW7_PR7,
1098 TEGRA_PIN_PWR_I2C_SCL_PZ6,
1099 TEGRA_PIN_PWR_I2C_SDA_PZ7,
1100};
1101
1102static const unsigned drive_ao2_pins[] = {
1103 TEGRA_PIN_CLK_32K_OUT_PA0,
1104 TEGRA_PIN_KB_COL0_PQ0,
1105 TEGRA_PIN_KB_COL1_PQ1,
1106 TEGRA_PIN_KB_COL2_PQ2,
1107 TEGRA_PIN_KB_COL3_PQ3,
1108 TEGRA_PIN_KB_COL4_PQ4,
1109 TEGRA_PIN_KB_COL5_PQ5,
1110 TEGRA_PIN_KB_COL6_PQ6,
1111 TEGRA_PIN_KB_COL7_PQ7,
1112 TEGRA_PIN_KB_ROW8_PS0,
1113 TEGRA_PIN_KB_ROW9_PS1,
1114 TEGRA_PIN_KB_ROW10_PS2,
1115 TEGRA_PIN_SYS_CLK_REQ_PZ5,
1116 TEGRA_PIN_CORE_PWR_REQ,
1117 TEGRA_PIN_CPU_PWR_REQ,
1118 TEGRA_PIN_RESET_OUT_N,
1119};
1120
1121static const unsigned drive_at1_pins[] = {
1122 TEGRA_PIN_GMI_AD8_PH0,
1123 TEGRA_PIN_GMI_AD9_PH1,
1124 TEGRA_PIN_GMI_AD10_PH2,
1125 TEGRA_PIN_GMI_AD11_PH3,
1126 TEGRA_PIN_GMI_AD12_PH4,
1127 TEGRA_PIN_GMI_AD13_PH5,
1128 TEGRA_PIN_GMI_AD14_PH6,
1129 TEGRA_PIN_GMI_AD15_PH7,
1130
1131 TEGRA_PIN_GMI_IORDY_PI5,
1132 TEGRA_PIN_GMI_CS7_N_PI6,
1133};
1134
1135static const unsigned drive_at2_pins[] = {
1136 TEGRA_PIN_GMI_AD0_PG0,
1137 TEGRA_PIN_GMI_AD1_PG1,
1138 TEGRA_PIN_GMI_AD2_PG2,
1139 TEGRA_PIN_GMI_AD3_PG3,
1140 TEGRA_PIN_GMI_AD4_PG4,
1141 TEGRA_PIN_GMI_AD5_PG5,
1142 TEGRA_PIN_GMI_AD6_PG6,
1143 TEGRA_PIN_GMI_AD7_PG7,
1144
1145 TEGRA_PIN_GMI_WR_N_PI0,
1146 TEGRA_PIN_GMI_OE_N_PI1,
1147 TEGRA_PIN_GMI_CS6_N_PI3,
1148 TEGRA_PIN_GMI_RST_N_PI4,
1149 TEGRA_PIN_GMI_WAIT_PI7,
1150
1151 TEGRA_PIN_GMI_DQS_P_PJ3,
1152
1153 TEGRA_PIN_GMI_ADV_N_PK0,
1154 TEGRA_PIN_GMI_CLK_PK1,
1155 TEGRA_PIN_GMI_CS4_N_PK2,
1156 TEGRA_PIN_GMI_CS2_N_PK3,
1157 TEGRA_PIN_GMI_CS3_N_PK4,
1158};
1159
1160static const unsigned drive_at3_pins[] = {
1161 TEGRA_PIN_GMI_WP_N_PC7,
1162 TEGRA_PIN_GMI_CS0_N_PJ0,
1163};
1164
1165static const unsigned drive_at4_pins[] = {
1166 TEGRA_PIN_GMI_A17_PB0,
1167 TEGRA_PIN_GMI_A18_PB1,
1168 TEGRA_PIN_GMI_CS1_N_PJ2,
1169 TEGRA_PIN_GMI_A16_PJ7,
1170 TEGRA_PIN_GMI_A19_PK7,
1171};
1172
1173static const unsigned drive_at5_pins[] = {
1174 TEGRA_PIN_GEN2_I2C_SCL_PT5,
1175 TEGRA_PIN_GEN2_I2C_SDA_PT6,
1176};
1177
1178static const unsigned drive_cdev1_pins[] = {
1179 TEGRA_PIN_CLK1_OUT_PW4,
1180 TEGRA_PIN_CLK1_REQ_PEE2,
1181};
1182
1183static const unsigned drive_cdev2_pins[] = {
1184 TEGRA_PIN_CLK2_OUT_PW5,
1185 TEGRA_PIN_CLK2_REQ_PCC5,
1186 TEGRA_PIN_SDMMC1_WP_N_PV3,
1187};
1188
1189static const unsigned drive_dap1_pins[] = {
1190 TEGRA_PIN_DAP1_FS_PN0,
1191 TEGRA_PIN_DAP1_DIN_PN1,
1192 TEGRA_PIN_DAP1_DOUT_PN2,
1193 TEGRA_PIN_DAP1_SCLK_PN3,
1194};
1195
1196static const unsigned drive_dap2_pins[] = {
1197 TEGRA_PIN_DAP2_FS_PA2,
1198 TEGRA_PIN_DAP2_SCLK_PA3,
1199 TEGRA_PIN_DAP2_DIN_PA4,
1200 TEGRA_PIN_DAP2_DOUT_PA5,
1201};
1202
1203static const unsigned drive_dap3_pins[] = {
1204 TEGRA_PIN_DAP3_FS_PP0,
1205 TEGRA_PIN_DAP3_DIN_PP1,
1206 TEGRA_PIN_DAP3_DOUT_PP2,
1207 TEGRA_PIN_DAP3_SCLK_PP3,
1208};
1209
1210static const unsigned drive_dap4_pins[] = {
1211 TEGRA_PIN_DAP4_FS_PP4,
1212 TEGRA_PIN_DAP4_DIN_PP5,
1213 TEGRA_PIN_DAP4_DOUT_PP6,
1214 TEGRA_PIN_DAP4_SCLK_PP7,
1215};
1216
1217static const unsigned drive_dbg_pins[] = {
1218 TEGRA_PIN_GEN1_I2C_SCL_PC4,
1219 TEGRA_PIN_GEN1_I2C_SDA_PC5,
1220 TEGRA_PIN_PU0,
1221 TEGRA_PIN_PU1,
1222 TEGRA_PIN_PU2,
1223 TEGRA_PIN_PU3,
1224 TEGRA_PIN_PU4,
1225 TEGRA_PIN_PU5,
1226 TEGRA_PIN_PU6,
1227};
1228
1229static const unsigned drive_sdio3_pins[] = {
1230 TEGRA_PIN_SDMMC3_CLK_PA6,
1231 TEGRA_PIN_SDMMC3_CMD_PA7,
1232 TEGRA_PIN_SDMMC3_DAT3_PB4,
1233 TEGRA_PIN_SDMMC3_DAT2_PB5,
1234 TEGRA_PIN_SDMMC3_DAT1_PB6,
1235 TEGRA_PIN_SDMMC3_DAT0_PB7,
1236 TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
1237 TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
1238};
1239
1240static const unsigned drive_spi_pins[] = {
1241 TEGRA_PIN_DVFS_PWM_PX0,
1242 TEGRA_PIN_GPIO_X1_AUD_PX1,
1243 TEGRA_PIN_DVFS_CLK_PX2,
1244 TEGRA_PIN_GPIO_X3_AUD_PX3,
1245 TEGRA_PIN_GPIO_X4_AUD_PX4,
1246 TEGRA_PIN_GPIO_X5_AUD_PX5,
1247 TEGRA_PIN_GPIO_X6_AUD_PX6,
1248 TEGRA_PIN_GPIO_X7_AUD_PX7,
1249 TEGRA_PIN_GPIO_W2_AUD_PW2,
1250 TEGRA_PIN_GPIO_W3_AUD_PW3,
1251};
1252
1253static const unsigned drive_uaa_pins[] = {
1254 TEGRA_PIN_ULPI_DATA0_PO1,
1255 TEGRA_PIN_ULPI_DATA1_PO2,
1256 TEGRA_PIN_ULPI_DATA2_PO3,
1257 TEGRA_PIN_ULPI_DATA3_PO4,
1258};
1259
1260static const unsigned drive_uab_pins[] = {
1261 TEGRA_PIN_ULPI_DATA7_PO0,
1262 TEGRA_PIN_ULPI_DATA4_PO5,
1263 TEGRA_PIN_ULPI_DATA5_PO6,
1264 TEGRA_PIN_ULPI_DATA6_PO7,
1265 TEGRA_PIN_PV0,
1266 TEGRA_PIN_PV1,
1267};
1268
1269static const unsigned drive_uart2_pins[] = {
1270 TEGRA_PIN_UART2_TXD_PC2,
1271 TEGRA_PIN_UART2_RXD_PC3,
1272 TEGRA_PIN_UART2_CTS_N_PJ5,
1273 TEGRA_PIN_UART2_RTS_N_PJ6,
1274};
1275
1276static const unsigned drive_uart3_pins[] = {
1277 TEGRA_PIN_UART3_CTS_N_PA1,
1278 TEGRA_PIN_UART3_RTS_N_PC0,
1279 TEGRA_PIN_UART3_TXD_PW6,
1280 TEGRA_PIN_UART3_RXD_PW7,
1281};
1282
1283static const unsigned drive_sdio1_pins[] = {
1284 TEGRA_PIN_SDMMC1_DAT3_PY4,
1285 TEGRA_PIN_SDMMC1_DAT2_PY5,
1286 TEGRA_PIN_SDMMC1_DAT1_PY6,
1287 TEGRA_PIN_SDMMC1_DAT0_PY7,
1288 TEGRA_PIN_SDMMC1_CLK_PZ0,
1289 TEGRA_PIN_SDMMC1_CMD_PZ1,
1290};
1291
1292static const unsigned drive_ddc_pins[] = {
1293 TEGRA_PIN_DDC_SCL_PV4,
1294 TEGRA_PIN_DDC_SDA_PV5,
1295};
1296
1297static const unsigned drive_gma_pins[] = {
1298 TEGRA_PIN_SDMMC4_CLK_PCC4,
1299 TEGRA_PIN_SDMMC4_CMD_PT7,
1300 TEGRA_PIN_SDMMC4_DAT0_PAA0,
1301 TEGRA_PIN_SDMMC4_DAT1_PAA1,
1302 TEGRA_PIN_SDMMC4_DAT2_PAA2,
1303 TEGRA_PIN_SDMMC4_DAT3_PAA3,
1304 TEGRA_PIN_SDMMC4_DAT4_PAA4,
1305 TEGRA_PIN_SDMMC4_DAT5_PAA5,
1306 TEGRA_PIN_SDMMC4_DAT6_PAA6,
1307 TEGRA_PIN_SDMMC4_DAT7_PAA7,
1308};
1309
1310static const unsigned drive_gme_pins[] = {
1311 TEGRA_PIN_PBB0,
1312 TEGRA_PIN_CAM_I2C_SCL_PBB1,
1313 TEGRA_PIN_CAM_I2C_SDA_PBB2,
1314 TEGRA_PIN_PBB3,
1315 TEGRA_PIN_PCC2,
1316};
1317
1318static const unsigned drive_gmf_pins[] = {
1319 TEGRA_PIN_PBB4,
1320 TEGRA_PIN_PBB5,
1321 TEGRA_PIN_PBB6,
1322 TEGRA_PIN_PBB7,
1323};
1324
1325static const unsigned drive_gmg_pins[] = {
1326 TEGRA_PIN_CAM_MCLK_PCC0,
1327};
1328
1329static const unsigned drive_gmh_pins[] = {
1330 TEGRA_PIN_PCC1,
1331};
1332
1333static const unsigned drive_owr_pins[] = {
1334 TEGRA_PIN_SDMMC3_CD_N_PV2,
1335};
1336
1337static const unsigned drive_uda_pins[] = {
1338 TEGRA_PIN_ULPI_CLK_PY0,
1339 TEGRA_PIN_ULPI_DIR_PY1,
1340 TEGRA_PIN_ULPI_NXT_PY2,
1341 TEGRA_PIN_ULPI_STP_PY3,
1342};
1343
1344static const unsigned drive_dev3_pins[] = {
1345 TEGRA_PIN_CLK3_OUT_PEE0,
1346 TEGRA_PIN_CLK3_REQ_PEE1,
1347};
1348
/*
 * Mux functions selectable on Tegra114 pinmux groups, in alphabetical
 * order; enumerator values index the driver's function table, so the
 * order must not change.
 * (Restored from a scrape that had cgit line numbers fused into the code.)
 */
enum tegra_mux {
	TEGRA_MUX_BLINK,
	TEGRA_MUX_CEC,
	TEGRA_MUX_CLDVFS,
	TEGRA_MUX_CLK12,
	TEGRA_MUX_CPU,
	TEGRA_MUX_DAP,
	TEGRA_MUX_DAP1,
	TEGRA_MUX_DAP2,
	TEGRA_MUX_DEV3,
	TEGRA_MUX_DISPLAYA,
	TEGRA_MUX_DISPLAYA_ALT,
	TEGRA_MUX_DISPLAYB,
	TEGRA_MUX_DTV,
	TEGRA_MUX_EMC_DLL,
	TEGRA_MUX_EXTPERIPH1,
	TEGRA_MUX_EXTPERIPH2,
	TEGRA_MUX_EXTPERIPH3,
	TEGRA_MUX_GMI,
	TEGRA_MUX_GMI_ALT,
	TEGRA_MUX_HDA,
	TEGRA_MUX_HSI,
	TEGRA_MUX_I2C1,
	TEGRA_MUX_I2C2,
	TEGRA_MUX_I2C3,
	TEGRA_MUX_I2C4,
	TEGRA_MUX_I2CPWR,
	TEGRA_MUX_I2S0,
	TEGRA_MUX_I2S1,
	TEGRA_MUX_I2S2,
	TEGRA_MUX_I2S3,
	TEGRA_MUX_I2S4,
	TEGRA_MUX_IRDA,
	TEGRA_MUX_KBC,
	TEGRA_MUX_NAND,
	TEGRA_MUX_NAND_ALT,
	TEGRA_MUX_OWR,
	TEGRA_MUX_PMI,
	TEGRA_MUX_PWM0,
	TEGRA_MUX_PWM1,
	TEGRA_MUX_PWM2,
	TEGRA_MUX_PWM3,
	TEGRA_MUX_PWRON,
	TEGRA_MUX_RESET_OUT_N,
	TEGRA_MUX_RSVD1,
	TEGRA_MUX_RSVD2,
	TEGRA_MUX_RSVD3,
	TEGRA_MUX_RSVD4,
	TEGRA_MUX_SDMMC1,
	TEGRA_MUX_SDMMC2,
	TEGRA_MUX_SDMMC3,
	TEGRA_MUX_SDMMC4,
	TEGRA_MUX_SOC,
	TEGRA_MUX_SPDIF,
	TEGRA_MUX_SPI1,
	TEGRA_MUX_SPI2,
	TEGRA_MUX_SPI3,
	TEGRA_MUX_SPI4,
	TEGRA_MUX_SPI5,
	TEGRA_MUX_SPI6,
	TEGRA_MUX_SYSCLK,
	TEGRA_MUX_TRACE,
	TEGRA_MUX_UARTA,
	TEGRA_MUX_UARTB,
	TEGRA_MUX_UARTC,
	TEGRA_MUX_UARTD,
	TEGRA_MUX_ULPI,
	TEGRA_MUX_USB,
	TEGRA_MUX_VGP1,
	TEGRA_MUX_VGP2,
	TEGRA_MUX_VGP3,
	TEGRA_MUX_VGP4,
	TEGRA_MUX_VGP5,
	TEGRA_MUX_VGP6,
	TEGRA_MUX_VI,
	TEGRA_MUX_VI_ALT1,
	TEGRA_MUX_VI_ALT3,
};
1427
1428static const char * const blink_groups[] = {
1429 "clk_32k_out_pa0",
1430};
1431
1432static const char * const cec_groups[] = {
1433 "hdmi_cec_pee3",
1434};
1435
1436static const char * const cldvfs_groups[] = {
1437 "gmi_ad9_ph1",
1438 "gmi_ad10_ph2",
1439 "kb_row7_pr7",
1440 "kb_row8_ps0",
1441 "dvfs_pwm_px0",
1442 "dvfs_clk_px2",
1443};
1444
1445static const char * const clk12_groups[] = {
1446 "sdmmc1_wp_n_pv3",
1447 "sdmmc1_clk_pz0",
1448};
1449
1450static const char * const cpu_groups[] = {
1451 "cpu_pwr_req",
1452};
1453
1454static const char * const dap_groups[] = {
1455 "clk1_req_pee2",
1456 "clk2_req_pcc5",
1457};
1458
1459static const char * const dap1_groups[] = {
1460 "clk1_req_pee2",
1461};
1462
1463static const char * const dap2_groups[] = {
1464 "clk1_out_pw4",
1465 "gpio_x4_aud_px4",
1466};
1467
1468static const char * const dev3_groups[] = {
1469 "clk3_req_pee1",
1470};
1471
1472static const char * const displaya_groups[] = {
1473 "dap3_fs_pp0",
1474 "dap3_din_pp1",
1475 "dap3_dout_pp2",
1476 "dap3_sclk_pp3",
1477 "uart3_rts_n_pc0",
1478 "pu3",
1479 "pu4",
1480 "pu5",
1481 "pbb3",
1482 "pbb4",
1483 "pbb5",
1484 "pbb6",
1485 "kb_row3_pr3",
1486 "kb_row4_pr4",
1487 "kb_row5_pr5",
1488 "kb_row6_pr6",
1489 "kb_col3_pq3",
1490 "sdmmc3_dat2_pb5",
1491};
1492
1493static const char * const displaya_alt_groups[] = {
1494 "kb_row6_pr6",
1495};
1496
1497static const char * const displayb_groups[] = {
1498 "dap3_fs_pp0",
1499 "dap3_din_pp1",
1500 "dap3_dout_pp2",
1501 "dap3_sclk_pp3",
1502 "pu3",
1503 "pu4",
1504 "pu5",
1505 "pu6",
1506 "pbb3",
1507 "pbb4",
1508 "pbb5",
1509 "pbb6",
1510 "kb_row3_pr3",
1511 "kb_row4_pr4",
1512 "kb_row5_pr5",
1513 "kb_row6_pr6",
1514 "sdmmc3_dat3_pb4",
1515};
1516
1517static const char * const dtv_groups[] = {
1518 "uart3_cts_n_pa1",
1519 "uart3_rts_n_pc0",
1520 "dap4_fs_pp4",
1521 "dap4_dout_pp6",
1522 "gmi_wait_pi7",
1523 "gmi_ad8_ph0",
1524 "gmi_ad14_ph6",
1525 "gmi_ad15_ph7",
1526};
1527
1528static const char * const emc_dll_groups[] = {
1529 "kb_col0_pq0",
1530 "kb_col1_pq1",
1531};
1532
1533static const char * const extperiph1_groups[] = {
1534 "clk1_out_pw4",
1535};
1536
1537static const char * const extperiph2_groups[] = {
1538 "clk2_out_pw5",
1539};
1540
1541static const char * const extperiph3_groups[] = {
1542 "clk3_out_pee0",
1543};
1544
1545static const char * const gmi_groups[] = {
1546 "gmi_wp_n_pc7",
1547
1548 "gmi_ad0_pg0",
1549 "gmi_ad1_pg1",
1550 "gmi_ad2_pg2",
1551 "gmi_ad3_pg3",
1552 "gmi_ad4_pg4",
1553 "gmi_ad5_pg5",
1554 "gmi_ad6_pg6",
1555 "gmi_ad7_pg7",
1556 "gmi_ad8_ph0",
1557 "gmi_ad9_ph1",
1558 "gmi_ad10_ph2",
1559 "gmi_ad11_ph3",
1560 "gmi_ad12_ph4",
1561 "gmi_ad13_ph5",
1562 "gmi_ad14_ph6",
1563 "gmi_ad15_ph7",
1564 "gmi_wr_n_pi0",
1565 "gmi_oe_n_pi1",
1566 "gmi_cs6_n_pi3",
1567 "gmi_rst_n_pi4",
1568 "gmi_iordy_pi5",
1569 "gmi_cs7_n_pi6",
1570 "gmi_wait_pi7",
1571 "gmi_cs0_n_pj0",
1572 "gmi_cs1_n_pj2",
1573 "gmi_dqs_p_pj3",
1574 "gmi_adv_n_pk0",
1575 "gmi_clk_pk1",
1576 "gmi_cs4_n_pk2",
1577 "gmi_cs2_n_pk3",
1578 "gmi_cs3_n_pk4",
1579 "gmi_a16_pj7",
1580 "gmi_a17_pb0",
1581 "gmi_a18_pb1",
1582 "gmi_a19_pk7",
1583 "gen2_i2c_scl_pt5",
1584 "gen2_i2c_sda_pt6",
1585 "sdmmc4_dat0_paa0",
1586 "sdmmc4_dat1_paa1",
1587 "sdmmc4_dat2_paa2",
1588 "sdmmc4_dat3_paa3",
1589 "sdmmc4_dat4_paa4",
1590 "sdmmc4_dat5_paa5",
1591 "sdmmc4_dat6_paa6",
1592 "sdmmc4_dat7_paa7",
1593 "sdmmc4_clk_pcc4",
1594 "sdmmc4_cmd_pt7",
1595 "dap1_fs_pn0",
1596 "dap1_din_pn1",
1597 "dap1_dout_pn2",
1598 "dap1_sclk_pn3",
1599};
1600
1601static const char * const gmi_alt_groups[] = {
1602 "gmi_wp_n_pc7",
1603 "gmi_cs3_n_pk4",
1604 "gmi_a16_pj7",
1605};
1606
1607static const char * const hda_groups[] = {
1608 "dap1_fs_pn0",
1609 "dap1_din_pn1",
1610 "dap1_dout_pn2",
1611 "dap1_sclk_pn3",
1612 "dap2_fs_pa2",
1613 "dap2_sclk_pa3",
1614 "dap2_din_pa4",
1615 "dap2_dout_pa5",
1616};
1617
1618static const char * const hsi_groups[] = {
1619 "ulpi_data0_po1",
1620 "ulpi_data1_po2",
1621 "ulpi_data2_po3",
1622 "ulpi_data3_po4",
1623 "ulpi_data4_po5",
1624 "ulpi_data5_po6",
1625 "ulpi_data6_po7",
1626 "ulpi_data7_po0",
1627};
1628
1629static const char * const i2c1_groups[] = {
1630 "gen1_i2c_scl_pc4",
1631 "gen1_i2c_sda_pc5",
1632 "gpio_w2_aud_pw2",
1633 "gpio_w3_aud_pw3",
1634};
1635
1636static const char * const i2c2_groups[] = {
1637 "gen2_i2c_scl_pt5",
1638 "gen2_i2c_sda_pt6",
1639};
1640
1641static const char * const i2c3_groups[] = {
1642 "cam_i2c_scl_pbb1",
1643 "cam_i2c_sda_pbb2",
1644};
1645
1646static const char * const i2c4_groups[] = {
1647 "ddc_scl_pv4",
1648 "ddc_sda_pv5",
1649};
1650
1651static const char * const i2cpwr_groups[] = {
1652 "pwr_i2c_scl_pz6",
1653 "pwr_i2c_sda_pz7",
1654};
1655
1656static const char * const i2s0_groups[] = {
1657 "dap1_fs_pn0",
1658 "dap1_din_pn1",
1659 "dap1_dout_pn2",
1660 "dap1_sclk_pn3",
1661};
1662
1663static const char * const i2s1_groups[] = {
1664 "dap2_fs_pa2",
1665 "dap2_sclk_pa3",
1666 "dap2_din_pa4",
1667 "dap2_dout_pa5",
1668};
1669
1670static const char * const i2s2_groups[] = {
1671 "dap3_fs_pp0",
1672 "dap3_din_pp1",
1673 "dap3_dout_pp2",
1674 "dap3_sclk_pp3",
1675};
1676
1677static const char * const i2s3_groups[] = {
1678 "dap4_fs_pp4",
1679 "dap4_din_pp5",
1680 "dap4_dout_pp6",
1681 "dap4_sclk_pp7",
1682};
1683
1684static const char * const i2s4_groups[] = {
1685 "pcc1",
1686 "pbb0",
1687 "pbb7",
1688 "pcc2",
1689};
1690
1691static const char * const irda_groups[] = {
1692 "uart2_rxd_pc3",
1693 "uart2_txd_pc2",
1694};
1695
1696static const char * const kbc_groups[] = {
1697 "kb_row0_pr0",
1698 "kb_row1_pr1",
1699 "kb_row2_pr2",
1700 "kb_row3_pr3",
1701 "kb_row4_pr4",
1702 "kb_row5_pr5",
1703 "kb_row6_pr6",
1704 "kb_row7_pr7",
1705 "kb_row8_ps0",
1706 "kb_row9_ps1",
1707 "kb_row10_ps2",
1708 "kb_col0_pq0",
1709 "kb_col1_pq1",
1710 "kb_col2_pq2",
1711 "kb_col3_pq3",
1712 "kb_col4_pq4",
1713 "kb_col5_pq5",
1714 "kb_col6_pq6",
1715 "kb_col7_pq7",
1716};
1717
1718static const char * const nand_groups[] = {
1719 "gmi_wp_n_pc7",
1720 "gmi_wait_pi7",
1721 "gmi_adv_n_pk0",
1722 "gmi_clk_pk1",
1723 "gmi_cs0_n_pj0",
1724 "gmi_cs1_n_pj2",
1725 "gmi_cs2_n_pk3",
1726 "gmi_cs3_n_pk4",
1727 "gmi_cs4_n_pk2",
1728 "gmi_cs6_n_pi3",
1729 "gmi_cs7_n_pi6",
1730 "gmi_ad0_pg0",
1731 "gmi_ad1_pg1",
1732 "gmi_ad2_pg2",
1733 "gmi_ad3_pg3",
1734 "gmi_ad4_pg4",
1735 "gmi_ad5_pg5",
1736 "gmi_ad6_pg6",
1737 "gmi_ad7_pg7",
1738 "gmi_ad8_ph0",
1739 "gmi_ad9_ph1",
1740 "gmi_ad10_ph2",
1741 "gmi_ad11_ph3",
1742 "gmi_ad12_ph4",
1743 "gmi_ad13_ph5",
1744 "gmi_ad14_ph6",
1745 "gmi_ad15_ph7",
1746 "gmi_wr_n_pi0",
1747 "gmi_oe_n_pi1",
1748 "gmi_dqs_p_pj3",
1749 "gmi_rst_n_pi4",
1750};
1751
1752static const char * const nand_alt_groups[] = {
1753 "gmi_cs6_n_pi3",
1754 "gmi_cs7_n_pi6",
1755 "gmi_rst_n_pi4",
1756};
1757
1758static const char * const owr_groups[] = {
1759 "pu0",
1760 "kb_col4_pq4",
1761 "owr",
1762 "sdmmc3_cd_n_pv2",
1763};
1764
1765static const char * const pmi_groups[] = {
1766 "pwr_int_n",
1767};
1768
1769static const char * const pwm0_groups[] = {
1770 "sdmmc1_dat2_py5",
1771 "uart3_rts_n_pc0",
1772 "pu3",
1773 "gmi_ad8_ph0",
1774 "sdmmc3_dat3_pb4",
1775};
1776
1777static const char * const pwm1_groups[] = {
1778 "sdmmc1_dat1_py6",
1779 "pu4",
1780 "gmi_ad9_ph1",
1781 "sdmmc3_dat2_pb5",
1782};
1783
1784static const char * const pwm2_groups[] = {
1785 "pu5",
1786 "gmi_ad10_ph2",
1787 "kb_col3_pq3",
1788 "sdmmc3_dat1_pb6",
1789};
1790
1791static const char * const pwm3_groups[] = {
1792 "pu6",
1793 "gmi_ad11_ph3",
1794 "sdmmc3_cmd_pa7",
1795};
1796
1797static const char * const pwron_groups[] = {
1798 "core_pwr_req",
1799};
1800
1801static const char * const reset_out_n_groups[] = {
1802 "reset_out_n",
1803};
1804
1805static const char * const rsvd1_groups[] = {
1806 "pv1",
1807 "hdmi_int_pn7",
1808 "pu1",
1809 "pu2",
1810 "gmi_wp_n_pc7",
1811 "gmi_adv_n_pk0",
1812 "gmi_cs0_n_pj0",
1813 "gmi_cs1_n_pj2",
1814 "gmi_ad0_pg0",
1815 "gmi_ad1_pg1",
1816 "gmi_ad2_pg2",
1817 "gmi_ad3_pg3",
1818 "gmi_ad4_pg4",
1819 "gmi_ad5_pg5",
1820 "gmi_ad6_pg6",
1821 "gmi_ad7_pg7",
1822 "gmi_wr_n_pi0",
1823 "gmi_oe_n_pi1",
1824 "gpio_x4_aud_px4",
1825 "gpio_x5_aud_px5",
1826 "gpio_x7_aud_px7",
1827
1828 "reset_out_n",
1829};
1830
1831static const char * const rsvd2_groups[] = {
1832 "pv0",
1833 "pv1",
1834 "sdmmc1_dat0_py7",
1835 "clk2_out_pw5",
1836 "clk2_req_pcc5",
1837 "hdmi_int_pn7",
1838 "ddc_scl_pv4",
1839 "ddc_sda_pv5",
1840 "uart3_txd_pw6",
1841 "uart3_rxd_pw7",
1842 "gen1_i2c_scl_pc4",
1843 "gen1_i2c_sda_pc5",
1844 "dap4_fs_pp4",
1845 "dap4_din_pp5",
1846 "dap4_dout_pp6",
1847 "dap4_sclk_pp7",
1848 "clk3_out_pee0",
1849 "clk3_req_pee1",
1850 "gmi_iordy_pi5",
1851 "gmi_a17_pb0",
1852 "gmi_a18_pb1",
1853 "gen2_i2c_scl_pt5",
1854 "gen2_i2c_sda_pt6",
1855 "sdmmc4_clk_pcc4",
1856 "sdmmc4_cmd_pt7",
1857 "sdmmc4_dat7_paa7",
1858 "pcc1",
1859 "pbb7",
1860 "pcc2",
1861 "pwr_i2c_scl_pz6",
1862 "pwr_i2c_sda_pz7",
1863 "kb_row0_pr0",
1864 "kb_row1_pr1",
1865 "kb_row2_pr2",
1866 "kb_row7_pr7",
1867 "kb_row8_ps0",
1868 "kb_row9_ps1",
1869 "kb_row10_ps2",
1870 "kb_col1_pq1",
1871 "kb_col2_pq2",
1872 "kb_col5_pq5",
1873 "kb_col6_pq6",
1874 "kb_col7_pq7",
1875 "sys_clk_req_pz5",
1876 "core_pwr_req",
1877 "cpu_pwr_req",
1878 "pwr_int_n",
1879 "owr",
1880 "spdif_out_pk5",
1881 "gpio_x1_aud_px1",
1882 "sdmmc3_clk_pa6",
1883 "sdmmc3_dat0_pb7",
1884 "gpio_w2_aud_pw2",
1885 "usb_vbus_en0_pn4",
1886 "usb_vbus_en1_pn5",
1887 "sdmmc3_clk_lb_out_pee4",
1888 "sdmmc3_clk_lb_in_pee5",
1889 "reset_out_n",
1890};
1891
1892static const char * const rsvd3_groups[] = {
1893 "pv0",
1894 "pv1",
1895 "sdmmc1_clk_pz0",
1896 "clk2_out_pw5",
1897 "clk2_req_pcc5",
1898 "hdmi_int_pn7",
1899 "ddc_scl_pv4",
1900 "ddc_sda_pv5",
1901 "uart2_rts_n_pj6",
1902 "uart2_cts_n_pj5",
1903 "uart3_txd_pw6",
1904 "uart3_rxd_pw7",
1905 "pu0",
1906 "pu1",
1907 "pu2",
1908 "gen1_i2c_scl_pc4",
1909 "gen1_i2c_sda_pc5",
1910 "dap4_din_pp5",
1911 "dap4_sclk_pp7",
1912 "clk3_out_pee0",
1913 "clk3_req_pee1",
1914 "pcc1",
1915 "cam_i2c_scl_pbb1",
1916 "cam_i2c_sda_pbb2",
1917 "pbb7",
1918 "pcc2",
1919 "pwr_i2c_scl_pz6",
1920 "pwr_i2c_sda_pz7",
1921 "kb_row0_pr0",
1922 "kb_row1_pr1",
1923 "kb_row2_pr2",
1924 "kb_row3_pr3",
1925 "kb_row9_ps1",
1926 "kb_row10_ps2",
1927 "clk_32k_out_pa0",
1928 "sys_clk_req_pz5",
1929 "core_pwr_req",
1930 "cpu_pwr_req",
1931 "pwr_int_n",
1932 "owr",
1933 "clk1_req_pee2",
1934 "clk1_out_pw4",
1935 "spdif_out_pk5",
1936 "spdif_in_pk6",
1937 "dap2_fs_pa2",
1938 "dap2_sclk_pa3",
1939 "dap2_din_pa4",
1940 "dap2_dout_pa5",
1941 "dvfs_pwm_px0",
1942 "gpio_x1_aud_px1",
1943 "gpio_x3_aud_px3",
1944 "dvfs_clk_px2",
1945 "sdmmc3_clk_pa6",
1946 "sdmmc3_dat0_pb7",
1947 "hdmi_cec_pee3",
1948 "sdmmc3_cd_n_pv2",
1949 "usb_vbus_en0_pn4",
1950 "usb_vbus_en1_pn5",
1951 "sdmmc3_clk_lb_out_pee4",
1952 "sdmmc3_clk_lb_in_pee5",
1953 "reset_out_n",
1954};
1955
1956static const char * const rsvd4_groups[] = {
1957 "pv0",
1958 "pv1",
1959 "sdmmc1_clk_pz0",
1960 "clk2_out_pw5",
1961 "clk2_req_pcc5",
1962 "hdmi_int_pn7",
1963 "ddc_scl_pv4",
1964 "ddc_sda_pv5",
1965 "pu0",
1966 "pu1",
1967 "pu2",
1968 "gen1_i2c_scl_pc4",
1969 "gen1_i2c_sda_pc5",
1970 "dap4_fs_pp4",
1971 "dap4_din_pp5",
1972 "dap4_dout_pp6",
1973 "dap4_sclk_pp7",
1974 "clk3_out_pee0",
1975 "clk3_req_pee1",
1976 "gmi_ad0_pg0",
1977 "gmi_ad1_pg1",
1978 "gmi_ad2_pg2",
1979 "gmi_ad3_pg3",
1980 "gmi_ad4_pg4",
1981 "gmi_ad12_ph4",
1982 "gmi_ad13_ph5",
1983 "gmi_rst_n_pi4",
1984 "gen2_i2c_scl_pt5",
1985 "gen2_i2c_sda_pt6",
1986 "sdmmc4_clk_pcc4",
1987 "sdmmc4_cmd_pt7",
1988 "sdmmc4_dat0_paa0",
1989 "sdmmc4_dat1_paa1",
1990 "sdmmc4_dat2_paa2",
1991 "sdmmc4_dat3_paa3",
1992 "sdmmc4_dat4_paa4",
1993 "sdmmc4_dat5_paa5",
1994 "sdmmc4_dat6_paa6",
1995 "sdmmc4_dat7_paa7",
1996 "cam_mclk_pcc0",
1997 "pcc1",
1998 "cam_i2c_scl_pbb1",
1999 "cam_i2c_sda_pbb2",
2000 "pbb3",
2001 "pbb4",
2002 "pbb5",
2003 "pbb6",
2004 "pbb7",
2005 "pcc2",
2006 "pwr_i2c_scl_pz6",
2007 "pwr_i2c_sda_pz7",
2008 "kb_row0_pr0",
2009 "kb_row1_pr1",
2010 "kb_row2_pr2",
2011 "kb_col2_pq2",
2012 "kb_col5_pq5",
2013 "kb_col6_pq6",
2014 "kb_col7_pq7",
2015 "clk_32k_out_pa0",
2016 "sys_clk_req_pz5",
2017 "core_pwr_req",
2018 "cpu_pwr_req",
2019 "pwr_int_n",
2020 "owr",
2021 "dap1_fs_pn0",
2022 "dap1_din_pn1",
2023 "dap1_dout_pn2",
2024 "dap1_sclk_pn3",
2025 "clk1_req_pee2",
2026 "clk1_out_pw4",
2027 "spdif_in_pk6",
2028 "spdif_out_pk5",
2029 "dap2_fs_pa2",
2030 "dap2_sclk_pa3",
2031 "dap2_din_pa4",
2032 "dap2_dout_pa5",
2033 "dvfs_pwm_px0",
2034 "gpio_x1_aud_px1",
2035 "gpio_x3_aud_px3",
2036 "dvfs_clk_px2",
2037 "gpio_x5_aud_px5",
2038 "gpio_x6_aud_px6",
2039 "gpio_x7_aud_px7",
2040 "sdmmc3_cd_n_pv2",
2041 "usb_vbus_en0_pn4",
2042 "usb_vbus_en1_pn5",
2043 "sdmmc3_clk_lb_in_pee5",
2044 "sdmmc3_clk_lb_out_pee4",
2045};
2046
/* Pin groups that can be muxed to the SDMMC1 controller (see FUNCTION()). */
2047static const char * const sdmmc1_groups[] = {
2048
2049	"sdmmc1_clk_pz0",
2050	"sdmmc1_cmd_pz1",
2051	"sdmmc1_dat3_py4",
2052	"sdmmc1_dat2_py5",
2053	"sdmmc1_dat1_py6",
2054	"sdmmc1_dat0_py7",
2055	"uart3_cts_n_pa1",
2056	"kb_col5_pq5",
2057	"sdmmc1_wp_n_pv3",
2058};
2059
/* Pin groups that can be muxed to the SDMMC2 controller (GMI pins). */
2060static const char * const sdmmc2_groups[] = {
2061	"gmi_iordy_pi5",
2062	"gmi_clk_pk1",
2063	"gmi_cs2_n_pk3",
2064	"gmi_cs3_n_pk4",
2065	"gmi_cs7_n_pi6",
2066	"gmi_ad12_ph4",
2067	"gmi_ad13_ph5",
2068	"gmi_ad14_ph6",
2069	"gmi_ad15_ph7",
2070	"gmi_dqs_p_pj3",
2071};
2072
/* Pin groups that can be muxed to the SDMMC3 controller. */
2073static const char * const sdmmc3_groups[] = {
2074	"kb_col4_pq4",
2075	"sdmmc3_clk_pa6",
2076	"sdmmc3_cmd_pa7",
2077	"sdmmc3_dat0_pb7",
2078	"sdmmc3_dat1_pb6",
2079	"sdmmc3_dat2_pb5",
2080	"sdmmc3_dat3_pb4",
2081	"hdmi_cec_pee3",
2082	"sdmmc3_cd_n_pv2",
2083	"sdmmc3_clk_lb_in_pee5",
2084	"sdmmc3_clk_lb_out_pee4",
2085};
2086
/* Pin groups that can be muxed to the SDMMC4 controller. */
2087static const char * const sdmmc4_groups[] = {
2088	"sdmmc4_clk_pcc4",
2089	"sdmmc4_cmd_pt7",
2090	"sdmmc4_dat0_paa0",
2091	"sdmmc4_dat1_paa1",
2092	"sdmmc4_dat2_paa2",
2093	"sdmmc4_dat3_paa3",
2094	"sdmmc4_dat4_paa4",
2095	"sdmmc4_dat5_paa5",
2096	"sdmmc4_dat6_paa6",
2097	"sdmmc4_dat7_paa7",
2098};
2099
/* Pin groups that can be muxed to the SOC function. */
2100static const char * const soc_groups[] = {
2101	"gmi_cs1_n_pj2",
2102	"gmi_oe_n_pi1",
2103	"clk_32k_out_pa0",
2104	"hdmi_cec_pee3",
2105};
2106
/* Pin groups that can be muxed to the SPDIF interface. */
2107static const char * const spdif_groups[] = {
2108	"sdmmc1_cmd_pz1",
2109	"sdmmc1_dat3_py4",
2110	"uart2_rxd_pc3",
2111	"uart2_txd_pc2",
2112	"spdif_in_pk6",
2113	"spdif_out_pk5",
2114};
2115
/* Pin groups that can be muxed to the SPI1 controller. */
2116static const char * const spi1_groups[] = {
2117	"ulpi_clk_py0",
2118	"ulpi_dir_py1",
2119	"ulpi_nxt_py2",
2120	"ulpi_stp_py3",
2121	"gpio_x3_aud_px3",
2122	"gpio_x4_aud_px4",
2123	"gpio_x5_aud_px5",
2124	"gpio_x6_aud_px6",
2125	"gpio_x7_aud_px7",
2126	"gpio_w3_aud_pw3",
2127};
2128
/* Pin groups that can be muxed to the SPI2 controller. */
2129static const char * const spi2_groups[] = {
2130	"ulpi_data4_po5",
2131	"ulpi_data5_po6",
2132	"ulpi_data6_po7",
2133	"ulpi_data7_po0",
2134	"kb_row4_pr4",
2135	"kb_row5_pr5",
2136	"kb_col0_pq0",
2137	"kb_col1_pq1",
2138	"kb_col2_pq2",
2139	"kb_col6_pq6",
2140	"kb_col7_pq7",
2141	"gpio_x4_aud_px4",
2142	"gpio_x5_aud_px5",
2143	"gpio_x6_aud_px6",
2144	"gpio_x7_aud_px7",
2145	"gpio_w2_aud_pw2",
2146	"gpio_w3_aud_pw3",
2147};
2148
/* Pin groups that can be muxed to the SPI3 controller. */
2149static const char * const spi3_groups[] = {
2150	"ulpi_data0_po1",
2151	"ulpi_data1_po2",
2152	"ulpi_data2_po3",
2153	"ulpi_data3_po4",
2154	"sdmmc4_dat0_paa0",
2155	"sdmmc4_dat1_paa1",
2156	"sdmmc4_dat2_paa2",
2157	"sdmmc4_dat3_paa3",
2158	"sdmmc4_dat4_paa4",
2159	"sdmmc4_dat5_paa5",
2160	"sdmmc4_dat6_paa6",
2161	"sdmmc3_clk_pa6",
2162	"sdmmc3_cmd_pa7",
2163	"sdmmc3_dat0_pb7",
2164	"sdmmc3_dat1_pb6",
2165	"sdmmc3_dat2_pb5",
2166	"sdmmc3_dat3_pb4",
2167};
2168
/* Pin groups that can be muxed to the SPI4 controller. */
2169static const char * const spi4_groups[] = {
2170	"sdmmc1_cmd_pz1",
2171	"sdmmc1_dat3_py4",
2172	"sdmmc1_dat2_py5",
2173	"sdmmc1_dat1_py6",
2174	"sdmmc1_dat0_py7",
2175	"uart2_rxd_pc3",
2176	"uart2_txd_pc2",
2177	"uart2_rts_n_pj6",
2178	"uart2_cts_n_pj5",
2179	"uart3_txd_pw6",
2180	"uart3_rxd_pw7",
2181	"uart3_cts_n_pa1",
2182	"gmi_wait_pi7",
2183	"gmi_cs6_n_pi3",
2184	"gmi_ad5_pg5",
2185	"gmi_ad6_pg6",
2186	"gmi_ad7_pg7",
2187	"gmi_a19_pk7",
2188	"gmi_wr_n_pi0",
2189	"sdmmc1_wp_n_pv3",
2190};
2191
/* Pin groups that can be muxed to the SPI5 controller. */
2192static const char * const spi5_groups[] = {
2193	"ulpi_clk_py0",
2194	"ulpi_dir_py1",
2195	"ulpi_nxt_py2",
2196	"ulpi_stp_py3",
2197	"dap3_fs_pp0",
2198	"dap3_din_pp1",
2199	"dap3_dout_pp2",
2200	"dap3_sclk_pp3",
2201};
2202
/* Pin groups that can be muxed to the SPI6 controller. */
2203static const char * const spi6_groups[] = {
2204	"dvfs_pwm_px0",
2205	"gpio_x1_aud_px1",
2206	"gpio_x3_aud_px3",
2207	"dvfs_clk_px2",
2208	"gpio_x6_aud_px6",
2209	"gpio_w2_aud_pw2",
2210	"gpio_w3_aud_pw3",
2211};
2212
/* Pin group carrying the system clock request signal. */
2213static const char * const sysclk_groups[] = {
2214	"sys_clk_req_pz5",
2215};
2216
/* Pin groups that can be muxed to the TRACE debug interface. */
2217static const char * const trace_groups[] = {
2218	"gmi_iordy_pi5",
2219	"gmi_adv_n_pk0",
2220	"gmi_clk_pk1",
2221	"gmi_cs2_n_pk3",
2222	"gmi_cs4_n_pk2",
2223	"gmi_a16_pj7",
2224	"gmi_a17_pb0",
2225	"gmi_a18_pb1",
2226	"gmi_a19_pk7",
2227	"gmi_dqs_p_pj3",
2228};
2229
/* Pin groups that can be muxed to UART-A. */
2230static const char * const uarta_groups[] = {
2231	"ulpi_data0_po1",
2232	"ulpi_data1_po2",
2233	"ulpi_data2_po3",
2234	"ulpi_data3_po4",
2235	"ulpi_data4_po5",
2236	"ulpi_data5_po6",
2237	"ulpi_data6_po7",
2238	"ulpi_data7_po0",
2239	"sdmmc1_cmd_pz1",
2240	"sdmmc1_dat3_py4",
2241	"sdmmc1_dat2_py5",
2242	"sdmmc1_dat1_py6",
2243	"sdmmc1_dat0_py7",
2244	"uart2_rxd_pc3",
2245	"uart2_txd_pc2",
2246	"uart2_rts_n_pj6",
2247	"uart2_cts_n_pj5",
2248	"pu0",
2249	"pu1",
2250	"pu2",
2251	"pu3",
2252	"pu4",
2253	"pu5",
2254	"pu6",
2255	"kb_row7_pr7",
2256	"kb_row8_ps0",
2257	"kb_row9_ps1",
2258	"kb_row10_ps2",
2259	"kb_col3_pq3",
2260	"kb_col4_pq4",
2261	"sdmmc3_cmd_pa7",
2262	"sdmmc3_dat1_pb6",
2263	"sdmmc1_wp_n_pv3",
2264};
2265
/* Pin groups that can be muxed to UART-B. */
2266static const char * const uartb_groups[] = {
2267	"uart2_rts_n_pj6",
2268	"uart2_cts_n_pj5",
2269};
2270
/* Pin groups that can be muxed to UART-C. */
2271static const char * const uartc_groups[] = {
2272	"uart3_txd_pw6",
2273	"uart3_rxd_pw7",
2274	"uart3_cts_n_pa1",
2275	"uart3_rts_n_pc0",
2276};
2277
/* Pin groups that can be muxed to UART-D. */
2278static const char * const uartd_groups[] = {
2279	"ulpi_clk_py0",
2280	"ulpi_dir_py1",
2281	"ulpi_nxt_py2",
2282	"ulpi_stp_py3",
2283	"gmi_a16_pj7",
2284	"gmi_a17_pb0",
2285	"gmi_a18_pb1",
2286	"gmi_a19_pk7",
2287};
2288
/* Pin groups that can be muxed to the ULPI USB interface. */
2289static const char * const ulpi_groups[] = {
2290	"ulpi_data0_po1",
2291	"ulpi_data1_po2",
2292	"ulpi_data2_po3",
2293	"ulpi_data3_po4",
2294	"ulpi_data4_po5",
2295	"ulpi_data5_po6",
2296	"ulpi_data6_po7",
2297	"ulpi_data7_po0",
2298	"ulpi_clk_py0",
2299	"ulpi_dir_py1",
2300	"ulpi_nxt_py2",
2301	"ulpi_stp_py3",
2302};
2303
/* Pin groups that can be muxed to the USB function. */
2304static const char * const usb_groups[] = {
2305	"pv0",
2306	"pu6",
2307	"gmi_cs0_n_pj0",
2308	"gmi_cs4_n_pk2",
2309	"gmi_ad11_ph3",
2310	"kb_col0_pq0",
2311	"spdif_in_pk6",
2312	"usb_vbus_en0_pn4",
2313	"usb_vbus_en1_pn5",
2314};
2315
/* Single-group camera/video (VGP1..VGP6, VI and VI alternates) functions. */
2316static const char * const vgp1_groups[] = {
2317	"cam_i2c_scl_pbb1",
2318};
2319
2320static const char * const vgp2_groups[] = {
2321	"cam_i2c_sda_pbb2",
2322};
2323
2324static const char * const vgp3_groups[] = {
2325	"pbb3",
2326};
2327
2328static const char * const vgp4_groups[] = {
2329	"pbb4",
2330};
2331
2332static const char * const vgp5_groups[] = {
2333	"pbb5",
2334};
2335
2336static const char * const vgp6_groups[] = {
2337	"pbb6",
2338};
2339
2340static const char * const vi_groups[] = {
2341	"cam_mclk_pcc0",
2342	"pbb0",
2343};
2344
2345static const char * const vi_alt1_groups[] = {
2346	"cam_mclk_pcc0",
2347	"pbb0",
2348};
2349
2350static const char * const vi_alt3_groups[] = {
2351	"cam_mclk_pcc0",
2352	"pbb0",
2353};
2354
/*
 * Expand to one tegra_function table entry: the function's name string and
 * the fname##_groups array (defined above) listing the pin groups that can
 * select it.
 */
2355#define FUNCTION(fname) \
2356	{ \
2357		.name = #fname, \
2358		.groups = fname##_groups, \
2359		.ngroups = ARRAY_SIZE(fname##_groups), \
2360	}
2361
/* All mux functions available on Tegra114; each entry built by FUNCTION(). */
2362static const struct tegra_function tegra114_functions[] = {
2363	FUNCTION(blink),
2364	FUNCTION(cec),
2365	FUNCTION(cldvfs),
2366	FUNCTION(clk12),
2367	FUNCTION(cpu),
2368	FUNCTION(dap),
2369	FUNCTION(dap1),
2370	FUNCTION(dap2),
2371	FUNCTION(dev3),
2372	FUNCTION(displaya),
2373	FUNCTION(displaya_alt),
2374	FUNCTION(displayb),
2375	FUNCTION(dtv),
2376	FUNCTION(emc_dll),
2377	FUNCTION(extperiph1),
2378	FUNCTION(extperiph2),
2379	FUNCTION(extperiph3),
2380	FUNCTION(gmi),
2381	FUNCTION(gmi_alt),
2382	FUNCTION(hda),
2383	FUNCTION(hsi),
2384	FUNCTION(i2c1),
2385	FUNCTION(i2c2),
2386	FUNCTION(i2c3),
2387	FUNCTION(i2c4),
2388	FUNCTION(i2cpwr),
2389	FUNCTION(i2s0),
2390	FUNCTION(i2s1),
2391	FUNCTION(i2s2),
2392	FUNCTION(i2s3),
2393	FUNCTION(i2s4),
2394	FUNCTION(irda),
2395	FUNCTION(kbc),
2396	FUNCTION(nand),
2397	FUNCTION(nand_alt),
2398	FUNCTION(owr),
2399	FUNCTION(pmi),
2400	FUNCTION(pwm0),
2401	FUNCTION(pwm1),
2402	FUNCTION(pwm2),
2403	FUNCTION(pwm3),
2404	FUNCTION(pwron),
2405	FUNCTION(reset_out_n),
2406	FUNCTION(rsvd1),
2407	FUNCTION(rsvd2),
2408	FUNCTION(rsvd3),
2409	FUNCTION(rsvd4),
2410	FUNCTION(sdmmc1),
2411	FUNCTION(sdmmc2),
2412	FUNCTION(sdmmc3),
2413	FUNCTION(sdmmc4),
2414	FUNCTION(soc),
2415	FUNCTION(spdif),
2416	FUNCTION(spi1),
2417	FUNCTION(spi2),
2418	FUNCTION(spi3),
2419	FUNCTION(spi4),
2420	FUNCTION(spi5),
2421	FUNCTION(spi6),
2422	FUNCTION(sysclk),
2423	FUNCTION(trace),
2424	FUNCTION(uarta),
2425	FUNCTION(uartb),
2426	FUNCTION(uartc),
2427	FUNCTION(uartd),
2428	FUNCTION(ulpi),
2429	FUNCTION(usb),
2430	FUNCTION(vgp1),
2431	FUNCTION(vgp2),
2432	FUNCTION(vgp3),
2433	FUNCTION(vgp4),
2434	FUNCTION(vgp5),
2435	FUNCTION(vgp6),
2436	FUNCTION(vi),
2437	FUNCTION(vi_alt1),
2438	FUNCTION(vi_alt3),
2439};
2440
/* Register-space bases: drive-strength regs are in bank 0, mux regs in bank 1. */
2441#define DRV_PINGROUP_REG_START 0x868 /* bank 0 */
2442#define PINGROUP_REG_START 0x3000 /* bank 1 */
2443
/* _Y yields the bank-relative offset; _N marks the option as not present. */
2444#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_START)
2445#define PINGROUP_REG_N(r) -1
2446
/*
 * One mux pin-group entry.  All of the group's control fields live in the
 * single bank-1 register at offset r, at fixed bit positions (mux=0,
 * pupd=2, tri=4, einput=5, odrain=6, lock=7, ioreset=8, rcv_sel=9).
 * od/ior/rcv_sel are Y or N and pick PINGROUP_REG_Y/_N, i.e. whether the
 * group actually implements that option.  Drive-strength regs don't apply.
 */
2447#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
2448	{ \
2449		.name = #pg_name, \
2450		.pins = pg_name##_pins, \
2451		.npins = ARRAY_SIZE(pg_name##_pins), \
2452		.funcs = { \
2453			TEGRA_MUX_##f0, \
2454			TEGRA_MUX_##f1, \
2455			TEGRA_MUX_##f2, \
2456			TEGRA_MUX_##f3, \
2457		}, \
2458		.func_safe = TEGRA_MUX_##f_safe, \
2459		.mux_reg = PINGROUP_REG_Y(r), \
2460		.mux_bank = 1, \
2461		.mux_bit = 0, \
2462		.pupd_reg = PINGROUP_REG_Y(r), \
2463		.pupd_bank = 1, \
2464		.pupd_bit = 2, \
2465		.tri_reg = PINGROUP_REG_Y(r), \
2466		.tri_bank = 1, \
2467		.tri_bit = 4, \
2468		.einput_reg = PINGROUP_REG_Y(r), \
2469		.einput_bank = 1, \
2470		.einput_bit = 5, \
2471		.odrain_reg = PINGROUP_REG_##od(r), \
2472		.odrain_bank = 1, \
2473		.odrain_bit = 6, \
2474		.lock_reg = PINGROUP_REG_Y(r), \
2475		.lock_bank = 1, \
2476		.lock_bit = 7, \
2477		.ioreset_reg = PINGROUP_REG_##ior(r), \
2478		.ioreset_bank = 1, \
2479		.ioreset_bit = 8, \
2480		.rcv_sel_reg = PINGROUP_REG_##rcv_sel(r), \
2481		.rcv_sel_bank = 1, \
2482		.rcv_sel_bit = 9, \
2483		.drv_reg = -1, \
2484		.drvtype_reg = -1, \
2485	}
2486
/* _Y yields the bank-0-relative drive register offset; _N disables drvtype. */
2487#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
2488#define DRV_PINGROUP_DVRTYPE_N(r) -1
2489
/*
 * One drive-strength pin-group entry (bank 0, register offset r).  Bit
 * positions and field widths vary per group so they are passed in; the
 * mux-related registers are all marked absent (-1).  drvtype is Y or N
 * depending on whether the group has a drive-type field (bit 6).
 */
2490#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
2491		     drvdn_b, drvdn_w, drvup_b, drvup_w, \
2492		     slwr_b, slwr_w, slwf_b, slwf_w, \
2493		     drvtype) \
2494	{ \
2495		.name = "drive_" #pg_name, \
2496		.pins = drive_##pg_name##_pins, \
2497		.npins = ARRAY_SIZE(drive_##pg_name##_pins), \
2498		.mux_reg = -1, \
2499		.pupd_reg = -1, \
2500		.tri_reg = -1, \
2501		.einput_reg = -1, \
2502		.odrain_reg = -1, \
2503		.lock_reg = -1, \
2504		.ioreset_reg = -1, \
2505		.rcv_sel_reg = -1, \
2506		.drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
2507		.drv_bank = 0, \
2508		.hsm_bit = hsm_b, \
2509		.schmitt_bit = schmitt_b, \
2510		.lpmd_bit = lpmd_b, \
2511		.drvdn_bit = drvdn_b, \
2512		.drvdn_width = drvdn_w, \
2513		.drvup_bit = drvup_b, \
2514		.drvup_width = drvup_w, \
2515		.slwr_bit = slwr_b, \
2516		.slwr_width = slwr_w, \
2517		.slwf_bit = slwf_b, \
2518		.slwf_width = slwf_w, \
2519		.drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
2520		.drvtype_bank = 0, \
2521		.drvtype_bit = 6, \
2522	}
2523
/*
 * Complete Tegra114 pin-group table: the PINGROUP() entries describe mux
 * groups (four selectable functions plus a safe default, and the bank-1
 * config register offset), followed by the DRV_PINGROUP() drive-strength
 * groups in bank 0.  Entries are hardware data; order and values come from
 * the TRM register layout — do not restructure.
 */
2524static const struct tegra_pingroup tegra114_groups[] = {
2525	/* pg_name, f0, f1, f2, f3, safe, r, od, ior, rcv_sel */
2526	/* FIXME: Fill in correct data in safe column */
2527	PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, ULPI, 0x3000, N, N, N),
2528	PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, ULPI, 0x3004, N, N, N),
2529	PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, ULPI, 0x3008, N, N, N),
2530	PINGROUP(ulpi_data3_po4, SPI3, HSI, UARTA, ULPI, ULPI, 0x300c, N, N, N),
2531	PINGROUP(ulpi_data4_po5, SPI2, HSI, UARTA, ULPI, ULPI, 0x3010, N, N, N),
2532	PINGROUP(ulpi_data5_po6, SPI2, HSI, UARTA, ULPI, ULPI, 0x3014, N, N, N),
2533	PINGROUP(ulpi_data6_po7, SPI2, HSI, UARTA, ULPI, ULPI, 0x3018, N, N, N),
2534	PINGROUP(ulpi_data7_po0, SPI2, HSI, UARTA, ULPI, ULPI, 0x301c, N, N, N),
2535	PINGROUP(ulpi_clk_py0, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3020, N, N, N),
2536	PINGROUP(ulpi_dir_py1, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3024, N, N, N),
2537	PINGROUP(ulpi_nxt_py2, SPI1, SPI5, UARTD, ULPI, ULPI, 0x3028, N, N, N),
2538	PINGROUP(ulpi_stp_py3, SPI1, SPI5, UARTD, ULPI, ULPI, 0x302c, N, N, N),
2539	PINGROUP(dap3_fs_pp0, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3030, N, N, N),
2540	PINGROUP(dap3_din_pp1, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3034, N, N, N),
2541	PINGROUP(dap3_dout_pp2, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x3038, N, N, N),
2542	PINGROUP(dap3_sclk_pp3, I2S2, SPI5, DISPLAYA, DISPLAYB, I2S2, 0x303c, N, N, N),
2543	PINGROUP(pv0, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x3040, N, N, N),
2544	PINGROUP(pv1, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3044, N, N, N),
2545	PINGROUP(sdmmc1_clk_pz0, SDMMC1, CLK12, RSVD3, RSVD4, RSVD4, 0x3048, N, N, N),
2546	PINGROUP(sdmmc1_cmd_pz1, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x304c, N, N, N),
2547	PINGROUP(sdmmc1_dat3_py4, SDMMC1, SPDIF, SPI4, UARTA, SDMMC1, 0x3050, N, N, N),
2548	PINGROUP(sdmmc1_dat2_py5, SDMMC1, PWM0, SPI4, UARTA, SDMMC1, 0x3054, N, N, N),
2549	PINGROUP(sdmmc1_dat1_py6, SDMMC1, PWM1, SPI4, UARTA, SDMMC1, 0x3058, N, N, N),
2550	PINGROUP(sdmmc1_dat0_py7, SDMMC1, RSVD2, SPI4, UARTA, RSVD2, 0x305c, N, N, N),
2551	PINGROUP(clk2_out_pw5, EXTPERIPH2, RSVD2, RSVD3, RSVD4, RSVD4, 0x3068, N, N, N),
2552	PINGROUP(clk2_req_pcc5, DAP, RSVD2, RSVD3, RSVD4, RSVD4, 0x306c, N, N, N),
2553	PINGROUP(hdmi_int_pn7, RSVD1, RSVD2, RSVD3, RSVD4, RSVD4, 0x3110, N, N, Y),
2554	PINGROUP(ddc_scl_pv4, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3114, N, N, Y),
2555	PINGROUP(ddc_sda_pv5, I2C4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3118, N, N, Y),
2556	PINGROUP(uart2_rxd_pc3, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3164, N, N, N),
2557	PINGROUP(uart2_txd_pc2, IRDA, SPDIF, UARTA, SPI4, IRDA, 0x3168, N, N, N),
2558	PINGROUP(uart2_rts_n_pj6, UARTA, UARTB, RSVD3, SPI4, RSVD3, 0x316c, N, N, N),
2559	PINGROUP(uart2_cts_n_pj5, UARTA, UARTB, RSVD3, SPI4, RSVD3, 0x3170, N, N, N),
2560	PINGROUP(uart3_txd_pw6, UARTC, RSVD2, RSVD3, SPI4, RSVD3, 0x3174, N, N, N),
2561	PINGROUP(uart3_rxd_pw7, UARTC, RSVD2, RSVD3, SPI4, RSVD3, 0x3178, N, N, N),
2562	PINGROUP(uart3_cts_n_pa1, UARTC, SDMMC1, DTV, SPI4, UARTC, 0x317c, N, N, N),
2563	PINGROUP(uart3_rts_n_pc0, UARTC, PWM0, DTV, DISPLAYA, UARTC, 0x3180, N, N, N),
2564	PINGROUP(pu0, OWR, UARTA, RSVD3, RSVD4, RSVD4, 0x3184, N, N, N),
2565	PINGROUP(pu1, RSVD1, UARTA, RSVD3, RSVD4, RSVD4, 0x3188, N, N, N),
2566	PINGROUP(pu2, RSVD1, UARTA, RSVD3, RSVD4, RSVD4, 0x318c, N, N, N),
2567	PINGROUP(pu3, PWM0, UARTA, DISPLAYA, DISPLAYB, PWM0, 0x3190, N, N, N),
2568	PINGROUP(pu4, PWM1, UARTA, DISPLAYA, DISPLAYB, PWM1, 0x3194, N, N, N),
2569	PINGROUP(pu5, PWM2, UARTA, DISPLAYA, DISPLAYB, PWM2, 0x3198, N, N, N),
2570	PINGROUP(pu6, PWM3, UARTA, USB, DISPLAYB, PWM3, 0x319c, N, N, N),
2571	PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a0, Y, N, N),
2572	PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, RSVD4, 0x31a4, Y, N, N),
2573	PINGROUP(dap4_fs_pp4, I2S3, RSVD2, DTV, RSVD4, RSVD4, 0x31a8, N, N, N),
2574	PINGROUP(dap4_din_pp5, I2S3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31ac, N, N, N),
2575	PINGROUP(dap4_dout_pp6, I2S3, RSVD2, DTV, RSVD4, RSVD4, 0x31b0, N, N, N),
2576	PINGROUP(dap4_sclk_pp7, I2S3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31b4, N, N, N),
2577	PINGROUP(clk3_out_pee0, EXTPERIPH3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31b8, N, N, N),
2578	PINGROUP(clk3_req_pee1, DEV3, RSVD2, RSVD3, RSVD4, RSVD4, 0x31bc, N, N, N),
2579	PINGROUP(gmi_wp_n_pc7, RSVD1, NAND, GMI, GMI_ALT, RSVD1, 0x31c0, N, N, N),
2580	PINGROUP(gmi_iordy_pi5, SDMMC2, RSVD2, GMI, TRACE, RSVD2, 0x31c4, N, N, N),
2581	PINGROUP(gmi_wait_pi7, SPI4, NAND, GMI, DTV, NAND, 0x31c8, N, N, N),
2582	PINGROUP(gmi_adv_n_pk0, RSVD1, NAND, GMI, TRACE, RSVD1, 0x31cc, N, N, N),
2583	PINGROUP(gmi_clk_pk1, SDMMC2, NAND, GMI, TRACE, GMI, 0x31d0, N, N, N),
2584	PINGROUP(gmi_cs0_n_pj0, RSVD1, NAND, GMI, USB, RSVD1, 0x31d4, N, N, N),
2585	PINGROUP(gmi_cs1_n_pj2, RSVD1, NAND, GMI, SOC, RSVD1, 0x31d8, N, N, N),
2586	PINGROUP(gmi_cs2_n_pk3, SDMMC2, NAND, GMI, TRACE, GMI, 0x31dc, N, N, N),
2587	PINGROUP(gmi_cs3_n_pk4, SDMMC2, NAND, GMI, GMI_ALT, GMI, 0x31e0, N, N, N),
2588	PINGROUP(gmi_cs4_n_pk2, USB, NAND, GMI, TRACE, GMI, 0x31e4, N, N, N),
2589	PINGROUP(gmi_cs6_n_pi3, NAND, NAND_ALT, GMI, SPI4, NAND, 0x31e8, N, N, N),
2590	PINGROUP(gmi_cs7_n_pi6, NAND, NAND_ALT, GMI, SDMMC2, NAND, 0x31ec, N, N, N),
2591	PINGROUP(gmi_ad0_pg0, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f0, N, N, N),
2592	PINGROUP(gmi_ad1_pg1, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f4, N, N, N),
2593	PINGROUP(gmi_ad2_pg2, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31f8, N, N, N),
2594	PINGROUP(gmi_ad3_pg3, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x31fc, N, N, N),
2595	PINGROUP(gmi_ad4_pg4, RSVD1, NAND, GMI, RSVD4, RSVD4, 0x3200, N, N, N),
2596	PINGROUP(gmi_ad5_pg5, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3204, N, N, N),
2597	PINGROUP(gmi_ad6_pg6, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3208, N, N, N),
2598	PINGROUP(gmi_ad7_pg7, RSVD1, NAND, GMI, SPI4, RSVD1, 0x320c, N, N, N),
2599	PINGROUP(gmi_ad8_ph0, PWM0, NAND, GMI, DTV, GMI, 0x3210, N, N, N),
2600	PINGROUP(gmi_ad9_ph1, PWM1, NAND, GMI, CLDVFS, GMI, 0x3214, N, N, N),
2601	PINGROUP(gmi_ad10_ph2, PWM2, NAND, GMI, CLDVFS, GMI, 0x3218, N, N, N),
2602	PINGROUP(gmi_ad11_ph3, PWM3, NAND, GMI, USB, GMI, 0x321c, N, N, N),
2603	PINGROUP(gmi_ad12_ph4, SDMMC2, NAND, GMI, RSVD4, RSVD4, 0x3220, N, N, N),
2604	PINGROUP(gmi_ad13_ph5, SDMMC2, NAND, GMI, RSVD4, RSVD4, 0x3224, N, N, N),
2605	PINGROUP(gmi_ad14_ph6, SDMMC2, NAND, GMI, DTV, GMI, 0x3228, N, N, N),
2606	PINGROUP(gmi_ad15_ph7, SDMMC2, NAND, GMI, DTV, GMI, 0x322c, N, N, N),
2607	PINGROUP(gmi_a16_pj7, UARTD, TRACE, GMI, GMI_ALT, GMI, 0x3230, N, N, N),
2608	PINGROUP(gmi_a17_pb0, UARTD, RSVD2, GMI, TRACE, RSVD2, 0x3234, N, N, N),
2609	PINGROUP(gmi_a18_pb1, UARTD, RSVD2, GMI, TRACE, RSVD2, 0x3238, N, N, N),
2610	PINGROUP(gmi_a19_pk7, UARTD, SPI4, GMI, TRACE, GMI, 0x323c, N, N, N),
2611	PINGROUP(gmi_wr_n_pi0, RSVD1, NAND, GMI, SPI4, RSVD1, 0x3240, N, N, N),
2612	PINGROUP(gmi_oe_n_pi1, RSVD1, NAND, GMI, SOC, RSVD1, 0x3244, N, N, N),
2613	PINGROUP(gmi_dqs_p_pj3, SDMMC2, NAND, GMI, TRACE, NAND, 0x3248, N, N, N),
2614	PINGROUP(gmi_rst_n_pi4, NAND, NAND_ALT, GMI, RSVD4, RSVD4, 0x324c, N, N, N),
2615	PINGROUP(gen2_i2c_scl_pt5, I2C2, RSVD2, GMI, RSVD4, RSVD4, 0x3250, Y, N, N),
2616	PINGROUP(gen2_i2c_sda_pt6, I2C2, RSVD2, GMI, RSVD4, RSVD4, 0x3254, Y, N, N),
2617	PINGROUP(sdmmc4_clk_pcc4, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x3258, N, Y, N),
2618	PINGROUP(sdmmc4_cmd_pt7, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x325c, N, Y, N),
2619	PINGROUP(sdmmc4_dat0_paa0, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3260, N, Y, N),
2620	PINGROUP(sdmmc4_dat1_paa1, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3264, N, Y, N),
2621	PINGROUP(sdmmc4_dat2_paa2, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3268, N, Y, N),
2622	PINGROUP(sdmmc4_dat3_paa3, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x326c, N, Y, N),
2623	PINGROUP(sdmmc4_dat4_paa4, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3270, N, Y, N),
2624	PINGROUP(sdmmc4_dat5_paa5, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3274, N, Y, N),
2625	PINGROUP(sdmmc4_dat6_paa6, SDMMC4, SPI3, GMI, RSVD4, RSVD4, 0x3278, N, Y, N),
2626	PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD2, GMI, RSVD4, RSVD4, 0x327c, N, Y, N),
2627	PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, RSVD4, RSVD4, 0x3284, N, N, N),
2628	PINGROUP(pcc1, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x3288, N, N, N),
2629	PINGROUP(pbb0, I2S4, VI, VI_ALT1, VI_ALT3, I2S4, 0x328c, N, N, N),
2630	PINGROUP(cam_i2c_scl_pbb1, VGP1, I2C3, RSVD3, RSVD4, RSVD4, 0x3290, Y, N, N),
2631	PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, RSVD4, RSVD4, 0x3294, Y, N, N),
2632	PINGROUP(pbb3, VGP3, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x3298, N, N, N),
2633	PINGROUP(pbb4, VGP4, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x329c, N, N, N),
2634	PINGROUP(pbb5, VGP5, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a0, N, N, N),
2635	PINGROUP(pbb6, VGP6, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a4, N, N, N),
2636	PINGROUP(pbb7, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32a8, N, N, N),
2637	PINGROUP(pcc2, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32ac, N, N, N),
2638	PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b4, Y, N, N),
2639	PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b8, Y, N, N),
2640	PINGROUP(kb_row0_pr0, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32bc, N, N, N),
2641	PINGROUP(kb_row1_pr1, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c0, N, N, N),
2642	PINGROUP(kb_row2_pr2, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32c4, N, N, N),
2643	PINGROUP(kb_row3_pr3, KBC, DISPLAYA, RSVD3, DISPLAYB, RSVD3, 0x32c8, N, N, N),
2644	PINGROUP(kb_row4_pr4, KBC, DISPLAYA, SPI2, DISPLAYB, KBC, 0x32cc, N, N, N),
2645	PINGROUP(kb_row5_pr5, KBC, DISPLAYA, SPI2, DISPLAYB, KBC, 0x32d0, N, N, N),
2646	PINGROUP(kb_row6_pr6, KBC, DISPLAYA, DISPLAYA_ALT, DISPLAYB, KBC, 0x32d4, N, N, N),
2647	PINGROUP(kb_row7_pr7, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32d8, N, N, N),
2648	PINGROUP(kb_row8_ps0, KBC, RSVD2, CLDVFS, UARTA, RSVD2, 0x32dc, N, N, N),
2649	PINGROUP(kb_row9_ps1, KBC, RSVD2, RSVD3, UARTA, RSVD3, 0x32e0, N, N, N),
2650	PINGROUP(kb_row10_ps2, KBC, RSVD2, RSVD3, UARTA, RSVD3, 0x32e4, N, N, N),
2651	PINGROUP(kb_col0_pq0, KBC, USB, SPI2, EMC_DLL, KBC, 0x32fc, N, N, N),
2652	PINGROUP(kb_col1_pq1, KBC, RSVD2, SPI2, EMC_DLL, RSVD2, 0x3300, N, N, N),
2653	PINGROUP(kb_col2_pq2, KBC, RSVD2, SPI2, RSVD4, RSVD2, 0x3304, N, N, N),
2654	PINGROUP(kb_col3_pq3, KBC, DISPLAYA, PWM2, UARTA, KBC, 0x3308, N, N, N),
2655	PINGROUP(kb_col4_pq4, KBC, OWR, SDMMC3, UARTA, KBC, 0x330c, N, N, N),
2656	PINGROUP(kb_col5_pq5, KBC, RSVD2, SDMMC1, RSVD4, RSVD4, 0x3310, N, N, N),
2657	PINGROUP(kb_col6_pq6, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3314, N, N, N),
2658	PINGROUP(kb_col7_pq7, KBC, RSVD2, SPI2, RSVD4, RSVD4, 0x3318, N, N, N),
2659	PINGROUP(clk_32k_out_pa0, BLINK, SOC, RSVD3, RSVD4, RSVD4, 0x331c, N, N, N),
2660	PINGROUP(sys_clk_req_pz5, SYSCLK, RSVD2, RSVD3, RSVD4, RSVD4, 0x3320, N, N, N),
2661	PINGROUP(core_pwr_req, PWRON, RSVD2, RSVD3, RSVD4, RSVD4, 0x3324, N, N, N),
2662	PINGROUP(cpu_pwr_req, CPU, RSVD2, RSVD3, RSVD4, RSVD4, 0x3328, N, N, N),
2663	PINGROUP(pwr_int_n, PMI, RSVD2, RSVD3, RSVD4, RSVD4, 0x332c, N, N, N),
2664	PINGROUP(owr, OWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x3334, N, N, Y),
2665	PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3338, N, N, N),
2666	PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, RSVD4, RSVD4, 0x333c, N, N, N),
2667	PINGROUP(dap1_dout_pn2, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3340, N, N, N),
2668	PINGROUP(dap1_sclk_pn3, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3344, N, N, N),
2669	PINGROUP(clk1_req_pee2, DAP, DAP1, RSVD3, RSVD4, RSVD4, 0x3348, N, N, N),
2670	PINGROUP(clk1_out_pw4, EXTPERIPH1, DAP2, RSVD3, RSVD4, RSVD4, 0x334c, N, N, N),
2671	PINGROUP(spdif_in_pk6, SPDIF, USB, RSVD3, RSVD4, RSVD4, 0x3350, N, N, N),
2672	PINGROUP(spdif_out_pk5, SPDIF, RSVD2, RSVD3, RSVD4, RSVD4, 0x3354, N, N, N),
2673	PINGROUP(dap2_fs_pa2, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3358, N, N, N),
2674	PINGROUP(dap2_din_pa4, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x335c, N, N, N),
2675	PINGROUP(dap2_dout_pa5, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3360, N, N, N),
2676	PINGROUP(dap2_sclk_pa3, I2S1, HDA, RSVD3, RSVD4, RSVD4, 0x3364, N, N, N),
2677	PINGROUP(dvfs_pwm_px0, SPI6, CLDVFS, RSVD3, RSVD4, RSVD4, 0x3368, N, N, N),
2678	PINGROUP(gpio_x1_aud_px1, SPI6, RSVD2, RSVD3, RSVD4, RSVD4, 0x336c, N, N, N),
2679	PINGROUP(gpio_x3_aud_px3, SPI6, SPI1, RSVD3, RSVD4, RSVD4, 0x3370, N, N, N),
2680	PINGROUP(dvfs_clk_px2, SPI6, CLDVFS, RSVD3, RSVD4, RSVD4, 0x3374, N, N, N),
2681	PINGROUP(gpio_x4_aud_px4, RSVD1, SPI1, SPI2, DAP2, RSVD1, 0x3378, N, N, N),
2682	PINGROUP(gpio_x5_aud_px5, RSVD1, SPI1, SPI2, RSVD4, RSVD1, 0x337c, N, N, N),
2683	PINGROUP(gpio_x6_aud_px6, SPI6, SPI1, SPI2, RSVD4, RSVD4, 0x3380, N, N, N),
2684	PINGROUP(gpio_x7_aud_px7, RSVD1, SPI1, SPI2, RSVD4, RSVD4, 0x3384, N, N, N),
2685	PINGROUP(sdmmc3_clk_pa6, SDMMC3, RSVD2, RSVD3, SPI3, RSVD3, 0x3390, N, N, N),
2686	PINGROUP(sdmmc3_cmd_pa7, SDMMC3, PWM3, UARTA, SPI3, SDMMC3, 0x3394, N, N, N),
2687	PINGROUP(sdmmc3_dat0_pb7, SDMMC3, RSVD2, RSVD3, SPI3, RSVD3, 0x3398, N, N, N),
2688	PINGROUP(sdmmc3_dat1_pb6, SDMMC3, PWM2, UARTA, SPI3, SDMMC3, 0x339c, N, N, N),
2689	PINGROUP(sdmmc3_dat2_pb5, SDMMC3, PWM1, DISPLAYA, SPI3, SDMMC3, 0x33a0, N, N, N),
2690	PINGROUP(sdmmc3_dat3_pb4, SDMMC3, PWM0, DISPLAYB, SPI3, SDMMC3, 0x33a4, N, N, N),
2691	PINGROUP(hdmi_cec_pee3, CEC, SDMMC3, RSVD3, SOC, RSVD3, 0x33e0, Y, N, N),
2692	PINGROUP(sdmmc1_wp_n_pv3, SDMMC1, CLK12, SPI4, UARTA, SDMMC1, 0x33e4, N, N, N),
2693	PINGROUP(sdmmc3_cd_n_pv2, SDMMC3, OWR, RSVD3, RSVD4, RSVD4, 0x33e8, N, N, N),
2694	PINGROUP(gpio_w2_aud_pw2, SPI6, RSVD2, SPI2, I2C1, RSVD2, 0x33ec, N, N, N),
2695	PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI6, 0x33f0, N, N, N),
2696	PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f4, Y, N, N),
2697	PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f8, Y, N, N),
2698	PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x33fc, N, N, N),
2699	PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x3400, N, N, N),
2700	PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD3, 0x3408, N, N, N),
2701
2702	/* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
2703	DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2704	DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2705	DRV_PINGROUP(at1, 0x870, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
2706	DRV_PINGROUP(at2, 0x874, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
2707	DRV_PINGROUP(at3, 0x878, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
2708	DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
2709	DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
2710	DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2711	DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2712	DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2713	DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2714	DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2715	DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2716	DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2717	DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
2718	DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2719	DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2720	DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2721	DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2722	DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2723	DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
2724	DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2725	DRV_PINGROUP(gma, 0x900, 2, 3, 4, 14, 5, 20, 5, 28, 2, 30, 2, Y),
2726	DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
2727	DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
2728	DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
2729	DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
2730	DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2731	DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
2732};
2733
/* SoC description handed to the common Tegra pinctrl core at probe time. */
2734static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
2735	.ngpios = NUM_GPIOS,
2736	.pins = tegra114_pins,
2737	.npins = ARRAY_SIZE(tegra114_pins),
2738	.functions = tegra114_functions,
2739	.nfunctions = ARRAY_SIZE(tegra114_functions),
2740	.groups = tegra114_groups,
2741	.ngroups = ARRAY_SIZE(tegra114_groups),
2742};
2743
/* Probe: delegate to the shared Tegra pinctrl core with Tegra114 SoC data. */
2744static int tegra114_pinctrl_probe(struct platform_device *pdev)
2745{
2746	return tegra_pinctrl_probe(pdev, &tegra114_pinctrl);
2747}
2748
2749static struct of_device_id tegra114_pinctrl_of_match[] = {
2750 { .compatible = "nvidia,tegra114-pinmux", },
2751 { },
2752};
2753MODULE_DEVICE_TABLE(of, tegra114_pinctrl_of_match);
2754
/* Platform driver glue; remove() is shared with the other Tegra variants. */
2755static struct platform_driver tegra114_pinctrl_driver = {
2756	.driver = {
2757		.name = "tegra114-pinctrl",
2758		.owner = THIS_MODULE,
2759		.of_match_table = tegra114_pinctrl_of_match,
2760	},
2761	.probe = tegra114_pinctrl_probe,
2762	.remove = tegra_pinctrl_remove,
2763};
2764module_platform_driver(tegra114_pinctrl_driver);
2765
/* Module metadata. */
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver");
2769MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index e848189038f0..fcfb7d012c5b 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -2624,7 +2624,9 @@ static const struct tegra_function tegra20_functions[] = {
2624 .odrain_reg = -1, \ 2624 .odrain_reg = -1, \
2625 .lock_reg = -1, \ 2625 .lock_reg = -1, \
2626 .ioreset_reg = -1, \ 2626 .ioreset_reg = -1, \
2627 .rcv_sel_reg = -1, \
2627 .drv_reg = -1, \ 2628 .drv_reg = -1, \
2629 .drvtype_reg = -1, \
2628 } 2630 }
2629 2631
2630/* Pin groups with only pull up and pull down control */ 2632/* Pin groups with only pull up and pull down control */
@@ -2642,7 +2644,9 @@ static const struct tegra_function tegra20_functions[] = {
2642 .odrain_reg = -1, \ 2644 .odrain_reg = -1, \
2643 .lock_reg = -1, \ 2645 .lock_reg = -1, \
2644 .ioreset_reg = -1, \ 2646 .ioreset_reg = -1, \
2647 .rcv_sel_reg = -1, \
2645 .drv_reg = -1, \ 2648 .drv_reg = -1, \
2649 .drvtype_reg = -1, \
2646 } 2650 }
2647 2651
2648/* Pin groups for drive strength registers (configurable version) */ 2652/* Pin groups for drive strength registers (configurable version) */
@@ -2660,6 +2664,7 @@ static const struct tegra_function tegra20_functions[] = {
2660 .odrain_reg = -1, \ 2664 .odrain_reg = -1, \
2661 .lock_reg = -1, \ 2665 .lock_reg = -1, \
2662 .ioreset_reg = -1, \ 2666 .ioreset_reg = -1, \
2667 .rcv_sel_reg = -1, \
2663 .drv_reg = ((r) - PINGROUP_REG_A), \ 2668 .drv_reg = ((r) - PINGROUP_REG_A), \
2664 .drv_bank = 3, \ 2669 .drv_bank = 3, \
2665 .hsm_bit = hsm_b, \ 2670 .hsm_bit = hsm_b, \
@@ -2673,6 +2678,7 @@ static const struct tegra_function tegra20_functions[] = {
2673 .slwr_width = slwr_w, \ 2678 .slwr_width = slwr_w, \
2674 .slwf_bit = slwf_b, \ 2679 .slwf_bit = slwf_b, \
2675 .slwf_width = slwf_w, \ 2680 .slwf_width = slwf_w, \
2681 .drvtype_reg = -1, \
2676 } 2682 }
2677 2683
2678/* Pin groups for drive strength registers (simple version) */ 2684/* Pin groups for drive strength registers (simple version) */
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 9ad87ea735d4..2300deba25bd 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -3384,7 +3384,9 @@ static const struct tegra_function tegra30_functions[] = {
3384 .ioreset_reg = PINGROUP_REG_##ior(r), \ 3384 .ioreset_reg = PINGROUP_REG_##ior(r), \
3385 .ioreset_bank = 1, \ 3385 .ioreset_bank = 1, \
3386 .ioreset_bit = 8, \ 3386 .ioreset_bit = 8, \
3387 .rcv_sel_reg = -1, \
3387 .drv_reg = -1, \ 3388 .drv_reg = -1, \
3389 .drvtype_reg = -1, \
3388 } 3390 }
3389 3391
3390#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \ 3392#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
@@ -3401,6 +3403,7 @@ static const struct tegra_function tegra30_functions[] = {
3401 .odrain_reg = -1, \ 3403 .odrain_reg = -1, \
3402 .lock_reg = -1, \ 3404 .lock_reg = -1, \
3403 .ioreset_reg = -1, \ 3405 .ioreset_reg = -1, \
3406 .rcv_sel_reg = -1, \
3404 .drv_reg = ((r) - DRV_PINGROUP_REG_A), \ 3407 .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
3405 .drv_bank = 0, \ 3408 .drv_bank = 0, \
3406 .hsm_bit = hsm_b, \ 3409 .hsm_bit = hsm_b, \
@@ -3414,6 +3417,7 @@ static const struct tegra_function tegra30_functions[] = {
3414 .slwr_width = slwr_w, \ 3417 .slwr_width = slwr_w, \
3415 .slwf_bit = slwf_b, \ 3418 .slwf_bit = slwf_b, \
3416 .slwf_width = slwf_w, \ 3419 .slwf_width = slwf_w, \
3420 .drvtype_reg = -1, \
3417 } 3421 }
3418 3422
3419static const struct tegra_pingroup tegra30_groups[] = { 3423static const struct tegra_pingroup tegra30_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 5f0eb04c2336..53cb6a3a56ed 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -441,17 +441,17 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
441 if (port == PORT3) 441 if (port == PORT3)
442 reg = GPIO3_OD; 442 reg = GPIO3_OD;
443 else 443 else
444 reg = GPIO_OD(port); 444 reg = GPIO_OD(pin);
445 *config = LTQ_PINCONF_PACK(param, 445 *config = LTQ_PINCONF_PACK(param,
446 !!gpio_getbit(info->membase[0], reg, PORT_PIN(port))); 446 !gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
447 break; 447 break;
448 448
449 case LTQ_PINCONF_PARAM_PULL: 449 case LTQ_PINCONF_PARAM_PULL:
450 if (port == PORT3) 450 if (port == PORT3)
451 reg = GPIO3_PUDEN; 451 reg = GPIO3_PUDEN;
452 else 452 else
453 reg = GPIO_PUDEN(port); 453 reg = GPIO_PUDEN(pin);
454 if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port))) { 454 if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin))) {
455 *config = LTQ_PINCONF_PACK(param, 0); 455 *config = LTQ_PINCONF_PACK(param, 0);
456 break; 456 break;
457 } 457 }
@@ -459,13 +459,18 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
459 if (port == PORT3) 459 if (port == PORT3)
460 reg = GPIO3_PUDSEL; 460 reg = GPIO3_PUDSEL;
461 else 461 else
462 reg = GPIO_PUDSEL(port); 462 reg = GPIO_PUDSEL(pin);
463 if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port))) 463 if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin)))
464 *config = LTQ_PINCONF_PACK(param, 2); 464 *config = LTQ_PINCONF_PACK(param, 2);
465 else 465 else
466 *config = LTQ_PINCONF_PACK(param, 1); 466 *config = LTQ_PINCONF_PACK(param, 1);
467 break; 467 break;
468 468
469 case LTQ_PINCONF_PARAM_OUTPUT:
470 reg = GPIO_DIR(pin);
471 *config = LTQ_PINCONF_PACK(param,
472 gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
473 break;
469 default: 474 default:
470 dev_err(pctldev->dev, "Invalid config param %04x\n", param); 475 dev_err(pctldev->dev, "Invalid config param %04x\n", param);
471 return -ENOTSUPP; 476 return -ENOTSUPP;
@@ -488,33 +493,44 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
488 if (port == PORT3) 493 if (port == PORT3)
489 reg = GPIO3_OD; 494 reg = GPIO3_OD;
490 else 495 else
491 reg = GPIO_OD(port); 496 reg = GPIO_OD(pin);
492 gpio_setbit(info->membase[0], reg, PORT_PIN(port)); 497 if (arg == 0)
498 gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
499 else
500 gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
493 break; 501 break;
494 502
495 case LTQ_PINCONF_PARAM_PULL: 503 case LTQ_PINCONF_PARAM_PULL:
496 if (port == PORT3) 504 if (port == PORT3)
497 reg = GPIO3_PUDEN; 505 reg = GPIO3_PUDEN;
498 else 506 else
499 reg = GPIO_PUDEN(port); 507 reg = GPIO_PUDEN(pin);
500 if (arg == 0) { 508 if (arg == 0) {
501 gpio_clearbit(info->membase[0], reg, PORT_PIN(port)); 509 gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
502 break; 510 break;
503 } 511 }
504 gpio_setbit(info->membase[0], reg, PORT_PIN(port)); 512 gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
505 513
506 if (port == PORT3) 514 if (port == PORT3)
507 reg = GPIO3_PUDSEL; 515 reg = GPIO3_PUDSEL;
508 else 516 else
509 reg = GPIO_PUDSEL(port); 517 reg = GPIO_PUDSEL(pin);
510 if (arg == 1) 518 if (arg == 1)
511 gpio_clearbit(info->membase[0], reg, PORT_PIN(port)); 519 gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
512 else if (arg == 2) 520 else if (arg == 2)
513 gpio_setbit(info->membase[0], reg, PORT_PIN(port)); 521 gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
514 else 522 else
515 dev_err(pctldev->dev, "Invalid pull value %d\n", arg); 523 dev_err(pctldev->dev, "Invalid pull value %d\n", arg);
516 break; 524 break;
517 525
526 case LTQ_PINCONF_PARAM_OUTPUT:
527 reg = GPIO_DIR(pin);
528 if (arg == 0)
529 gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
530 else
531 gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
532 break;
533
518 default: 534 default:
519 dev_err(pctldev->dev, "Invalid config param %04x\n", param); 535 dev_err(pctldev->dev, "Invalid config param %04x\n", param);
520 return -ENOTSUPP; 536 return -ENOTSUPP;
@@ -522,9 +538,24 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
522 return 0; 538 return 0;
523} 539}
524 540
541int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
542 unsigned selector,
543 unsigned long config)
544{
545 struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
546 int i, ret = 0;
547
548 for (i = 0; i < info->grps[selector].npins && !ret; i++)
549 ret = xway_pinconf_set(pctldev,
550 info->grps[selector].pins[i], config);
551
552 return ret;
553}
554
525static struct pinconf_ops xway_pinconf_ops = { 555static struct pinconf_ops xway_pinconf_ops = {
526 .pin_config_get = xway_pinconf_get, 556 .pin_config_get = xway_pinconf_get,
527 .pin_config_set = xway_pinconf_set, 557 .pin_config_set = xway_pinconf_set,
558 .pin_config_group_set = xway_pinconf_group_set,
528}; 559};
529 560
530static struct pinctrl_desc xway_pctrl_desc = { 561static struct pinctrl_desc xway_pctrl_desc = {
@@ -558,6 +589,7 @@ static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
558static const struct ltq_cfg_param xway_cfg_params[] = { 589static const struct ltq_cfg_param xway_cfg_params[] = {
559 {"lantiq,pull", LTQ_PINCONF_PARAM_PULL}, 590 {"lantiq,pull", LTQ_PINCONF_PARAM_PULL},
560 {"lantiq,open-drain", LTQ_PINCONF_PARAM_OPEN_DRAIN}, 591 {"lantiq,open-drain", LTQ_PINCONF_PARAM_OPEN_DRAIN},
592 {"lantiq,output", LTQ_PINCONF_PARAM_OUTPUT},
561}; 593};
562 594
563static struct ltq_pinmux_info xway_info = { 595static struct ltq_pinmux_info xway_info = {
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index fcde4e528819..d9f9a0dbc6f3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1910,7 +1910,7 @@ fail_platform:
1910 return result; 1910 return result;
1911} 1911}
1912 1912
1913static int asus_acpi_remove(struct acpi_device *device, int type) 1913static int asus_acpi_remove(struct acpi_device *device)
1914{ 1914{
1915 struct asus_laptop *asus = acpi_driver_data(device); 1915 struct asus_laptop *asus = acpi_driver_data(device);
1916 1916
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index c87ff16873f9..36e5e6c13db4 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -432,7 +432,7 @@ failed_sensitivity:
432 return error; 432 return error;
433} 433}
434 434
435static int cmpc_accel_remove_v4(struct acpi_device *acpi, int type) 435static int cmpc_accel_remove_v4(struct acpi_device *acpi)
436{ 436{
437 struct input_dev *inputdev; 437 struct input_dev *inputdev;
438 struct cmpc_accel *accel; 438 struct cmpc_accel *accel;
@@ -668,7 +668,7 @@ failed_file:
668 return error; 668 return error;
669} 669}
670 670
671static int cmpc_accel_remove(struct acpi_device *acpi, int type) 671static int cmpc_accel_remove(struct acpi_device *acpi)
672{ 672{
673 struct input_dev *inputdev; 673 struct input_dev *inputdev;
674 struct cmpc_accel *accel; 674 struct cmpc_accel *accel;
@@ -753,7 +753,7 @@ static int cmpc_tablet_add(struct acpi_device *acpi)
753 cmpc_tablet_idev_init); 753 cmpc_tablet_idev_init);
754} 754}
755 755
756static int cmpc_tablet_remove(struct acpi_device *acpi, int type) 756static int cmpc_tablet_remove(struct acpi_device *acpi)
757{ 757{
758 return cmpc_remove_acpi_notify_device(acpi); 758 return cmpc_remove_acpi_notify_device(acpi);
759} 759}
@@ -1000,7 +1000,7 @@ out_bd:
1000 return retval; 1000 return retval;
1001} 1001}
1002 1002
1003static int cmpc_ipml_remove(struct acpi_device *acpi, int type) 1003static int cmpc_ipml_remove(struct acpi_device *acpi)
1004{ 1004{
1005 struct ipml200_dev *ipml; 1005 struct ipml200_dev *ipml;
1006 1006
@@ -1079,7 +1079,7 @@ static int cmpc_keys_add(struct acpi_device *acpi)
1079 cmpc_keys_idev_init); 1079 cmpc_keys_idev_init);
1080} 1080}
1081 1081
1082static int cmpc_keys_remove(struct acpi_device *acpi, int type) 1082static int cmpc_keys_remove(struct acpi_device *acpi)
1083{ 1083{
1084 return cmpc_remove_acpi_notify_device(acpi); 1084 return cmpc_remove_acpi_notify_device(acpi);
1085} 1085}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 528e9495458d..5d26e70bed6c 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1007,7 +1007,7 @@ static int eeepc_get_fan_pwm(void)
1007 1007
1008static void eeepc_set_fan_pwm(int value) 1008static void eeepc_set_fan_pwm(int value)
1009{ 1009{
1010 value = SENSORS_LIMIT(value, 0, 255); 1010 value = clamp_val(value, 0, 255);
1011 value = value * 100 / 255; 1011 value = value * 100 / 255;
1012 ec_write(EEEPC_EC_FAN_PWM, value); 1012 ec_write(EEEPC_EC_FAN_PWM, value);
1013} 1013}
@@ -1501,7 +1501,7 @@ fail_platform:
1501 return result; 1501 return result;
1502} 1502}
1503 1503
1504static int eeepc_acpi_remove(struct acpi_device *device, int type) 1504static int eeepc_acpi_remove(struct acpi_device *device)
1505{ 1505{
1506 struct eeepc_laptop *eeepc = acpi_driver_data(device); 1506 struct eeepc_laptop *eeepc = acpi_driver_data(device);
1507 1507
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index c4c1a5444b38..1c9386e7c58c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -733,7 +733,7 @@ err_stop:
733 return result; 733 return result;
734} 734}
735 735
736static int acpi_fujitsu_remove(struct acpi_device *device, int type) 736static int acpi_fujitsu_remove(struct acpi_device *device)
737{ 737{
738 struct fujitsu_t *fujitsu = acpi_driver_data(device); 738 struct fujitsu_t *fujitsu = acpi_driver_data(device);
739 struct input_dev *input = fujitsu->input; 739 struct input_dev *input = fujitsu->input;
@@ -938,7 +938,7 @@ err_stop:
938 return result; 938 return result;
939} 939}
940 940
941static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) 941static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
942{ 942{
943 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); 943 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
944 struct input_dev *input = fujitsu_hotkey->input; 944 struct input_dev *input = fujitsu_hotkey->input;
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 174ca01c4aa7..570926c10014 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -431,7 +431,7 @@ static int acpi_fujitsu_add(struct acpi_device *adev)
431 return 0; 431 return 0;
432} 432}
433 433
434static int acpi_fujitsu_remove(struct acpi_device *adev, int type) 434static int acpi_fujitsu_remove(struct acpi_device *adev)
435{ 435{
436 free_irq(fujitsu.irq, fujitsu_interrupt); 436 free_irq(fujitsu.irq, fujitsu_interrupt);
437 release_region(fujitsu.io_base, fujitsu.io_length); 437 release_region(fujitsu.io_base, fujitsu.io_length);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 18d74f29dcb2..e64a7a870d42 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -337,7 +337,7 @@ static int lis3lv02d_add(struct acpi_device *device)
337 return ret; 337 return ret;
338} 338}
339 339
340static int lis3lv02d_remove(struct acpi_device *device, int type) 340static int lis3lv02d_remove(struct acpi_device *device)
341{ 341{
342 if (!device) 342 if (!device)
343 return -EINVAL; 343 return -EINVAL;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 7481146a5b47..97c2be195efc 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
244 if (force) 244 if (force)
245 pr_warn("module loaded by force\n"); 245 pr_warn("module loaded by force\n");
246 /* first ensure that we are running on IBM HW */ 246 /* first ensure that we are running on IBM HW */
247 else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) 247 else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
248 return -ENODEV; 248 return -ENODEV;
249 249
250 /* Get the address for the Extended BIOS Data Area */ 250 /* Get the address for the Extended BIOS Data Area */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 64bfb30a52e9..17f00b8dc5cb 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -834,7 +834,7 @@ platform_failed:
834 return ret; 834 return ret;
835} 835}
836 836
837static int ideapad_acpi_remove(struct acpi_device *adevice, int type) 837static int ideapad_acpi_remove(struct acpi_device *adevice)
838{ 838{
839 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); 839 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
840 int i; 840 int i;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 3271ac85115e..d6cfc1558c2f 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -200,7 +200,7 @@ static int intel_menlow_memory_add(struct acpi_device *device)
200 200
201} 201}
202 202
203static int intel_menlow_memory_remove(struct acpi_device *device, int type) 203static int intel_menlow_memory_remove(struct acpi_device *device)
204{ 204{
205 struct thermal_cooling_device *cdev = acpi_driver_data(device); 205 struct thermal_cooling_device *cdev = acpi_driver_data(device);
206 206
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 8e8caa767d6a..4add9a31bf60 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -176,7 +176,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0,
176/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */ 176/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
177 177
178static int acpi_pcc_hotkey_add(struct acpi_device *device); 178static int acpi_pcc_hotkey_add(struct acpi_device *device);
179static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); 179static int acpi_pcc_hotkey_remove(struct acpi_device *device);
180static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event); 180static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
181 181
182static const struct acpi_device_id pcc_device_ids[] = { 182static const struct acpi_device_id pcc_device_ids[] = {
@@ -663,7 +663,7 @@ static int __init acpi_pcc_init(void)
663 return 0; 663 return 0;
664} 664}
665 665
666static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) 666static int acpi_pcc_hotkey_remove(struct acpi_device *device)
667{ 667{
668 struct pcc_acpi *pcc = acpi_driver_data(device); 668 struct pcc_acpi *pcc = acpi_driver_data(device);
669 669
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 71623a2ff3e8..d1f030053176 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -26,6 +26,7 @@
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/efi.h>
29#include <acpi/video.h> 30#include <acpi/video.h>
30 31
31/* 32/*
@@ -1544,6 +1545,9 @@ static int __init samsung_init(void)
1544 struct samsung_laptop *samsung; 1545 struct samsung_laptop *samsung;
1545 int ret; 1546 int ret;
1546 1547
1548 if (efi_enabled(EFI_BOOT))
1549 return -ENODEV;
1550
1547 quirks = &samsung_unknown; 1551 quirks = &samsung_unknown;
1548 if (!force && !dmi_check_system(samsung_dmi_table)) 1552 if (!force && !dmi_check_system(samsung_dmi_table))
1549 return -ENODEV; 1553 return -ENODEV;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b8ad71f7863f..ceb41eff4230 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2740,7 +2740,7 @@ outwalk:
2740 return result; 2740 return result;
2741} 2741}
2742 2742
2743static int sony_nc_remove(struct acpi_device *device, int type) 2743static int sony_nc_remove(struct acpi_device *device)
2744{ 2744{
2745 struct sony_nc_value *item; 2745 struct sony_nc_value *item;
2746 2746
@@ -4111,7 +4111,7 @@ found:
4111 * ACPI driver 4111 * ACPI driver
4112 * 4112 *
4113 *****************/ 4113 *****************/
4114static int sony_pic_remove(struct acpi_device *device, int type) 4114static int sony_pic_remove(struct acpi_device *device)
4115{ 4115{
4116 struct sony_pic_ioport *io, *tmp_io; 4116 struct sony_pic_ioport *io, *tmp_io;
4117 struct sony_pic_irq *irq, *tmp_irq; 4117 struct sony_pic_irq *irq, *tmp_irq;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f946ca7cb762..ebcb461bb2b0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4877,8 +4877,7 @@ static int __init light_init(struct ibm_init_struct *iibm)
4877static void light_exit(void) 4877static void light_exit(void)
4878{ 4878{
4879 led_classdev_unregister(&tpacpi_led_thinklight.led_classdev); 4879 led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
4880 if (work_pending(&tpacpi_led_thinklight.work)) 4880 flush_workqueue(tpacpi_wq);
4881 flush_workqueue(tpacpi_wq);
4882} 4881}
4883 4882
4884static int light_read(struct seq_file *m) 4883static int light_read(struct seq_file *m)
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index d727bfee89a6..4ab618c63b45 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -157,7 +157,7 @@ add_err:
157 return -ENODEV; 157 return -ENODEV;
158} 158}
159 159
160static int acpi_topstar_remove(struct acpi_device *device, int type) 160static int acpi_topstar_remove(struct acpi_device *device)
161{ 161{
162 struct topstar_hkey *tps_hkey = acpi_driver_data(device); 162 struct topstar_hkey *tps_hkey = acpi_driver_data(device);
163 163
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c2727895794c..904476b2fa8f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1118,7 +1118,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
1118 return 0; 1118 return 0;
1119} 1119}
1120 1120
1121static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type) 1121static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
1122{ 1122{
1123 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1123 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
1124 1124
@@ -1250,7 +1250,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
1250 return 0; 1250 return 0;
1251 1251
1252error: 1252error:
1253 toshiba_acpi_remove(acpi_dev, 0); 1253 toshiba_acpi_remove(acpi_dev);
1254 return ret; 1254 return ret;
1255} 1255}
1256 1256
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index e95be0b74859..74dd01ae343b 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
32 32
33 33
34static int toshiba_bt_rfkill_add(struct acpi_device *device); 34static int toshiba_bt_rfkill_add(struct acpi_device *device);
35static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type); 35static int toshiba_bt_rfkill_remove(struct acpi_device *device);
36static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event); 36static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
37 37
38static const struct acpi_device_id bt_device_ids[] = { 38static const struct acpi_device_id bt_device_ids[] = {
@@ -122,7 +122,7 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
122 return result; 122 return result;
123} 123}
124 124
125static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type) 125static int toshiba_bt_rfkill_remove(struct acpi_device *device)
126{ 126{
127 /* clean up */ 127 /* clean up */
128 return 0; 128 return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 42a4dcc25f92..e4ac38aca580 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -92,7 +92,7 @@ module_param(debug_dump_wdg, bool, 0444);
92MODULE_PARM_DESC(debug_dump_wdg, 92MODULE_PARM_DESC(debug_dump_wdg,
93 "Dump available WMI interfaces [0/1]"); 93 "Dump available WMI interfaces [0/1]");
94 94
95static int acpi_wmi_remove(struct acpi_device *device, int type); 95static int acpi_wmi_remove(struct acpi_device *device);
96static int acpi_wmi_add(struct acpi_device *device); 96static int acpi_wmi_add(struct acpi_device *device);
97static void acpi_wmi_notify(struct acpi_device *device, u32 event); 97static void acpi_wmi_notify(struct acpi_device *device, u32 event);
98 98
@@ -917,7 +917,7 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
917 } 917 }
918} 918}
919 919
920static int acpi_wmi_remove(struct acpi_device *device, int type) 920static int acpi_wmi_remove(struct acpi_device *device)
921{ 921{
922 acpi_remove_address_space_handler(device->handle, 922 acpi_remove_address_space_handler(device->handle,
923 ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); 923 ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 16d340c3b852..4b1377bd5944 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -150,7 +150,7 @@ static int ebook_switch_add(struct acpi_device *device)
150 return error; 150 return error;
151} 151}
152 152
153static int ebook_switch_remove(struct acpi_device *device, int type) 153static int ebook_switch_remove(struct acpi_device *device)
154{ 154{
155 struct ebook_switch *button = acpi_driver_data(device); 155 struct ebook_switch *button = acpi_driver_data(device);
156 156
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 72e822e17d47..8813fc03aa09 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -90,7 +90,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
90 pnp_dbg(&dev->dev, "set resources\n"); 90 pnp_dbg(&dev->dev, "set resources\n");
91 91
92 handle = DEVICE_ACPI_HANDLE(&dev->dev); 92 handle = DEVICE_ACPI_HANDLE(&dev->dev);
93 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 93 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
94 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 94 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
95 return -ENODEV; 95 return -ENODEV;
96 } 96 }
@@ -123,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
123 dev_dbg(&dev->dev, "disable resources\n"); 123 dev_dbg(&dev->dev, "disable resources\n");
124 124
125 handle = DEVICE_ACPI_HANDLE(&dev->dev); 125 handle = DEVICE_ACPI_HANDLE(&dev->dev);
126 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 126 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
127 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 127 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
128 return 0; 128 return 0;
129 } 129 }
@@ -145,7 +145,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
145 acpi_handle handle; 145 acpi_handle handle;
146 146
147 handle = DEVICE_ACPI_HANDLE(&dev->dev); 147 handle = DEVICE_ACPI_HANDLE(&dev->dev);
148 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 148 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
149 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 149 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
150 return false; 150 return false;
151 } 151 }
@@ -160,7 +160,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
160 int error = 0; 160 int error = 0;
161 161
162 handle = DEVICE_ACPI_HANDLE(&dev->dev); 162 handle = DEVICE_ACPI_HANDLE(&dev->dev);
163 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 163 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
164 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 164 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
165 return 0; 165 return 0;
166 } 166 }
@@ -197,7 +197,7 @@ static int pnpacpi_resume(struct pnp_dev *dev)
197 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 197 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
198 int error = 0; 198 int error = 0;
199 199
200 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 200 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
201 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 201 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
202 return -ENODEV; 202 return -ENODEV;
203 } 203 }
diff --git a/drivers/pnp/pnpbios/Kconfig b/drivers/pnp/pnpbios/Kconfig
index b986d9fa3b9a..50c3dd065e03 100644
--- a/drivers/pnp/pnpbios/Kconfig
+++ b/drivers/pnp/pnpbios/Kconfig
@@ -2,8 +2,8 @@
2# Plug and Play BIOS configuration 2# Plug and Play BIOS configuration
3# 3#
4config PNPBIOS 4config PNPBIOS
5 bool "Plug and Play BIOS support (EXPERIMENTAL)" 5 bool "Plug and Play BIOS support"
6 depends on ISA && X86 && EXPERIMENTAL 6 depends on ISA && X86
7 default n 7 default n
8 ---help--- 8 ---help---
9 Linux uses the PNPBIOS as defined in "Plug and Play BIOS 9 Linux uses the PNPBIOS as defined in "Plug and Play BIOS
diff --git a/drivers/power/88pm860x_battery.c b/drivers/power/88pm860x_battery.c
index 8bc80b05c63c..d338c1c4e8c8 100644
--- a/drivers/power/88pm860x_battery.c
+++ b/drivers/power/88pm860x_battery.c
@@ -915,15 +915,13 @@ static int pm860x_battery_probe(struct platform_device *pdev)
915 info->irq_cc = platform_get_irq(pdev, 0); 915 info->irq_cc = platform_get_irq(pdev, 0);
916 if (info->irq_cc <= 0) { 916 if (info->irq_cc <= 0) {
917 dev_err(&pdev->dev, "No IRQ resource!\n"); 917 dev_err(&pdev->dev, "No IRQ resource!\n");
918 ret = -EINVAL; 918 return -EINVAL;
919 goto out;
920 } 919 }
921 920
922 info->irq_batt = platform_get_irq(pdev, 1); 921 info->irq_batt = platform_get_irq(pdev, 1);
923 if (info->irq_batt <= 0) { 922 if (info->irq_batt <= 0) {
924 dev_err(&pdev->dev, "No IRQ resource!\n"); 923 dev_err(&pdev->dev, "No IRQ resource!\n");
925 ret = -EINVAL; 924 return -EINVAL;
926 goto out;
927 } 925 }
928 926
929 info->chip = chip; 927 info->chip = chip;
@@ -957,7 +955,7 @@ static int pm860x_battery_probe(struct platform_device *pdev)
957 955
958 ret = power_supply_register(&pdev->dev, &info->battery); 956 ret = power_supply_register(&pdev->dev, &info->battery);
959 if (ret) 957 if (ret)
960 goto out; 958 return ret;
961 info->battery.dev->parent = &pdev->dev; 959 info->battery.dev->parent = &pdev->dev;
962 960
963 ret = request_threaded_irq(info->irq_cc, NULL, 961 ret = request_threaded_irq(info->irq_cc, NULL,
@@ -984,8 +982,6 @@ out_coulomb:
984 free_irq(info->irq_cc, info); 982 free_irq(info->irq_cc, info);
985out_reg: 983out_reg:
986 power_supply_unregister(&info->battery); 984 power_supply_unregister(&info->battery);
987out:
988 kfree(info);
989 return ret; 985 return ret;
990} 986}
991 987
@@ -993,10 +989,9 @@ static int pm860x_battery_remove(struct platform_device *pdev)
993{ 989{
994 struct pm860x_battery_info *info = platform_get_drvdata(pdev); 990 struct pm860x_battery_info *info = platform_get_drvdata(pdev);
995 991
996 power_supply_unregister(&info->battery);
997 free_irq(info->irq_batt, info); 992 free_irq(info->irq_batt, info);
998 free_irq(info->irq_cc, info); 993 free_irq(info->irq_cc, info);
999 kfree(info); 994 power_supply_unregister(&info->battery);
1000 platform_set_drvdata(pdev, NULL); 995 platform_set_drvdata(pdev, NULL);
1001 return 0; 996 return 0;
1002} 997}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 9f45e2f77d53..9e00c389e777 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -346,6 +346,20 @@ config AB8500_BM
346 help 346 help
347 Say Y to include support for AB8500 battery management. 347 Say Y to include support for AB8500 battery management.
348 348
349config BATTERY_GOLDFISH
350 tristate "Goldfish battery driver"
351 depends on GENERIC_HARDIRQS
352 help
353 Say Y to enable support for the battery and AC power in the
354 Goldfish emulator.
355
356config CHARGER_PM2301
357 bool "PM2301 Battery Charger Driver"
358 depends on AB8500_BM
359 help
360 Say Y to include support for PM2301 charger driver.
361 Depends on AB8500 battery management core.
362
349source "drivers/power/reset/Kconfig" 363source "drivers/power/reset/Kconfig"
350 364
351endif # POWER_SUPPLY 365endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 22c8913382c0..3f66436af45c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
20obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o 20obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
21obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o 21obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
22obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o 22obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
23obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
23obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 24obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
24obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 25obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
25obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o 26obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
@@ -38,7 +39,7 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
38obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o 39obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
39obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o 40obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
40obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o 41obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
41obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o 42obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
42obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o 43obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
43obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o 44obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
44obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 45obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
46obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o 47obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
47obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o 48obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
48obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o 49obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
50obj-$(CONFIG_CHARGER_PM2301) += pm2301_charger.o
49obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o 51obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
50obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o 52obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
51obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o 53obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
diff --git a/drivers/power/ab8500_bmdata.c b/drivers/power/ab8500_bmdata.c
index f034ae43e045..7a96c0650fbb 100644
--- a/drivers/power/ab8500_bmdata.c
+++ b/drivers/power/ab8500_bmdata.c
@@ -182,206 +182,206 @@ static struct batres_vs_temp temp_to_batres_tbl_9100[] = {
182}; 182};
183 183
184static struct abx500_battery_type bat_type_thermistor[] = { 184static struct abx500_battery_type bat_type_thermistor[] = {
185[BATTERY_UNKNOWN] = { 185 [BATTERY_UNKNOWN] = {
186 /* First element always represent the UNKNOWN battery */ 186 /* First element always represent the UNKNOWN battery */
187 .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN, 187 .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
188 .resis_high = 0, 188 .resis_high = 0,
189 .resis_low = 0, 189 .resis_low = 0,
190 .battery_resistance = 300, 190 .battery_resistance = 300,
191 .charge_full_design = 612, 191 .charge_full_design = 612,
192 .nominal_voltage = 3700, 192 .nominal_voltage = 3700,
193 .termination_vol = 4050, 193 .termination_vol = 4050,
194 .termination_curr = 200, 194 .termination_curr = 200,
195 .recharge_vol = 3990, 195 .recharge_cap = 95,
196 .normal_cur_lvl = 400, 196 .normal_cur_lvl = 400,
197 .normal_vol_lvl = 4100, 197 .normal_vol_lvl = 4100,
198 .maint_a_cur_lvl = 400, 198 .maint_a_cur_lvl = 400,
199 .maint_a_vol_lvl = 4050, 199 .maint_a_vol_lvl = 4050,
200 .maint_a_chg_timer_h = 60, 200 .maint_a_chg_timer_h = 60,
201 .maint_b_cur_lvl = 400, 201 .maint_b_cur_lvl = 400,
202 .maint_b_vol_lvl = 4000, 202 .maint_b_vol_lvl = 4000,
203 .maint_b_chg_timer_h = 200, 203 .maint_b_chg_timer_h = 200,
204 .low_high_cur_lvl = 300, 204 .low_high_cur_lvl = 300,
205 .low_high_vol_lvl = 4000, 205 .low_high_vol_lvl = 4000,
206 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), 206 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
207 .r_to_t_tbl = temp_tbl, 207 .r_to_t_tbl = temp_tbl,
208 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), 208 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
209 .v_to_cap_tbl = cap_tbl, 209 .v_to_cap_tbl = cap_tbl,
210 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 210 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
211 .batres_tbl = temp_to_batres_tbl_thermistor, 211 .batres_tbl = temp_to_batres_tbl_thermistor,
212}, 212 },
213{ 213 {
214 .name = POWER_SUPPLY_TECHNOLOGY_LIPO, 214 .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
215 .resis_high = 53407, 215 .resis_high = 53407,
216 .resis_low = 12500, 216 .resis_low = 12500,
217 .battery_resistance = 300, 217 .battery_resistance = 300,
218 .charge_full_design = 900, 218 .charge_full_design = 900,
219 .nominal_voltage = 3600, 219 .nominal_voltage = 3600,
220 .termination_vol = 4150, 220 .termination_vol = 4150,
221 .termination_curr = 80, 221 .termination_curr = 80,
222 .recharge_vol = 4130, 222 .recharge_cap = 95,
223 .normal_cur_lvl = 700, 223 .normal_cur_lvl = 700,
224 .normal_vol_lvl = 4200, 224 .normal_vol_lvl = 4200,
225 .maint_a_cur_lvl = 600, 225 .maint_a_cur_lvl = 600,
226 .maint_a_vol_lvl = 4150, 226 .maint_a_vol_lvl = 4150,
227 .maint_a_chg_timer_h = 60, 227 .maint_a_chg_timer_h = 60,
228 .maint_b_cur_lvl = 600, 228 .maint_b_cur_lvl = 600,
229 .maint_b_vol_lvl = 4100, 229 .maint_b_vol_lvl = 4100,
230 .maint_b_chg_timer_h = 200, 230 .maint_b_chg_timer_h = 200,
231 .low_high_cur_lvl = 300, 231 .low_high_cur_lvl = 300,
232 .low_high_vol_lvl = 4000, 232 .low_high_vol_lvl = 4000,
233 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor), 233 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
234 .r_to_t_tbl = temp_tbl_A_thermistor, 234 .r_to_t_tbl = temp_tbl_A_thermistor,
235 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor), 235 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
236 .v_to_cap_tbl = cap_tbl_A_thermistor, 236 .v_to_cap_tbl = cap_tbl_A_thermistor,
237 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 237 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
238 .batres_tbl = temp_to_batres_tbl_thermistor, 238 .batres_tbl = temp_to_batres_tbl_thermistor,
239 239
240}, 240 },
241{ 241 {
242 .name = POWER_SUPPLY_TECHNOLOGY_LIPO, 242 .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
243 .resis_high = 200000, 243 .resis_high = 200000,
244 .resis_low = 82869, 244 .resis_low = 82869,
245 .battery_resistance = 300, 245 .battery_resistance = 300,
246 .charge_full_design = 900, 246 .charge_full_design = 900,
247 .nominal_voltage = 3600, 247 .nominal_voltage = 3600,
248 .termination_vol = 4150, 248 .termination_vol = 4150,
249 .termination_curr = 80, 249 .termination_curr = 80,
250 .recharge_vol = 4130, 250 .recharge_cap = 95,
251 .normal_cur_lvl = 700, 251 .normal_cur_lvl = 700,
252 .normal_vol_lvl = 4200, 252 .normal_vol_lvl = 4200,
253 .maint_a_cur_lvl = 600, 253 .maint_a_cur_lvl = 600,
254 .maint_a_vol_lvl = 4150, 254 .maint_a_vol_lvl = 4150,
255 .maint_a_chg_timer_h = 60, 255 .maint_a_chg_timer_h = 60,
256 .maint_b_cur_lvl = 600, 256 .maint_b_cur_lvl = 600,
257 .maint_b_vol_lvl = 4100, 257 .maint_b_vol_lvl = 4100,
258 .maint_b_chg_timer_h = 200, 258 .maint_b_chg_timer_h = 200,
259 .low_high_cur_lvl = 300, 259 .low_high_cur_lvl = 300,
260 .low_high_vol_lvl = 4000, 260 .low_high_vol_lvl = 4000,
261 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor), 261 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
262 .r_to_t_tbl = temp_tbl_B_thermistor, 262 .r_to_t_tbl = temp_tbl_B_thermistor,
263 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor), 263 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
264 .v_to_cap_tbl = cap_tbl_B_thermistor, 264 .v_to_cap_tbl = cap_tbl_B_thermistor,
265 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 265 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
266 .batres_tbl = temp_to_batres_tbl_thermistor, 266 .batres_tbl = temp_to_batres_tbl_thermistor,
267}, 267 },
268}; 268};
269 269
270static struct abx500_battery_type bat_type_ext_thermistor[] = { 270static struct abx500_battery_type bat_type_ext_thermistor[] = {
271[BATTERY_UNKNOWN] = { 271 [BATTERY_UNKNOWN] = {
272 /* First element always represent the UNKNOWN battery */ 272 /* First element always represent the UNKNOWN battery */
273 .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN, 273 .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
274 .resis_high = 0, 274 .resis_high = 0,
275 .resis_low = 0, 275 .resis_low = 0,
276 .battery_resistance = 300, 276 .battery_resistance = 300,
277 .charge_full_design = 612, 277 .charge_full_design = 612,
278 .nominal_voltage = 3700, 278 .nominal_voltage = 3700,
279 .termination_vol = 4050, 279 .termination_vol = 4050,
280 .termination_curr = 200, 280 .termination_curr = 200,
281 .recharge_vol = 3990, 281 .recharge_cap = 95,
282 .normal_cur_lvl = 400, 282 .normal_cur_lvl = 400,
283 .normal_vol_lvl = 4100, 283 .normal_vol_lvl = 4100,
284 .maint_a_cur_lvl = 400, 284 .maint_a_cur_lvl = 400,
285 .maint_a_vol_lvl = 4050, 285 .maint_a_vol_lvl = 4050,
286 .maint_a_chg_timer_h = 60, 286 .maint_a_chg_timer_h = 60,
287 .maint_b_cur_lvl = 400, 287 .maint_b_cur_lvl = 400,
288 .maint_b_vol_lvl = 4000, 288 .maint_b_vol_lvl = 4000,
289 .maint_b_chg_timer_h = 200, 289 .maint_b_chg_timer_h = 200,
290 .low_high_cur_lvl = 300, 290 .low_high_cur_lvl = 300,
291 .low_high_vol_lvl = 4000, 291 .low_high_vol_lvl = 4000,
292 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), 292 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
293 .r_to_t_tbl = temp_tbl, 293 .r_to_t_tbl = temp_tbl,
294 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), 294 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
295 .v_to_cap_tbl = cap_tbl, 295 .v_to_cap_tbl = cap_tbl,
296 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 296 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
297 .batres_tbl = temp_to_batres_tbl_thermistor, 297 .batres_tbl = temp_to_batres_tbl_thermistor,
298}, 298 },
299/* 299/*
300 * These are the batteries that doesn't have an internal NTC resistor to measure 300 * These are the batteries that doesn't have an internal NTC resistor to measure
301 * its temperature. The temperature in this case is measure with a NTC placed 301 * its temperature. The temperature in this case is measure with a NTC placed
302 * near the battery but on the PCB. 302 * near the battery but on the PCB.
303 */ 303 */
304{ 304 {
305 .name = POWER_SUPPLY_TECHNOLOGY_LIPO, 305 .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
306 .resis_high = 76000, 306 .resis_high = 76000,
307 .resis_low = 53000, 307 .resis_low = 53000,
308 .battery_resistance = 300, 308 .battery_resistance = 300,
309 .charge_full_design = 900, 309 .charge_full_design = 900,
310 .nominal_voltage = 3700, 310 .nominal_voltage = 3700,
311 .termination_vol = 4150, 311 .termination_vol = 4150,
312 .termination_curr = 100, 312 .termination_curr = 100,
313 .recharge_vol = 4130, 313 .recharge_cap = 95,
314 .normal_cur_lvl = 700, 314 .normal_cur_lvl = 700,
315 .normal_vol_lvl = 4200, 315 .normal_vol_lvl = 4200,
316 .maint_a_cur_lvl = 600, 316 .maint_a_cur_lvl = 600,
317 .maint_a_vol_lvl = 4150, 317 .maint_a_vol_lvl = 4150,
318 .maint_a_chg_timer_h = 60, 318 .maint_a_chg_timer_h = 60,
319 .maint_b_cur_lvl = 600, 319 .maint_b_cur_lvl = 600,
320 .maint_b_vol_lvl = 4100, 320 .maint_b_vol_lvl = 4100,
321 .maint_b_chg_timer_h = 200, 321 .maint_b_chg_timer_h = 200,
322 .low_high_cur_lvl = 300, 322 .low_high_cur_lvl = 300,
323 .low_high_vol_lvl = 4000, 323 .low_high_vol_lvl = 4000,
324 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), 324 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
325 .r_to_t_tbl = temp_tbl, 325 .r_to_t_tbl = temp_tbl,
326 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), 326 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
327 .v_to_cap_tbl = cap_tbl, 327 .v_to_cap_tbl = cap_tbl,
328 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 328 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
329 .batres_tbl = temp_to_batres_tbl_thermistor, 329 .batres_tbl = temp_to_batres_tbl_thermistor,
330}, 330 },
331{ 331 {
332 .name = POWER_SUPPLY_TECHNOLOGY_LION, 332 .name = POWER_SUPPLY_TECHNOLOGY_LION,
333 .resis_high = 30000, 333 .resis_high = 30000,
334 .resis_low = 10000, 334 .resis_low = 10000,
335 .battery_resistance = 300, 335 .battery_resistance = 300,
336 .charge_full_design = 950, 336 .charge_full_design = 950,
337 .nominal_voltage = 3700, 337 .nominal_voltage = 3700,
338 .termination_vol = 4150, 338 .termination_vol = 4150,
339 .termination_curr = 100, 339 .termination_curr = 100,
340 .recharge_vol = 4130, 340 .recharge_cap = 95,
341 .normal_cur_lvl = 700, 341 .normal_cur_lvl = 700,
342 .normal_vol_lvl = 4200, 342 .normal_vol_lvl = 4200,
343 .maint_a_cur_lvl = 600, 343 .maint_a_cur_lvl = 600,
344 .maint_a_vol_lvl = 4150, 344 .maint_a_vol_lvl = 4150,
345 .maint_a_chg_timer_h = 60, 345 .maint_a_chg_timer_h = 60,
346 .maint_b_cur_lvl = 600, 346 .maint_b_cur_lvl = 600,
347 .maint_b_vol_lvl = 4100, 347 .maint_b_vol_lvl = 4100,
348 .maint_b_chg_timer_h = 200, 348 .maint_b_chg_timer_h = 200,
349 .low_high_cur_lvl = 300, 349 .low_high_cur_lvl = 300,
350 .low_high_vol_lvl = 4000, 350 .low_high_vol_lvl = 4000,
351 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), 351 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
352 .r_to_t_tbl = temp_tbl, 352 .r_to_t_tbl = temp_tbl,
353 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), 353 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
354 .v_to_cap_tbl = cap_tbl, 354 .v_to_cap_tbl = cap_tbl,
355 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 355 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
356 .batres_tbl = temp_to_batres_tbl_thermistor, 356 .batres_tbl = temp_to_batres_tbl_thermistor,
357}, 357 },
358{ 358 {
359 .name = POWER_SUPPLY_TECHNOLOGY_LION, 359 .name = POWER_SUPPLY_TECHNOLOGY_LION,
360 .resis_high = 95000, 360 .resis_high = 95000,
361 .resis_low = 76001, 361 .resis_low = 76001,
362 .battery_resistance = 300, 362 .battery_resistance = 300,
363 .charge_full_design = 950, 363 .charge_full_design = 950,
364 .nominal_voltage = 3700, 364 .nominal_voltage = 3700,
365 .termination_vol = 4150, 365 .termination_vol = 4150,
366 .termination_curr = 100, 366 .termination_curr = 100,
367 .recharge_vol = 4130, 367 .recharge_cap = 95,
368 .normal_cur_lvl = 700, 368 .normal_cur_lvl = 700,
369 .normal_vol_lvl = 4200, 369 .normal_vol_lvl = 4200,
370 .maint_a_cur_lvl = 600, 370 .maint_a_cur_lvl = 600,
371 .maint_a_vol_lvl = 4150, 371 .maint_a_vol_lvl = 4150,
372 .maint_a_chg_timer_h = 60, 372 .maint_a_chg_timer_h = 60,
373 .maint_b_cur_lvl = 600, 373 .maint_b_cur_lvl = 600,
374 .maint_b_vol_lvl = 4100, 374 .maint_b_vol_lvl = 4100,
375 .maint_b_chg_timer_h = 200, 375 .maint_b_chg_timer_h = 200,
376 .low_high_cur_lvl = 300, 376 .low_high_cur_lvl = 300,
377 .low_high_vol_lvl = 4000, 377 .low_high_vol_lvl = 4000,
378 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), 378 .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
379 .r_to_t_tbl = temp_tbl, 379 .r_to_t_tbl = temp_tbl,
380 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), 380 .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
381 .v_to_cap_tbl = cap_tbl, 381 .v_to_cap_tbl = cap_tbl,
382 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), 382 .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
383 .batres_tbl = temp_to_batres_tbl_thermistor, 383 .batres_tbl = temp_to_batres_tbl_thermistor,
384}, 384 },
385}; 385};
386 386
387static const struct abx500_bm_capacity_levels cap_levels = { 387static const struct abx500_bm_capacity_levels cap_levels = {
@@ -405,8 +405,8 @@ static const struct abx500_fg_parameters fg = {
405 .lowbat_threshold = 3100, 405 .lowbat_threshold = 3100,
406 .battok_falling_th_sel0 = 2860, 406 .battok_falling_th_sel0 = 2860,
407 .battok_raising_th_sel1 = 2860, 407 .battok_raising_th_sel1 = 2860,
408 .maint_thres = 95,
408 .user_cap_limit = 15, 409 .user_cap_limit = 15,
409 .maint_thres = 97,
410}; 410};
411 411
412static const struct abx500_maxim_parameters maxi_params = { 412static const struct abx500_maxim_parameters maxi_params = {
@@ -424,96 +424,84 @@ static const struct abx500_bm_charger_parameters chg = {
424}; 424};
425 425
426struct abx500_bm_data ab8500_bm_data = { 426struct abx500_bm_data ab8500_bm_data = {
427 .temp_under = 3, 427 .temp_under = 3,
428 .temp_low = 8, 428 .temp_low = 8,
429 .temp_high = 43, 429 .temp_high = 43,
430 .temp_over = 48, 430 .temp_over = 48,
431 .main_safety_tmr_h = 4, 431 .main_safety_tmr_h = 4,
432 .temp_interval_chg = 20, 432 .temp_interval_chg = 20,
433 .temp_interval_nochg = 120, 433 .temp_interval_nochg = 120,
434 .usb_safety_tmr_h = 4, 434 .usb_safety_tmr_h = 4,
435 .bkup_bat_v = BUP_VCH_SEL_2P6V, 435 .bkup_bat_v = BUP_VCH_SEL_2P6V,
436 .bkup_bat_i = BUP_ICH_SEL_150UA, 436 .bkup_bat_i = BUP_ICH_SEL_150UA,
437 .no_maintenance = false, 437 .no_maintenance = false,
438 .adc_therm = ABx500_ADC_THERM_BATCTRL, 438 .capacity_scaling = false,
439 .chg_unknown_bat = false, 439 .adc_therm = ABx500_ADC_THERM_BATCTRL,
440 .enable_overshoot = false, 440 .chg_unknown_bat = false,
441 .fg_res = 100, 441 .enable_overshoot = false,
442 .cap_levels = &cap_levels, 442 .fg_res = 100,
443 .bat_type = bat_type_thermistor, 443 .cap_levels = &cap_levels,
444 .n_btypes = 3, 444 .bat_type = bat_type_thermistor,
445 .batt_id = 0, 445 .n_btypes = 3,
446 .interval_charging = 5, 446 .batt_id = 0,
447 .interval_not_charging = 120, 447 .interval_charging = 5,
448 .temp_hysteresis = 3, 448 .interval_not_charging = 120,
449 .gnd_lift_resistance = 34, 449 .temp_hysteresis = 3,
450 .maxi = &maxi_params, 450 .gnd_lift_resistance = 34,
451 .chg_params = &chg, 451 .maxi = &maxi_params,
452 .fg_params = &fg, 452 .chg_params = &chg,
453 .fg_params = &fg,
453}; 454};
454 455
455int bmdevs_of_probe(struct device *dev, struct device_node *np, 456int ab8500_bm_of_probe(struct device *dev,
456 struct abx500_bm_data **battery) 457 struct device_node *np,
458 struct abx500_bm_data *bm)
457{ 459{
458 struct abx500_battery_type *btype; 460 struct batres_vs_temp *tmp_batres_tbl;
459 struct device_node *np_bat_supply; 461 struct device_node *battery_node;
460 struct abx500_bm_data *bat;
461 const char *btech; 462 const char *btech;
462 char bat_tech[8]; 463 int i;
463 int i, thermistor;
464
465 *battery = &ab8500_bm_data;
466 464
467 /* get phandle to 'battery-info' node */ 465 /* get phandle to 'battery-info' node */
468 np_bat_supply = of_parse_phandle(np, "battery", 0); 466 battery_node = of_parse_phandle(np, "battery", 0);
469 if (!np_bat_supply) { 467 if (!battery_node) {
470 dev_err(dev, "missing property battery\n"); 468 dev_err(dev, "battery node or reference missing\n");
471 return -EINVAL; 469 return -EINVAL;
472 } 470 }
473 if (of_property_read_bool(np_bat_supply,
474 "thermistor-on-batctrl"))
475 thermistor = NTC_INTERNAL;
476 else
477 thermistor = NTC_EXTERNAL;
478 471
479 bat = *battery; 472 btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
480 if (thermistor == NTC_EXTERNAL) {
481 bat->n_btypes = 4;
482 bat->bat_type = bat_type_ext_thermistor;
483 bat->adc_therm = ABx500_ADC_THERM_BATTEMP;
484 }
485 btech = of_get_property(np_bat_supply,
486 "stericsson,battery-type", NULL);
487 if (!btech) { 473 if (!btech) {
488 dev_warn(dev, "missing property battery-name/type\n"); 474 dev_warn(dev, "missing property battery-name/type\n");
489 strcpy(bat_tech, "UNKNOWN"); 475 return -EINVAL;
490 } else {
491 strcpy(bat_tech, btech);
492 } 476 }
493 477
494 if (strncmp(bat_tech, "LION", 4) == 0) { 478 if (strncmp(btech, "LION", 4) == 0) {
495 bat->no_maintenance = true; 479 bm->no_maintenance = true;
496 bat->chg_unknown_bat = true; 480 bm->chg_unknown_bat = true;
497 bat->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600; 481 bm->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
498 bat->bat_type[BATTERY_UNKNOWN].termination_vol = 4150; 482 bm->bat_type[BATTERY_UNKNOWN].termination_vol = 4150;
499 bat->bat_type[BATTERY_UNKNOWN].recharge_vol = 4130; 483 bm->bat_type[BATTERY_UNKNOWN].recharge_cap = 95;
500 bat->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520; 484 bm->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520;
501 bat->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200; 485 bm->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200;
502 } 486 }
503 /* select the battery resolution table */ 487
504 for (i = 0; i < bat->n_btypes; ++i) { 488 if (of_property_read_bool(battery_node, "thermistor-on-batctrl")) {
505 btype = (bat->bat_type + i); 489 if (strncmp(btech, "LION", 4) == 0)
506 if (thermistor == NTC_EXTERNAL) { 490 tmp_batres_tbl = temp_to_batres_tbl_9100;
507 btype->batres_tbl = 491 else
508 temp_to_batres_tbl_ext_thermistor; 492 tmp_batres_tbl = temp_to_batres_tbl_thermistor;
509 } else if (strncmp(bat_tech, "LION", 4) == 0) { 493 } else {
510 btype->batres_tbl = 494 bm->n_btypes = 4;
511 temp_to_batres_tbl_9100; 495 bm->bat_type = bat_type_ext_thermistor;
512 } else { 496 bm->adc_therm = ABx500_ADC_THERM_BATTEMP;
513 btype->batres_tbl = 497 tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
514 temp_to_batres_tbl_thermistor;
515 }
516 } 498 }
517 of_node_put(np_bat_supply); 499
500 /* select the battery resolution table */
501 for (i = 0; i < bm->n_btypes; ++i)
502 bm->bat_type[i].batres_tbl = tmp_batres_tbl;
503
504 of_node_put(battery_node);
505
518 return 0; 506 return 0;
519} 507}
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 20e2a7d3ef43..07689064996e 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -39,6 +39,9 @@
39#define BTEMP_BATCTRL_CURR_SRC_7UA 7 39#define BTEMP_BATCTRL_CURR_SRC_7UA 7
40#define BTEMP_BATCTRL_CURR_SRC_20UA 20 40#define BTEMP_BATCTRL_CURR_SRC_20UA 20
41 41
42#define BTEMP_BATCTRL_CURR_SRC_16UA 16
43#define BTEMP_BATCTRL_CURR_SRC_18UA 18
44
42#define to_ab8500_btemp_device_info(x) container_of((x), \ 45#define to_ab8500_btemp_device_info(x) container_of((x), \
43 struct ab8500_btemp, btemp_psy); 46 struct ab8500_btemp, btemp_psy);
44 47
@@ -78,12 +81,13 @@ struct ab8500_btemp_ranges {
78 * @parent: Pointer to the struct ab8500 81 * @parent: Pointer to the struct ab8500
79 * @gpadc: Pointer to the struct gpadc 82 * @gpadc: Pointer to the struct gpadc
80 * @fg: Pointer to the struct fg 83 * @fg: Pointer to the struct fg
81 * @bat: Pointer to the abx500_bm platform data 84 * @bm: Platform specific battery management information
82 * @btemp_psy: Structure for BTEMP specific battery properties 85 * @btemp_psy: Structure for BTEMP specific battery properties
83 * @events: Structure for information about events triggered 86 * @events: Structure for information about events triggered
84 * @btemp_ranges: Battery temperature range structure 87 * @btemp_ranges: Battery temperature range structure
85 * @btemp_wq: Work queue for measuring the temperature periodically 88 * @btemp_wq: Work queue for measuring the temperature periodically
86 * @btemp_periodic_work: Work for measuring the temperature periodically 89 * @btemp_periodic_work: Work for measuring the temperature periodically
90 * @initialized: True if battery id read.
87 */ 91 */
88struct ab8500_btemp { 92struct ab8500_btemp {
89 struct device *dev; 93 struct device *dev;
@@ -94,12 +98,13 @@ struct ab8500_btemp {
94 struct ab8500 *parent; 98 struct ab8500 *parent;
95 struct ab8500_gpadc *gpadc; 99 struct ab8500_gpadc *gpadc;
96 struct ab8500_fg *fg; 100 struct ab8500_fg *fg;
97 struct abx500_bm_data *bat; 101 struct abx500_bm_data *bm;
98 struct power_supply btemp_psy; 102 struct power_supply btemp_psy;
99 struct ab8500_btemp_events events; 103 struct ab8500_btemp_events events;
100 struct ab8500_btemp_ranges btemp_ranges; 104 struct ab8500_btemp_ranges btemp_ranges;
101 struct workqueue_struct *btemp_wq; 105 struct workqueue_struct *btemp_wq;
102 struct delayed_work btemp_periodic_work; 106 struct delayed_work btemp_periodic_work;
107 bool initialized;
103}; 108};
104 109
105/* BTEMP power supply properties */ 110/* BTEMP power supply properties */
@@ -147,13 +152,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
147 return (450000 * (v_batctrl)) / (1800 - v_batctrl); 152 return (450000 * (v_batctrl)) / (1800 - v_batctrl);
148 } 153 }
149 154
150 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) { 155 if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
151 /* 156 /*
152 * If the battery has internal NTC, we use the current 157 * If the battery has internal NTC, we use the current
153 * source to calculate the resistance, 7uA or 20uA 158 * source to calculate the resistance, 7uA or 20uA
154 */ 159 */
155 rbs = (v_batctrl * 1000 160 rbs = (v_batctrl * 1000
156 - di->bat->gnd_lift_resistance * inst_curr) 161 - di->bm->gnd_lift_resistance * inst_curr)
157 / di->curr_source; 162 / di->curr_source;
158 } else { 163 } else {
159 /* 164 /*
@@ -209,11 +214,19 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
209 return 0; 214 return 0;
210 215
211 /* Only do this for batteries with internal NTC */ 216 /* Only do this for batteries with internal NTC */
212 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) { 217 if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
213 if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA) 218
214 curr = BAT_CTRL_7U_ENA; 219 if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
215 else 220 if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_16UA)
216 curr = BAT_CTRL_20U_ENA; 221 curr = BAT_CTRL_16U_ENA;
222 else
223 curr = BAT_CTRL_18U_ENA;
224 } else {
225 if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
226 curr = BAT_CTRL_7U_ENA;
227 else
228 curr = BAT_CTRL_20U_ENA;
229 }
217 230
218 dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source); 231 dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
219 232
@@ -241,14 +254,25 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
241 __func__); 254 __func__);
242 goto disable_curr_source; 255 goto disable_curr_source;
243 } 256 }
244 } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) { 257 } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
245 dev_dbg(di->dev, "Disable BATCTRL curr source\n"); 258 dev_dbg(di->dev, "Disable BATCTRL curr source\n");
246 259
247 /* Write 0 to the curr bits */ 260 if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
248 ret = abx500_mask_and_set_register_interruptible(di->dev, 261 /* Write 0 to the curr bits */
249 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE, 262 ret = abx500_mask_and_set_register_interruptible(
250 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA, 263 di->dev,
251 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA)); 264 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
265 BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
266 ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
267 } else {
268 /* Write 0 to the curr bits */
269 ret = abx500_mask_and_set_register_interruptible(
270 di->dev,
271 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
272 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
273 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
274 }
275
252 if (ret) { 276 if (ret) {
253 dev_err(di->dev, "%s failed disabling current source\n", 277 dev_err(di->dev, "%s failed disabling current source\n",
254 __func__); 278 __func__);
@@ -290,11 +314,20 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
290 * if we got an error above 314 * if we got an error above
291 */ 315 */
292disable_curr_source: 316disable_curr_source:
293 /* Write 0 to the curr bits */ 317 if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
294 ret = abx500_mask_and_set_register_interruptible(di->dev, 318 /* Write 0 to the curr bits */
319 ret = abx500_mask_and_set_register_interruptible(di->dev,
320 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
321 BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
322 ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
323 } else {
324 /* Write 0 to the curr bits */
325 ret = abx500_mask_and_set_register_interruptible(di->dev,
295 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE, 326 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
296 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA, 327 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
297 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA)); 328 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
329 }
330
298 if (ret) { 331 if (ret) {
299 dev_err(di->dev, "%s failed disabling current source\n", 332 dev_err(di->dev, "%s failed disabling current source\n",
300 __func__); 333 __func__);
@@ -372,13 +405,10 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
372 return ret; 405 return ret;
373 } 406 }
374 407
375 /* 408 do {
376 * Since there is no interrupt when current measurement is done, 409 msleep(20);
377 * loop for over 250ms (250ms is one sample conversion time 410 } while (!ab8500_fg_inst_curr_started(di->fg));
378 * with 32.768 Khz RTC clock). Note that a stop time must be set 411
379 * since the ab8500_btemp_read_batctrl_voltage call can block and
380 * take an unknown amount of time to complete.
381 */
382 i = 0; 412 i = 0;
383 413
384 do { 414 do {
@@ -457,9 +487,9 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
457 int rbat, rntc, vntc; 487 int rbat, rntc, vntc;
458 u8 id; 488 u8 id;
459 489
460 id = di->bat->batt_id; 490 id = di->bm->batt_id;
461 491
462 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && 492 if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
463 id != BATTERY_UNKNOWN) { 493 id != BATTERY_UNKNOWN) {
464 494
465 rbat = ab8500_btemp_get_batctrl_res(di); 495 rbat = ab8500_btemp_get_batctrl_res(di);
@@ -474,8 +504,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
474 } 504 }
475 505
476 temp = ab8500_btemp_res_to_temp(di, 506 temp = ab8500_btemp_res_to_temp(di,
477 di->bat->bat_type[id].r_to_t_tbl, 507 di->bm->bat_type[id].r_to_t_tbl,
478 di->bat->bat_type[id].n_temp_tbl_elements, rbat); 508 di->bm->bat_type[id].n_temp_tbl_elements, rbat);
479 } else { 509 } else {
480 vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL); 510 vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
481 if (vntc < 0) { 511 if (vntc < 0) {
@@ -491,8 +521,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
491 rntc = 230000 * vntc / (VTVOUT_V - vntc); 521 rntc = 230000 * vntc / (VTVOUT_V - vntc);
492 522
493 temp = ab8500_btemp_res_to_temp(di, 523 temp = ab8500_btemp_res_to_temp(di,
494 di->bat->bat_type[id].r_to_t_tbl, 524 di->bm->bat_type[id].r_to_t_tbl,
495 di->bat->bat_type[id].n_temp_tbl_elements, rntc); 525 di->bm->bat_type[id].n_temp_tbl_elements, rntc);
496 prev = temp; 526 prev = temp;
497 } 527 }
498 dev_dbg(di->dev, "Battery temperature is %d\n", temp); 528 dev_dbg(di->dev, "Battery temperature is %d\n", temp);
@@ -511,9 +541,12 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
511{ 541{
512 int res; 542 int res;
513 u8 i; 543 u8 i;
544 if (is_ab9540(di->parent) || is_ab8505(di->parent))
545 di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
546 else
547 di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
514 548
515 di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA; 549 di->bm->batt_id = BATTERY_UNKNOWN;
516 di->bat->batt_id = BATTERY_UNKNOWN;
517 550
518 res = ab8500_btemp_get_batctrl_res(di); 551 res = ab8500_btemp_get_batctrl_res(di);
519 if (res < 0) { 552 if (res < 0) {
@@ -522,23 +555,23 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
522 } 555 }
523 556
524 /* BATTERY_UNKNOWN is defined on position 0, skip it! */ 557 /* BATTERY_UNKNOWN is defined on position 0, skip it! */
525 for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) { 558 for (i = BATTERY_UNKNOWN + 1; i < di->bm->n_btypes; i++) {
526 if ((res <= di->bat->bat_type[i].resis_high) && 559 if ((res <= di->bm->bat_type[i].resis_high) &&
527 (res >= di->bat->bat_type[i].resis_low)) { 560 (res >= di->bm->bat_type[i].resis_low)) {
528 dev_dbg(di->dev, "Battery detected on %s" 561 dev_dbg(di->dev, "Battery detected on %s"
529 " low %d < res %d < high: %d" 562 " low %d < res %d < high: %d"
530 " index: %d\n", 563 " index: %d\n",
531 di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ? 564 di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
532 "BATCTRL" : "BATTEMP", 565 "BATCTRL" : "BATTEMP",
533 di->bat->bat_type[i].resis_low, res, 566 di->bm->bat_type[i].resis_low, res,
534 di->bat->bat_type[i].resis_high, i); 567 di->bm->bat_type[i].resis_high, i);
535 568
536 di->bat->batt_id = i; 569 di->bm->batt_id = i;
537 break; 570 break;
538 } 571 }
539 } 572 }
540 573
541 if (di->bat->batt_id == BATTERY_UNKNOWN) { 574 if (di->bm->batt_id == BATTERY_UNKNOWN) {
542 dev_warn(di->dev, "Battery identified as unknown" 575 dev_warn(di->dev, "Battery identified as unknown"
543 ", resistance %d Ohm\n", res); 576 ", resistance %d Ohm\n", res);
544 return -ENXIO; 577 return -ENXIO;
@@ -548,13 +581,18 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
548 * We only have to change current source if the 581 * We only have to change current source if the
549 * detected type is Type 1, else we use the 7uA source 582 * detected type is Type 1, else we use the 7uA source
550 */ 583 */
551 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && 584 if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
552 di->bat->batt_id == 1) { 585 di->bm->batt_id == 1) {
553 dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n"); 586 if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
554 di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA; 587 dev_dbg(di->dev, "Set BATCTRL current source to 16uA\n");
588 di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
589 } else {
590 dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
591 di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
592 }
555 } 593 }
556 594
557 return di->bat->batt_id; 595 return di->bm->batt_id;
558} 596}
559 597
560/** 598/**
@@ -569,6 +607,13 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
569 struct ab8500_btemp *di = container_of(work, 607 struct ab8500_btemp *di = container_of(work,
570 struct ab8500_btemp, btemp_periodic_work.work); 608 struct ab8500_btemp, btemp_periodic_work.work);
571 609
610 if (!di->initialized) {
611 di->initialized = true;
612 /* Identify the battery */
613 if (ab8500_btemp_id(di) < 0)
614 dev_warn(di->dev, "failed to identify the battery\n");
615 }
616
572 di->bat_temp = ab8500_btemp_measure_temp(di); 617 di->bat_temp = ab8500_btemp_measure_temp(di);
573 618
574 if (di->bat_temp != di->prev_bat_temp) { 619 if (di->bat_temp != di->prev_bat_temp) {
@@ -577,9 +622,9 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
577 } 622 }
578 623
579 if (di->events.ac_conn || di->events.usb_conn) 624 if (di->events.ac_conn || di->events.usb_conn)
580 interval = di->bat->temp_interval_chg; 625 interval = di->bm->temp_interval_chg;
581 else 626 else
582 interval = di->bat->temp_interval_nochg; 627 interval = di->bm->temp_interval_nochg;
583 628
584 /* Schedule a new measurement */ 629 /* Schedule a new measurement */
585 queue_delayed_work(di->btemp_wq, 630 queue_delayed_work(di->btemp_wq,
@@ -616,9 +661,9 @@ static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
616{ 661{
617 struct ab8500_btemp *di = _di; 662 struct ab8500_btemp *di = _di;
618 663
619 if (is_ab8500_2p0_or_earlier(di->parent)) { 664 if (is_ab8500_3p3_or_earlier(di->parent)) {
620 dev_dbg(di->dev, "Ignore false btemp low irq" 665 dev_dbg(di->dev, "Ignore false btemp low irq"
621 " for ABB cut 1.0, 1.1 and 2.0\n"); 666 " for ABB cut 1.0, 1.1, 2.0 and 3.3\n");
622 } else { 667 } else {
623 dev_crit(di->dev, "Battery temperature lower than -10deg c\n"); 668 dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
624 669
@@ -732,30 +777,30 @@ static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
732 int temp = 0; 777 int temp = 0;
733 778
734 /* 779 /*
735 * The BTEMP events are not reliabe on AB8500 cut2.0 780 * The BTEMP events are not reliabe on AB8500 cut3.3
736 * and prior versions 781 * and prior versions
737 */ 782 */
738 if (is_ab8500_2p0_or_earlier(di->parent)) { 783 if (is_ab8500_3p3_or_earlier(di->parent)) {
739 temp = di->bat_temp * 10; 784 temp = di->bat_temp * 10;
740 } else { 785 } else {
741 if (di->events.btemp_low) { 786 if (di->events.btemp_low) {
742 if (temp > di->btemp_ranges.btemp_low_limit) 787 if (temp > di->btemp_ranges.btemp_low_limit)
743 temp = di->btemp_ranges.btemp_low_limit; 788 temp = di->btemp_ranges.btemp_low_limit * 10;
744 else 789 else
745 temp = di->bat_temp * 10; 790 temp = di->bat_temp * 10;
746 } else if (di->events.btemp_high) { 791 } else if (di->events.btemp_high) {
747 if (temp < di->btemp_ranges.btemp_high_limit) 792 if (temp < di->btemp_ranges.btemp_high_limit)
748 temp = di->btemp_ranges.btemp_high_limit; 793 temp = di->btemp_ranges.btemp_high_limit * 10;
749 else 794 else
750 temp = di->bat_temp * 10; 795 temp = di->bat_temp * 10;
751 } else if (di->events.btemp_lowmed) { 796 } else if (di->events.btemp_lowmed) {
752 if (temp > di->btemp_ranges.btemp_med_limit) 797 if (temp > di->btemp_ranges.btemp_med_limit)
753 temp = di->btemp_ranges.btemp_med_limit; 798 temp = di->btemp_ranges.btemp_med_limit * 10;
754 else 799 else
755 temp = di->bat_temp * 10; 800 temp = di->bat_temp * 10;
756 } else if (di->events.btemp_medhigh) { 801 } else if (di->events.btemp_medhigh) {
757 if (temp < di->btemp_ranges.btemp_med_limit) 802 if (temp < di->btemp_ranges.btemp_med_limit)
758 temp = di->btemp_ranges.btemp_med_limit; 803 temp = di->btemp_ranges.btemp_med_limit * 10;
759 else 804 else
760 temp = di->bat_temp * 10; 805 temp = di->bat_temp * 10;
761 } else 806 } else
@@ -806,7 +851,7 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
806 val->intval = 1; 851 val->intval = 1;
807 break; 852 break;
808 case POWER_SUPPLY_PROP_TECHNOLOGY: 853 case POWER_SUPPLY_PROP_TECHNOLOGY:
809 val->intval = di->bat->bat_type[di->bat->batt_id].name; 854 val->intval = di->bm->bat_type[di->bm->batt_id].name;
810 break; 855 break;
811 case POWER_SUPPLY_PROP_TEMP: 856 case POWER_SUPPLY_PROP_TEMP:
812 val->intval = ab8500_btemp_get_temp(di); 857 val->intval = ab8500_btemp_get_temp(di);
@@ -967,6 +1012,7 @@ static char *supply_interface[] = {
967static int ab8500_btemp_probe(struct platform_device *pdev) 1012static int ab8500_btemp_probe(struct platform_device *pdev)
968{ 1013{
969 struct device_node *np = pdev->dev.of_node; 1014 struct device_node *np = pdev->dev.of_node;
1015 struct abx500_bm_data *plat = pdev->dev.platform_data;
970 struct ab8500_btemp *di; 1016 struct ab8500_btemp *di;
971 int irq, i, ret = 0; 1017 int irq, i, ret = 0;
972 u8 val; 1018 u8 val;
@@ -976,21 +1022,19 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
976 dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__); 1022 dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
977 return -ENOMEM; 1023 return -ENOMEM;
978 } 1024 }
979 di->bat = pdev->mfd_cell->platform_data; 1025
980 if (!di->bat) { 1026 if (!plat) {
981 if (np) { 1027 dev_err(&pdev->dev, "no battery management data supplied\n");
982 ret = bmdevs_of_probe(&pdev->dev, np, &di->bat); 1028 return -EINVAL;
983 if (ret) { 1029 }
984 dev_err(&pdev->dev, 1030 di->bm = plat;
985 "failed to get battery information\n"); 1031
986 return ret; 1032 if (np) {
987 } 1033 ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
988 } else { 1034 if (ret) {
989 dev_err(&pdev->dev, "missing dt node for ab8500_btemp\n"); 1035 dev_err(&pdev->dev, "failed to get battery information\n");
990 return -EINVAL; 1036 return ret;
991 } 1037 }
992 } else {
993 dev_info(&pdev->dev, "falling back to legacy platform data\n");
994 } 1038 }
995 1039
996 /* get parent data */ 1040 /* get parent data */
@@ -998,6 +1042,8 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
998 di->parent = dev_get_drvdata(pdev->dev.parent); 1042 di->parent = dev_get_drvdata(pdev->dev.parent);
999 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); 1043 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
1000 1044
1045 di->initialized = false;
1046
1001 /* BTEMP supply */ 1047 /* BTEMP supply */
1002 di->btemp_psy.name = "ab8500_btemp"; 1048 di->btemp_psy.name = "ab8500_btemp";
1003 di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY; 1049 di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1022,10 +1068,6 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
1022 INIT_DEFERRABLE_WORK(&di->btemp_periodic_work, 1068 INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
1023 ab8500_btemp_periodic_work); 1069 ab8500_btemp_periodic_work);
1024 1070
1025 /* Identify the battery */
1026 if (ab8500_btemp_id(di) < 0)
1027 dev_warn(di->dev, "failed to identify the battery\n");
1028
1029 /* Set BTEMP thermal limits. Low and Med are fixed */ 1071 /* Set BTEMP thermal limits. Low and Med are fixed */
1030 di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT; 1072 di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
1031 di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT; 1073 di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
@@ -1123,7 +1165,7 @@ static void __exit ab8500_btemp_exit(void)
1123 platform_driver_unregister(&ab8500_btemp_driver); 1165 platform_driver_unregister(&ab8500_btemp_driver);
1124} 1166}
1125 1167
1126subsys_initcall_sync(ab8500_btemp_init); 1168device_initcall(ab8500_btemp_init);
1127module_exit(ab8500_btemp_exit); 1169module_exit(ab8500_btemp_exit);
1128 1170
1129MODULE_LICENSE("GPL v2"); 1171MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 3be9c0ee3fc5..24b30b7ea5ca 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -31,6 +31,7 @@
31#include <linux/mfd/abx500/ab8500-gpadc.h> 31#include <linux/mfd/abx500/ab8500-gpadc.h>
32#include <linux/mfd/abx500/ux500_chargalg.h> 32#include <linux/mfd/abx500/ux500_chargalg.h>
33#include <linux/usb/otg.h> 33#include <linux/usb/otg.h>
34#include <linux/mutex.h>
34 35
35/* Charger constants */ 36/* Charger constants */
36#define NO_PW_CONN 0 37#define NO_PW_CONN 0
@@ -54,6 +55,7 @@
54 55
55#define MAIN_CH_INPUT_CURR_SHIFT 4 56#define MAIN_CH_INPUT_CURR_SHIFT 4
56#define VBUS_IN_CURR_LIM_SHIFT 4 57#define VBUS_IN_CURR_LIM_SHIFT 4
58#define AUTO_VBUS_IN_CURR_LIM_SHIFT 4
57 59
58#define LED_INDICATOR_PWM_ENA 0x01 60#define LED_INDICATOR_PWM_ENA 0x01
59#define LED_INDICATOR_PWM_DIS 0x00 61#define LED_INDICATOR_PWM_DIS 0x00
@@ -68,6 +70,11 @@
68#define MAIN_CH_NOK 0x01 70#define MAIN_CH_NOK 0x01
69#define VBUS_DET 0x80 71#define VBUS_DET 0x80
70 72
73#define MAIN_CH_STATUS2_MAINCHGDROP 0x80
74#define MAIN_CH_STATUS2_MAINCHARGERDETDBNC 0x40
75#define USB_CH_VBUSDROP 0x40
76#define USB_CH_VBUSDETDBNC 0x01
77
71/* UsbLineStatus register bit masks */ 78/* UsbLineStatus register bit masks */
72#define AB8500_USB_LINK_STATUS 0x78 79#define AB8500_USB_LINK_STATUS 0x78
73#define AB8500_STD_HOST_SUSP 0x18 80#define AB8500_STD_HOST_SUSP 0x18
@@ -79,6 +86,17 @@
79/* Lowest charger voltage is 3.39V -> 0x4E */ 86/* Lowest charger voltage is 3.39V -> 0x4E */
80#define LOW_VOLT_REG 0x4E 87#define LOW_VOLT_REG 0x4E
81 88
89/* Step up/down delay in us */
90#define STEP_UDELAY 1000
91
92#define CHARGER_STATUS_POLL 10 /* in ms */
93
94#define CHG_WD_INTERVAL (60 * HZ)
95
96#define AB8500_SW_CONTROL_FALLBACK 0x03
97/* Wait for enumeration before charing in us */
98#define WAIT_ACA_RID_ENUMERATION (5 * 1000)
99
82/* UsbLineStatus register - usb types */ 100/* UsbLineStatus register - usb types */
83enum ab8500_charger_link_status { 101enum ab8500_charger_link_status {
84 USB_STAT_NOT_CONFIGURED, 102 USB_STAT_NOT_CONFIGURED,
@@ -97,6 +115,13 @@ enum ab8500_charger_link_status {
97 USB_STAT_HM_IDGND, 115 USB_STAT_HM_IDGND,
98 USB_STAT_RESERVED, 116 USB_STAT_RESERVED,
99 USB_STAT_NOT_VALID_LINK, 117 USB_STAT_NOT_VALID_LINK,
118 USB_STAT_PHY_EN,
119 USB_STAT_SUP_NO_IDGND_VBUS,
120 USB_STAT_SUP_IDGND_VBUS,
121 USB_STAT_CHARGER_LINE_1,
122 USB_STAT_CARKIT_1,
123 USB_STAT_CARKIT_2,
124 USB_STAT_ACA_DOCK_CHARGER,
100}; 125};
101 126
102enum ab8500_usb_state { 127enum ab8500_usb_state {
@@ -149,6 +174,7 @@ struct ab8500_charger_info {
149 int charger_voltage; 174 int charger_voltage;
150 int cv_active; 175 int cv_active;
151 bool wd_expired; 176 bool wd_expired;
177 int charger_current;
152}; 178};
153 179
154struct ab8500_charger_event_flags { 180struct ab8500_charger_event_flags {
@@ -159,12 +185,14 @@ struct ab8500_charger_event_flags {
159 bool usbchargernotok; 185 bool usbchargernotok;
160 bool chgwdexp; 186 bool chgwdexp;
161 bool vbus_collapse; 187 bool vbus_collapse;
188 bool vbus_drop_end;
162}; 189};
163 190
164struct ab8500_charger_usb_state { 191struct ab8500_charger_usb_state {
165 bool usb_changed;
166 int usb_current; 192 int usb_current;
193 int usb_current_tmp;
167 enum ab8500_usb_state state; 194 enum ab8500_usb_state state;
195 enum ab8500_usb_state state_tmp;
168 spinlock_t usb_lock; 196 spinlock_t usb_lock;
169}; 197};
170 198
@@ -182,11 +210,17 @@ struct ab8500_charger_usb_state {
182 * charger is enabled 210 * charger is enabled
183 * @vbat Battery voltage 211 * @vbat Battery voltage
184 * @old_vbat Previously measured battery voltage 212 * @old_vbat Previously measured battery voltage
213 * @usb_device_is_unrecognised USB device is unrecognised by the hardware
185 * @autopower Indicate if we should have automatic pwron after pwrloss 214 * @autopower Indicate if we should have automatic pwron after pwrloss
186 * @autopower_cfg platform specific power config support for "pwron after pwrloss" 215 * @autopower_cfg platform specific power config support for "pwron after pwrloss"
216 * @invalid_charger_detect_state State when forcing AB to use invalid charger
217 * @is_usb_host: Indicate if last detected USB type is host
218 * @is_aca_rid: Incicate if accessory is ACA type
219 * @current_stepping_sessions:
220 * Counter for current stepping sessions
187 * @parent: Pointer to the struct ab8500 221 * @parent: Pointer to the struct ab8500
188 * @gpadc: Pointer to the struct gpadc 222 * @gpadc: Pointer to the struct gpadc
189 * @bat: Pointer to the abx500_bm platform data 223 * @bm: Platform specific battery management information
190 * @flags: Structure for information about events triggered 224 * @flags: Structure for information about events triggered
191 * @usb_state: Structure for usb stack information 225 * @usb_state: Structure for usb stack information
192 * @ac_chg: AC charger power supply 226 * @ac_chg: AC charger power supply
@@ -195,19 +229,28 @@ struct ab8500_charger_usb_state {
195 * @usb: Structure that holds the USB charger properties 229 * @usb: Structure that holds the USB charger properties
196 * @regu: Pointer to the struct regulator 230 * @regu: Pointer to the struct regulator
197 * @charger_wq: Work queue for the IRQs and checking HW state 231 * @charger_wq: Work queue for the IRQs and checking HW state
232 * @usb_ipt_crnt_lock: Lock to protect VBUS input current setting from mutuals
233 * @pm_lock: Lock to prevent system to suspend
198 * @check_vbat_work Work for checking vbat threshold to adjust vbus current 234 * @check_vbat_work Work for checking vbat threshold to adjust vbus current
199 * @check_hw_failure_work: Work for checking HW state 235 * @check_hw_failure_work: Work for checking HW state
200 * @check_usbchgnotok_work: Work for checking USB charger not ok status 236 * @check_usbchgnotok_work: Work for checking USB charger not ok status
201 * @kick_wd_work: Work for kicking the charger watchdog in case 237 * @kick_wd_work: Work for kicking the charger watchdog in case
202 * of ABB rev 1.* due to the watchog logic bug 238 * of ABB rev 1.* due to the watchog logic bug
239 * @ac_charger_attached_work: Work for checking if AC charger is still
240 * connected
241 * @usb_charger_attached_work: Work for checking if USB charger is still
242 * connected
203 * @ac_work: Work for checking AC charger connection 243 * @ac_work: Work for checking AC charger connection
204 * @detect_usb_type_work: Work for detecting the USB type connected 244 * @detect_usb_type_work: Work for detecting the USB type connected
205 * @usb_link_status_work: Work for checking the new USB link status 245 * @usb_link_status_work: Work for checking the new USB link status
206 * @usb_state_changed_work: Work for checking USB state 246 * @usb_state_changed_work: Work for checking USB state
247 * @attach_work: Work for detecting USB type
248 * @vbus_drop_end_work: Work for detecting VBUS drop end
207 * @check_main_thermal_prot_work: 249 * @check_main_thermal_prot_work:
208 * Work for checking Main thermal status 250 * Work for checking Main thermal status
209 * @check_usb_thermal_prot_work: 251 * @check_usb_thermal_prot_work:
210 * Work for checking USB thermal status 252 * Work for checking USB thermal status
253 * @charger_attached_mutex: For controlling the wakelock
211 */ 254 */
212struct ab8500_charger { 255struct ab8500_charger {
213 struct device *dev; 256 struct device *dev;
@@ -219,11 +262,16 @@ struct ab8500_charger {
219 bool vddadc_en_usb; 262 bool vddadc_en_usb;
220 int vbat; 263 int vbat;
221 int old_vbat; 264 int old_vbat;
265 bool usb_device_is_unrecognised;
222 bool autopower; 266 bool autopower;
223 bool autopower_cfg; 267 bool autopower_cfg;
268 int invalid_charger_detect_state;
269 bool is_usb_host;
270 int is_aca_rid;
271 atomic_t current_stepping_sessions;
224 struct ab8500 *parent; 272 struct ab8500 *parent;
225 struct ab8500_gpadc *gpadc; 273 struct ab8500_gpadc *gpadc;
226 struct abx500_bm_data *bat; 274 struct abx500_bm_data *bm;
227 struct ab8500_charger_event_flags flags; 275 struct ab8500_charger_event_flags flags;
228 struct ab8500_charger_usb_state usb_state; 276 struct ab8500_charger_usb_state usb_state;
229 struct ux500_charger ac_chg; 277 struct ux500_charger ac_chg;
@@ -232,18 +280,24 @@ struct ab8500_charger {
232 struct ab8500_charger_info usb; 280 struct ab8500_charger_info usb;
233 struct regulator *regu; 281 struct regulator *regu;
234 struct workqueue_struct *charger_wq; 282 struct workqueue_struct *charger_wq;
283 struct mutex usb_ipt_crnt_lock;
235 struct delayed_work check_vbat_work; 284 struct delayed_work check_vbat_work;
236 struct delayed_work check_hw_failure_work; 285 struct delayed_work check_hw_failure_work;
237 struct delayed_work check_usbchgnotok_work; 286 struct delayed_work check_usbchgnotok_work;
238 struct delayed_work kick_wd_work; 287 struct delayed_work kick_wd_work;
288 struct delayed_work usb_state_changed_work;
289 struct delayed_work attach_work;
290 struct delayed_work ac_charger_attached_work;
291 struct delayed_work usb_charger_attached_work;
292 struct delayed_work vbus_drop_end_work;
239 struct work_struct ac_work; 293 struct work_struct ac_work;
240 struct work_struct detect_usb_type_work; 294 struct work_struct detect_usb_type_work;
241 struct work_struct usb_link_status_work; 295 struct work_struct usb_link_status_work;
242 struct work_struct usb_state_changed_work;
243 struct work_struct check_main_thermal_prot_work; 296 struct work_struct check_main_thermal_prot_work;
244 struct work_struct check_usb_thermal_prot_work; 297 struct work_struct check_usb_thermal_prot_work;
245 struct usb_phy *usb_phy; 298 struct usb_phy *usb_phy;
246 struct notifier_block nb; 299 struct notifier_block nb;
300 struct mutex charger_attached_mutex;
247}; 301};
248 302
249/* AC properties */ 303/* AC properties */
@@ -267,50 +321,65 @@ static enum power_supply_property ab8500_charger_usb_props[] = {
267 POWER_SUPPLY_PROP_CURRENT_NOW, 321 POWER_SUPPLY_PROP_CURRENT_NOW,
268}; 322};
269 323
270/** 324/*
271 * ab8500_power_loss_handling - set how we handle powerloss. 325 * Function for enabling and disabling sw fallback mode
272 * @di: pointer to the ab8500_charger structure 326 * should always be disabled when no charger is connected.
273 *
274 * Magic nummbers are from STE HW department.
275 */ 327 */
276static void ab8500_power_loss_handling(struct ab8500_charger *di) 328static void ab8500_enable_disable_sw_fallback(struct ab8500_charger *di,
329 bool fallback)
277{ 330{
331 u8 val;
278 u8 reg; 332 u8 reg;
333 u8 bank;
334 u8 bit;
279 int ret; 335 int ret;
280 336
281 dev_dbg(di->dev, "Autopower : %d\n", di->autopower); 337 dev_dbg(di->dev, "SW Fallback: %d\n", fallback);
282 338
283 /* read the autopower register */ 339 if (is_ab8500(di->parent)) {
284 ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg); 340 bank = 0x15;
285 if (ret) { 341 reg = 0x0;
286 dev_err(di->dev, "%d write failed\n", __LINE__); 342 bit = 3;
287 return; 343 } else {
344 bank = AB8500_SYS_CTRL1_BLOCK;
345 reg = AB8500_SW_CONTROL_FALLBACK;
346 bit = 0;
288 } 347 }
289 348
290 /* enable the OPT emulation registers */ 349 /* read the register containing fallback bit */
291 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2); 350 ret = abx500_get_register_interruptible(di->dev, bank, reg, &val);
292 if (ret) { 351 if (ret < 0) {
293 dev_err(di->dev, "%d write failed\n", __LINE__); 352 dev_err(di->dev, "%d read failed\n", __LINE__);
294 return; 353 return;
295 } 354 }
296 355
297 if (di->autopower) 356 if (is_ab8500(di->parent)) {
298 reg |= 0x8; 357 /* enable the OPT emulation registers */
358 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
359 if (ret) {
360 dev_err(di->dev, "%d write failed\n", __LINE__);
361 goto disable_otp;
362 }
363 }
364
365 if (fallback)
366 val |= (1 << bit);
299 else 367 else
300 reg &= ~0x8; 368 val &= ~(1 << bit);
301 369
302 /* write back the changed value to autopower reg */ 370 /* write back the changed fallback bit value to register */
303 ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg); 371 ret = abx500_set_register_interruptible(di->dev, bank, reg, val);
304 if (ret) { 372 if (ret) {
305 dev_err(di->dev, "%d write failed\n", __LINE__); 373 dev_err(di->dev, "%d write failed\n", __LINE__);
306 return;
307 } 374 }
308 375
309 /* disable the set OTP registers again */ 376disable_otp:
310 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0); 377 if (is_ab8500(di->parent)) {
311 if (ret) { 378 /* disable the set OTP registers again */
312 dev_err(di->dev, "%d write failed\n", __LINE__); 379 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
313 return; 380 if (ret) {
381 dev_err(di->dev, "%d write failed\n", __LINE__);
382 }
314 } 383 }
315} 384}
316 385
@@ -329,12 +398,12 @@ static void ab8500_power_supply_changed(struct ab8500_charger *di,
329 !di->ac.charger_connected && 398 !di->ac.charger_connected &&
330 di->autopower) { 399 di->autopower) {
331 di->autopower = false; 400 di->autopower = false;
332 ab8500_power_loss_handling(di); 401 ab8500_enable_disable_sw_fallback(di, false);
333 } else if (!di->autopower && 402 } else if (!di->autopower &&
334 (di->ac.charger_connected || 403 (di->ac.charger_connected ||
335 di->usb.charger_connected)) { 404 di->usb.charger_connected)) {
336 di->autopower = true; 405 di->autopower = true;
337 ab8500_power_loss_handling(di); 406 ab8500_enable_disable_sw_fallback(di, true);
338 } 407 }
339 } 408 }
340 power_supply_changed(psy); 409 power_supply_changed(psy);
@@ -347,6 +416,19 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
347 dev_dbg(di->dev, "USB connected:%i\n", connected); 416 dev_dbg(di->dev, "USB connected:%i\n", connected);
348 di->usb.charger_connected = connected; 417 di->usb.charger_connected = connected;
349 sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present"); 418 sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
419
420 if (connected) {
421 mutex_lock(&di->charger_attached_mutex);
422 mutex_unlock(&di->charger_attached_mutex);
423
424 queue_delayed_work(di->charger_wq,
425 &di->usb_charger_attached_work,
426 HZ);
427 } else {
428 cancel_delayed_work_sync(&di->usb_charger_attached_work);
429 mutex_lock(&di->charger_attached_mutex);
430 mutex_unlock(&di->charger_attached_mutex);
431 }
350 } 432 }
351} 433}
352 434
@@ -500,6 +582,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
500/** 582/**
501 * ab8500_charger_detect_chargers() - Detect the connected chargers 583 * ab8500_charger_detect_chargers() - Detect the connected chargers
502 * @di: pointer to the ab8500_charger structure 584 * @di: pointer to the ab8500_charger structure
585 * @probe: if probe, don't delay and wait for HW
503 * 586 *
504 * Returns the type of charger connected. 587 * Returns the type of charger connected.
505 * For USB it will not mean we can actually charge from it 588 * For USB it will not mean we can actually charge from it
@@ -513,7 +596,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
513 * USB_PW_CONN if the USB power supply is connected 596 * USB_PW_CONN if the USB power supply is connected
514 * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected 597 * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
515 */ 598 */
516static int ab8500_charger_detect_chargers(struct ab8500_charger *di) 599static int ab8500_charger_detect_chargers(struct ab8500_charger *di, bool probe)
517{ 600{
518 int result = NO_PW_CONN; 601 int result = NO_PW_CONN;
519 int ret; 602 int ret;
@@ -531,13 +614,25 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
531 result = AC_PW_CONN; 614 result = AC_PW_CONN;
532 615
533 /* Check for USB charger */ 616 /* Check for USB charger */
617
618 if (!probe) {
619 /*
620 * AB8500 says VBUS_DET_DBNC1 & VBUS_DET_DBNC100
621 * when disconnecting ACA even though no
622 * charger was connected. Try waiting a little
623 * longer than the 100 ms of VBUS_DET_DBNC100...
624 */
625 msleep(110);
626 }
534 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER, 627 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
535 AB8500_CH_USBCH_STAT1_REG, &val); 628 AB8500_CH_USBCH_STAT1_REG, &val);
536 if (ret < 0) { 629 if (ret < 0) {
537 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 630 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
538 return ret; 631 return ret;
539 } 632 }
540 633 dev_dbg(di->dev,
634 "%s AB8500_CH_USBCH_STAT1_REG %x\n", __func__,
635 val);
541 if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100)) 636 if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
542 result |= USB_PW_CONN; 637 result |= USB_PW_CONN;
543 638
@@ -554,31 +649,53 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
554 * Returns error code in case of failure else 0 on success 649 * Returns error code in case of failure else 0 on success
555 */ 650 */
556static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, 651static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
557 enum ab8500_charger_link_status link_status) 652 enum ab8500_charger_link_status link_status)
558{ 653{
559 int ret = 0; 654 int ret = 0;
560 655
656 di->usb_device_is_unrecognised = false;
657
658 /*
659 * Platform only supports USB 2.0.
660 * This means that charging current from USB source
661 * is maximum 500 mA. Every occurence of USB_STAT_*_HOST_*
662 * should set USB_CH_IP_CUR_LVL_0P5.
663 */
664
561 switch (link_status) { 665 switch (link_status) {
562 case USB_STAT_STD_HOST_NC: 666 case USB_STAT_STD_HOST_NC:
563 case USB_STAT_STD_HOST_C_NS: 667 case USB_STAT_STD_HOST_C_NS:
564 case USB_STAT_STD_HOST_C_S: 668 case USB_STAT_STD_HOST_C_S:
565 dev_dbg(di->dev, "USB Type - Standard host is " 669 dev_dbg(di->dev, "USB Type - Standard host is "
566 "detected through USB driver\n"); 670 "detected through USB driver\n");
567 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09; 671 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
672 di->is_usb_host = true;
673 di->is_aca_rid = 0;
568 break; 674 break;
569 case USB_STAT_HOST_CHG_HS_CHIRP: 675 case USB_STAT_HOST_CHG_HS_CHIRP:
570 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5; 676 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
677 di->is_usb_host = true;
678 di->is_aca_rid = 0;
571 break; 679 break;
572 case USB_STAT_HOST_CHG_HS: 680 case USB_STAT_HOST_CHG_HS:
681 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
682 di->is_usb_host = true;
683 di->is_aca_rid = 0;
684 break;
573 case USB_STAT_ACA_RID_C_HS: 685 case USB_STAT_ACA_RID_C_HS:
574 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9; 686 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
687 di->is_usb_host = false;
688 di->is_aca_rid = 0;
575 break; 689 break;
576 case USB_STAT_ACA_RID_A: 690 case USB_STAT_ACA_RID_A:
577 /* 691 /*
578 * Dedicated charger level minus maximum current accessory 692 * Dedicated charger level minus maximum current accessory
579 * can consume (300mA). Closest level is 1100mA 693 * can consume (900mA). Closest level is 500mA
580 */ 694 */
581 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1; 695 dev_dbg(di->dev, "USB_STAT_ACA_RID_A detected\n");
696 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
697 di->is_usb_host = false;
698 di->is_aca_rid = 1;
582 break; 699 break;
583 case USB_STAT_ACA_RID_B: 700 case USB_STAT_ACA_RID_B:
584 /* 701 /*
@@ -586,34 +703,68 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
586 * 100mA for potential accessory). Closest level is 1300mA 703 * 100mA for potential accessory). Closest level is 1300mA
587 */ 704 */
588 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3; 705 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
706 dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
707 di->max_usb_in_curr);
708 di->is_usb_host = false;
709 di->is_aca_rid = 1;
589 break; 710 break;
590 case USB_STAT_DEDICATED_CHG:
591 case USB_STAT_HOST_CHG_NM: 711 case USB_STAT_HOST_CHG_NM:
712 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
713 di->is_usb_host = true;
714 di->is_aca_rid = 0;
715 break;
716 case USB_STAT_DEDICATED_CHG:
717 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
718 di->is_usb_host = false;
719 di->is_aca_rid = 0;
720 break;
592 case USB_STAT_ACA_RID_C_HS_CHIRP: 721 case USB_STAT_ACA_RID_C_HS_CHIRP:
593 case USB_STAT_ACA_RID_C_NM: 722 case USB_STAT_ACA_RID_C_NM:
594 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5; 723 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
724 di->is_usb_host = false;
725 di->is_aca_rid = 1;
595 break; 726 break;
596 case USB_STAT_RESERVED:
597 /*
598 * This state is used to indicate that VBUS has dropped below
599 * the detection level 4 times in a row. This is due to the
600 * charger output current is set to high making the charger
601 * voltage collapse. This have to be propagated through to
602 * chargalg. This is done using the property
603 * POWER_SUPPLY_PROP_CURRENT_AVG = 1
604 */
605 di->flags.vbus_collapse = true;
606 dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
607 "VBUS has collapsed\n");
608 ret = -1;
609 break;
610 case USB_STAT_HM_IDGND:
611 case USB_STAT_NOT_CONFIGURED: 727 case USB_STAT_NOT_CONFIGURED:
612 case USB_STAT_NOT_VALID_LINK: 728 if (di->vbus_detected) {
729 di->usb_device_is_unrecognised = true;
730 dev_dbg(di->dev, "USB Type - Legacy charger.\n");
731 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
732 break;
733 }
734 case USB_STAT_HM_IDGND:
613 dev_err(di->dev, "USB Type - Charging not allowed\n"); 735 dev_err(di->dev, "USB Type - Charging not allowed\n");
614 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05; 736 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
615 ret = -ENXIO; 737 ret = -ENXIO;
616 break; 738 break;
739 case USB_STAT_RESERVED:
740 if (is_ab8500(di->parent)) {
741 di->flags.vbus_collapse = true;
742 dev_err(di->dev, "USB Type - USB_STAT_RESERVED "
743 "VBUS has collapsed\n");
744 ret = -ENXIO;
745 break;
746 }
747 if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
748 dev_dbg(di->dev, "USB Type - Charging not allowed\n");
749 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
750 dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
751 link_status, di->max_usb_in_curr);
752 ret = -ENXIO;
753 break;
754 }
755 break;
756 case USB_STAT_CARKIT_1:
757 case USB_STAT_CARKIT_2:
758 case USB_STAT_ACA_DOCK_CHARGER:
759 case USB_STAT_CHARGER_LINE_1:
760 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
761 dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
762 di->max_usb_in_curr);
763 case USB_STAT_NOT_VALID_LINK:
764 dev_err(di->dev, "USB Type invalid - try charging anyway\n");
765 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
766 break;
767
617 default: 768 default:
618 dev_err(di->dev, "USB Type - Unknown\n"); 769 dev_err(di->dev, "USB Type - Unknown\n");
619 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05; 770 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
@@ -645,8 +796,14 @@ static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
645 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 796 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
646 return ret; 797 return ret;
647 } 798 }
648 ret = abx500_get_register_interruptible(di->dev, AB8500_USB, 799 if (is_ab8500(di->parent)) {
649 AB8500_USB_LINE_STAT_REG, &val); 800 ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
801 AB8500_USB_LINE_STAT_REG, &val);
802 } else {
803 if (is_ab9540(di->parent) || is_ab8505(di->parent))
804 ret = abx500_get_register_interruptible(di->dev,
805 AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
806 }
650 if (ret < 0) { 807 if (ret < 0) {
651 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 808 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
652 return ret; 809 return ret;
@@ -682,16 +839,25 @@ static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
682 ret = abx500_get_register_interruptible(di->dev, 839 ret = abx500_get_register_interruptible(di->dev,
683 AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, 840 AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
684 &val); 841 &val);
842 dev_dbg(di->dev, "%s AB8500_IT_SOURCE21_REG %x\n",
843 __func__, val);
685 if (ret < 0) { 844 if (ret < 0) {
686 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 845 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
687 return ret; 846 return ret;
688 } 847 }
689 ret = abx500_get_register_interruptible(di->dev, AB8500_USB, 848
690 AB8500_USB_LINE_STAT_REG, &val); 849 if (is_ab8500(di->parent))
850 ret = abx500_get_register_interruptible(di->dev,
851 AB8500_USB, AB8500_USB_LINE_STAT_REG, &val);
852 else
853 ret = abx500_get_register_interruptible(di->dev,
854 AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
691 if (ret < 0) { 855 if (ret < 0) {
692 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 856 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
693 return ret; 857 return ret;
694 } 858 }
859 dev_dbg(di->dev, "%s AB8500_USB_LINE_STAT_REG %x\n", __func__,
860 val);
695 /* 861 /*
696 * Until the IT source register is read the UsbLineStatus 862 * Until the IT source register is read the UsbLineStatus
697 * register is not updated, hence doing the same 863 * register is not updated, hence doing the same
@@ -936,6 +1102,144 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
936} 1102}
937 1103
938/** 1104/**
1105 * ab8500_charger_set_current() - set charger current
1106 * @di: pointer to the ab8500_charger structure
1107 * @ich: charger current, in mA
1108 * @reg: select what charger register to set
1109 *
1110 * Set charger current.
1111 * There is no state machine in the AB to step up/down the charger
1112 * current to avoid dips and spikes on MAIN, VBUS and VBAT when
1113 * charging is started. Instead we need to implement
1114 * this charger current step-up/down here.
1115 * Returns error code in case of failure else 0(on success)
1116 */
1117static int ab8500_charger_set_current(struct ab8500_charger *di,
1118 int ich, int reg)
1119{
1120 int ret = 0;
1121 int auto_curr_index, curr_index, prev_curr_index, shift_value, i;
1122 u8 reg_value;
1123 u32 step_udelay;
1124 bool no_stepping = false;
1125
1126 atomic_inc(&di->current_stepping_sessions);
1127
1128 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
1129 reg, &reg_value);
1130 if (ret < 0) {
1131 dev_err(di->dev, "%s read failed\n", __func__);
1132 goto exit_set_current;
1133 }
1134
1135 switch (reg) {
1136 case AB8500_MCH_IPT_CURLVL_REG:
1137 shift_value = MAIN_CH_INPUT_CURR_SHIFT;
1138 prev_curr_index = (reg_value >> shift_value);
1139 curr_index = ab8500_current_to_regval(ich);
1140 step_udelay = STEP_UDELAY;
1141 if (!di->ac.charger_connected)
1142 no_stepping = true;
1143 break;
1144 case AB8500_USBCH_IPT_CRNTLVL_REG:
1145 shift_value = VBUS_IN_CURR_LIM_SHIFT;
1146 prev_curr_index = (reg_value >> shift_value);
1147 curr_index = ab8500_vbus_in_curr_to_regval(ich);
1148 step_udelay = STEP_UDELAY * 100;
1149
1150 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
1151 AB8500_CH_USBCH_STAT2_REG, &reg_value);
1152 if (ret < 0) {
1153 dev_err(di->dev, "%s read failed\n", __func__);
1154 goto exit_set_current;
1155 }
1156 auto_curr_index =
1157 reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT;
1158
1159 dev_dbg(di->dev, "%s Auto VBUS curr is %d mA\n",
1160 __func__,
1161 ab8500_charger_vbus_in_curr_map[auto_curr_index]);
1162
1163 prev_curr_index = min(prev_curr_index, auto_curr_index);
1164
1165 if (!di->usb.charger_connected)
1166 no_stepping = true;
1167 break;
1168 case AB8500_CH_OPT_CRNTLVL_REG:
1169 shift_value = 0;
1170 prev_curr_index = (reg_value >> shift_value);
1171 curr_index = ab8500_current_to_regval(ich);
1172 step_udelay = STEP_UDELAY;
1173 if (curr_index && (curr_index - prev_curr_index) > 1)
1174 step_udelay *= 100;
1175
1176 if (!di->usb.charger_connected && !di->ac.charger_connected)
1177 no_stepping = true;
1178
1179 break;
1180 default:
1181 dev_err(di->dev, "%s current register not valid\n", __func__);
1182 ret = -ENXIO;
1183 goto exit_set_current;
1184 }
1185
1186 if (curr_index < 0) {
1187 dev_err(di->dev, "requested current limit out-of-range\n");
1188 ret = -ENXIO;
1189 goto exit_set_current;
1190 }
1191
1192 /* only update current if it's been changed */
1193 if (prev_curr_index == curr_index) {
1194 dev_dbg(di->dev, "%s current not changed for reg: 0x%02x\n",
1195 __func__, reg);
1196 ret = 0;
1197 goto exit_set_current;
1198 }
1199
1200 dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n",
1201 __func__, ich, reg);
1202
1203 if (no_stepping) {
1204 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1205 reg, (u8)curr_index << shift_value);
1206 if (ret)
1207 dev_err(di->dev, "%s write failed\n", __func__);
1208 } else if (prev_curr_index > curr_index) {
1209 for (i = prev_curr_index - 1; i >= curr_index; i--) {
1210 dev_dbg(di->dev, "curr change_1 to: %x for 0x%02x\n",
1211 (u8) i << shift_value, reg);
1212 ret = abx500_set_register_interruptible(di->dev,
1213 AB8500_CHARGER, reg, (u8)i << shift_value);
1214 if (ret) {
1215 dev_err(di->dev, "%s write failed\n", __func__);
1216 goto exit_set_current;
1217 }
1218 if (i != curr_index)
1219 usleep_range(step_udelay, step_udelay * 2);
1220 }
1221 } else {
1222 for (i = prev_curr_index + 1; i <= curr_index; i++) {
1223 dev_dbg(di->dev, "curr change_2 to: %x for 0x%02x\n",
1224 (u8)i << shift_value, reg);
1225 ret = abx500_set_register_interruptible(di->dev,
1226 AB8500_CHARGER, reg, (u8)i << shift_value);
1227 if (ret) {
1228 dev_err(di->dev, "%s write failed\n", __func__);
1229 goto exit_set_current;
1230 }
1231 if (i != curr_index)
1232 usleep_range(step_udelay, step_udelay * 2);
1233 }
1234 }
1235
1236exit_set_current:
1237 atomic_dec(&di->current_stepping_sessions);
1238
1239 return ret;
1240}
1241
1242/**
939 * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit 1243 * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
940 * @di: pointer to the ab8500_charger structure 1244 * @di: pointer to the ab8500_charger structure
941 * @ich_in: charger input current limit 1245 * @ich_in: charger input current limit
@@ -946,12 +1250,11 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
946static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di, 1250static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
947 int ich_in) 1251 int ich_in)
948{ 1252{
949 int ret;
950 int input_curr_index;
951 int min_value; 1253 int min_value;
1254 int ret;
952 1255
953 /* We should always use to lowest current limit */ 1256 /* We should always use to lowest current limit */
954 min_value = min(di->bat->chg_params->usb_curr_max, ich_in); 1257 min_value = min(di->bm->chg_params->usb_curr_max, ich_in);
955 1258
956 switch (min_value) { 1259 switch (min_value) {
957 case 100: 1260 case 100:
@@ -966,22 +1269,47 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
966 break; 1269 break;
967 } 1270 }
968 1271
969 input_curr_index = ab8500_vbus_in_curr_to_regval(min_value); 1272 dev_info(di->dev, "VBUS input current limit set to %d mA\n", min_value);
970 if (input_curr_index < 0) {
971 dev_err(di->dev, "VBUS input current limit too high\n");
972 return -ENXIO;
973 }
974 1273
975 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, 1274 mutex_lock(&di->usb_ipt_crnt_lock);
976 AB8500_USBCH_IPT_CRNTLVL_REG, 1275 ret = ab8500_charger_set_current(di, min_value,
977 input_curr_index << VBUS_IN_CURR_LIM_SHIFT); 1276 AB8500_USBCH_IPT_CRNTLVL_REG);
978 if (ret) 1277 mutex_unlock(&di->usb_ipt_crnt_lock);
979 dev_err(di->dev, "%s write failed\n", __func__);
980 1278
981 return ret; 1279 return ret;
982} 1280}
983 1281
984/** 1282/**
1283 * ab8500_charger_set_main_in_curr() - set main charger input current
1284 * @di: pointer to the ab8500_charger structure
1285 * @ich_in: input charger current, in mA
1286 *
1287 * Set main charger input current.
1288 * Returns error code in case of failure else 0(on success)
1289 */
1290static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di,
1291 int ich_in)
1292{
1293 return ab8500_charger_set_current(di, ich_in,
1294 AB8500_MCH_IPT_CURLVL_REG);
1295}
1296
1297/**
1298 * ab8500_charger_set_output_curr() - set charger output current
1299 * @di: pointer to the ab8500_charger structure
1300 * @ich_out: output charger current, in mA
1301 *
1302 * Set charger output current.
1303 * Returns error code in case of failure else 0(on success)
1304 */
1305static int ab8500_charger_set_output_curr(struct ab8500_charger *di,
1306 int ich_out)
1307{
1308 return ab8500_charger_set_current(di, ich_out,
1309 AB8500_CH_OPT_CRNTLVL_REG);
1310}
1311
1312/**
985 * ab8500_charger_led_en() - turn on/off chargign led 1313 * ab8500_charger_led_en() - turn on/off chargign led
986 * @di: pointer to the ab8500_charger structure 1314 * @di: pointer to the ab8500_charger structure
987 * @on: flag to turn on/off the chargign led 1315 * @on: flag to turn on/off the chargign led
@@ -1074,7 +1402,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
1074 volt_index = ab8500_voltage_to_regval(vset); 1402 volt_index = ab8500_voltage_to_regval(vset);
1075 curr_index = ab8500_current_to_regval(iset); 1403 curr_index = ab8500_current_to_regval(iset);
1076 input_curr_index = ab8500_current_to_regval( 1404 input_curr_index = ab8500_current_to_regval(
1077 di->bat->chg_params->ac_curr_max); 1405 di->bm->chg_params->ac_curr_max);
1078 if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) { 1406 if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
1079 dev_err(di->dev, 1407 dev_err(di->dev,
1080 "Charger voltage or current too high, " 1408 "Charger voltage or current too high, "
@@ -1090,23 +1418,24 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
1090 return ret; 1418 return ret;
1091 } 1419 }
1092 /* MainChInputCurr: current that can be drawn from the charger*/ 1420 /* MainChInputCurr: current that can be drawn from the charger*/
1093 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, 1421 ret = ab8500_charger_set_main_in_curr(di,
1094 AB8500_MCH_IPT_CURLVL_REG, 1422 di->bm->chg_params->ac_curr_max);
1095 input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
1096 if (ret) { 1423 if (ret) {
1097 dev_err(di->dev, "%s write failed\n", __func__); 1424 dev_err(di->dev, "%s Failed to set MainChInputCurr\n",
1425 __func__);
1098 return ret; 1426 return ret;
1099 } 1427 }
1100 /* ChOutputCurentLevel: protected output current */ 1428 /* ChOutputCurentLevel: protected output current */
1101 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, 1429 ret = ab8500_charger_set_output_curr(di, iset);
1102 AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
1103 if (ret) { 1430 if (ret) {
1104 dev_err(di->dev, "%s write failed\n", __func__); 1431 dev_err(di->dev, "%s "
1432 "Failed to set ChOutputCurentLevel\n",
1433 __func__);
1105 return ret; 1434 return ret;
1106 } 1435 }
1107 1436
1108 /* Check if VBAT overshoot control should be enabled */ 1437 /* Check if VBAT overshoot control should be enabled */
1109 if (!di->bat->enable_overshoot) 1438 if (!di->bm->enable_overshoot)
1110 overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N; 1439 overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
1111 1440
1112 /* Enable Main Charger */ 1441 /* Enable Main Charger */
@@ -1158,12 +1487,11 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
1158 return ret; 1487 return ret;
1159 } 1488 }
1160 1489
1161 ret = abx500_set_register_interruptible(di->dev, 1490 ret = ab8500_charger_set_output_curr(di, 0);
1162 AB8500_CHARGER,
1163 AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
1164 if (ret) { 1491 if (ret) {
1165 dev_err(di->dev, 1492 dev_err(di->dev, "%s "
1166 "%s write failed\n", __func__); 1493 "Failed to set ChOutputCurentLevel\n",
1494 __func__);
1167 return ret; 1495 return ret;
1168 } 1496 }
1169 } else { 1497 } else {
@@ -1259,24 +1587,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
1259 dev_err(di->dev, "%s write failed\n", __func__); 1587 dev_err(di->dev, "%s write failed\n", __func__);
1260 return ret; 1588 return ret;
1261 } 1589 }
1262 /* USBChInputCurr: current that can be drawn from the usb */
1263 ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
1264 if (ret) {
1265 dev_err(di->dev, "setting USBChInputCurr failed\n");
1266 return ret;
1267 }
1268 /* ChOutputCurentLevel: protected output current */
1269 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1270 AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
1271 if (ret) {
1272 dev_err(di->dev, "%s write failed\n", __func__);
1273 return ret;
1274 }
1275 /* Check if VBAT overshoot control should be enabled */ 1590 /* Check if VBAT overshoot control should be enabled */
1276 if (!di->bat->enable_overshoot) 1591 if (!di->bm->enable_overshoot)
1277 overshoot = USB_CHG_NO_OVERSHOOT_ENA_N; 1592 overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
1278 1593
1279 /* Enable USB Charger */ 1594 /* Enable USB Charger */
1595 dev_dbg(di->dev,
1596 "Enabling USB with write to AB8500_USBCH_CTRL1_REG\n");
1280 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, 1597 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1281 AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot); 1598 AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
1282 if (ret) { 1599 if (ret) {
@@ -1289,11 +1606,29 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
1289 if (ret < 0) 1606 if (ret < 0)
1290 dev_err(di->dev, "failed to enable LED\n"); 1607 dev_err(di->dev, "failed to enable LED\n");
1291 1608
1609 di->usb.charger_online = 1;
1610
1611 /* USBChInputCurr: current that can be drawn from the usb */
1612 ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
1613 if (ret) {
1614 dev_err(di->dev, "setting USBChInputCurr failed\n");
1615 return ret;
1616 }
1617
1618 /* ChOutputCurentLevel: protected output current */
1619 ret = ab8500_charger_set_output_curr(di, ich_out);
1620 if (ret) {
1621 dev_err(di->dev, "%s "
1622 "Failed to set ChOutputCurentLevel\n",
1623 __func__);
1624 return ret;
1625 }
1626
1292 queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ); 1627 queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
1293 1628
1294 di->usb.charger_online = 1;
1295 } else { 1629 } else {
1296 /* Disable USB charging */ 1630 /* Disable USB charging */
1631 dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
1297 ret = abx500_set_register_interruptible(di->dev, 1632 ret = abx500_set_register_interruptible(di->dev,
1298 AB8500_CHARGER, 1633 AB8500_CHARGER,
1299 AB8500_USBCH_CTRL1_REG, 0); 1634 AB8500_USBCH_CTRL1_REG, 0);
@@ -1306,7 +1641,21 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
1306 ret = ab8500_charger_led_en(di, false); 1641 ret = ab8500_charger_led_en(di, false);
1307 if (ret < 0) 1642 if (ret < 0)
1308 dev_err(di->dev, "failed to disable LED\n"); 1643 dev_err(di->dev, "failed to disable LED\n");
1644 /* USBChInputCurr: current that can be drawn from the usb */
1645 ret = ab8500_charger_set_vbus_in_curr(di, 0);
1646 if (ret) {
1647 dev_err(di->dev, "setting USBChInputCurr failed\n");
1648 return ret;
1649 }
1309 1650
1651 /* ChOutputCurentLevel: protected output current */
1652 ret = ab8500_charger_set_output_curr(di, 0);
1653 if (ret) {
1654 dev_err(di->dev, "%s "
1655 "Failed to reset ChOutputCurentLevel\n",
1656 __func__);
1657 return ret;
1658 }
1310 di->usb.charger_online = 0; 1659 di->usb.charger_online = 0;
1311 di->usb.wd_expired = false; 1660 di->usb.wd_expired = false;
1312 1661
@@ -1366,7 +1715,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
1366 int ich_out) 1715 int ich_out)
1367{ 1716{
1368 int ret; 1717 int ret;
1369 int curr_index;
1370 struct ab8500_charger *di; 1718 struct ab8500_charger *di;
1371 1719
1372 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS) 1720 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
@@ -1376,18 +1724,11 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
1376 else 1724 else
1377 return -ENXIO; 1725 return -ENXIO;
1378 1726
1379 curr_index = ab8500_current_to_regval(ich_out); 1727 ret = ab8500_charger_set_output_curr(di, ich_out);
1380 if (curr_index < 0) {
1381 dev_err(di->dev,
1382 "Charger current too high, "
1383 "charging not started\n");
1384 return -ENXIO;
1385 }
1386
1387 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1388 AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
1389 if (ret) { 1728 if (ret) {
1390 dev_err(di->dev, "%s write failed\n", __func__); 1729 dev_err(di->dev, "%s "
1730 "Failed to set ChOutputCurentLevel\n",
1731 __func__);
1391 return ret; 1732 return ret;
1392 } 1733 }
1393 1734
@@ -1597,7 +1938,7 @@ static void ab8500_charger_ac_work(struct work_struct *work)
1597 * synchronously, we have the check if the main charger is 1938 * synchronously, we have the check if the main charger is
1598 * connected by reading the status register 1939 * connected by reading the status register
1599 */ 1940 */
1600 ret = ab8500_charger_detect_chargers(di); 1941 ret = ab8500_charger_detect_chargers(di, false);
1601 if (ret < 0) 1942 if (ret < 0)
1602 return; 1943 return;
1603 1944
@@ -1612,6 +1953,84 @@ static void ab8500_charger_ac_work(struct work_struct *work)
1612 sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present"); 1953 sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
1613} 1954}
1614 1955
1956static void ab8500_charger_usb_attached_work(struct work_struct *work)
1957{
1958 struct ab8500_charger *di = container_of(work,
1959 struct ab8500_charger,
1960 usb_charger_attached_work.work);
1961 int usbch = (USB_CH_VBUSDROP | USB_CH_VBUSDETDBNC);
1962 int ret, i;
1963 u8 statval;
1964
1965 for (i = 0; i < 10; i++) {
1966 ret = abx500_get_register_interruptible(di->dev,
1967 AB8500_CHARGER,
1968 AB8500_CH_USBCH_STAT1_REG,
1969 &statval);
1970 if (ret < 0) {
1971 dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
1972 goto reschedule;
1973 }
1974 if ((statval & usbch) != usbch)
1975 goto reschedule;
1976
1977 msleep(CHARGER_STATUS_POLL);
1978 }
1979
1980 ab8500_charger_usb_en(&di->usb_chg, 0, 0, 0);
1981
1982 mutex_lock(&di->charger_attached_mutex);
1983 mutex_unlock(&di->charger_attached_mutex);
1984
1985 return;
1986
1987reschedule:
1988 queue_delayed_work(di->charger_wq,
1989 &di->usb_charger_attached_work,
1990 HZ);
1991}
1992
1993static void ab8500_charger_ac_attached_work(struct work_struct *work)
1994{
1995
1996 struct ab8500_charger *di = container_of(work,
1997 struct ab8500_charger,
1998 ac_charger_attached_work.work);
1999 int mainch = (MAIN_CH_STATUS2_MAINCHGDROP |
2000 MAIN_CH_STATUS2_MAINCHARGERDETDBNC);
2001 int ret, i;
2002 u8 statval;
2003
2004 for (i = 0; i < 10; i++) {
2005 ret = abx500_get_register_interruptible(di->dev,
2006 AB8500_CHARGER,
2007 AB8500_CH_STATUS2_REG,
2008 &statval);
2009 if (ret < 0) {
2010 dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
2011 goto reschedule;
2012 }
2013
2014 if ((statval & mainch) != mainch)
2015 goto reschedule;
2016
2017 msleep(CHARGER_STATUS_POLL);
2018 }
2019
2020 ab8500_charger_ac_en(&di->ac_chg, 0, 0, 0);
2021 queue_work(di->charger_wq, &di->ac_work);
2022
2023 mutex_lock(&di->charger_attached_mutex);
2024 mutex_unlock(&di->charger_attached_mutex);
2025
2026 return;
2027
2028reschedule:
2029 queue_delayed_work(di->charger_wq,
2030 &di->ac_charger_attached_work,
2031 HZ);
2032}
2033
1615/** 2034/**
1616 * ab8500_charger_detect_usb_type_work() - work to detect USB type 2035 * ab8500_charger_detect_usb_type_work() - work to detect USB type
1617 * @work: Pointer to the work_struct structure 2036 * @work: Pointer to the work_struct structure
@@ -1630,16 +2049,18 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
1630 * synchronously, we have the check if is 2049 * synchronously, we have the check if is
1631 * connected by reading the status register 2050 * connected by reading the status register
1632 */ 2051 */
1633 ret = ab8500_charger_detect_chargers(di); 2052 ret = ab8500_charger_detect_chargers(di, false);
1634 if (ret < 0) 2053 if (ret < 0)
1635 return; 2054 return;
1636 2055
1637 if (!(ret & USB_PW_CONN)) { 2056 if (!(ret & USB_PW_CONN)) {
1638 di->vbus_detected = 0; 2057 dev_dbg(di->dev, "%s di->vbus_detected = false\n", __func__);
2058 di->vbus_detected = false;
1639 ab8500_charger_set_usb_connected(di, false); 2059 ab8500_charger_set_usb_connected(di, false);
1640 ab8500_power_supply_changed(di, &di->usb_chg.psy); 2060 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1641 } else { 2061 } else {
1642 di->vbus_detected = 1; 2062 dev_dbg(di->dev, "%s di->vbus_detected = true\n", __func__);
2063 di->vbus_detected = true;
1643 2064
1644 if (is_ab8500_1p1_or_earlier(di->parent)) { 2065 if (is_ab8500_1p1_or_earlier(di->parent)) {
1645 ret = ab8500_charger_detect_usb_type(di); 2066 ret = ab8500_charger_detect_usb_type(di);
@@ -1649,7 +2070,8 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
1649 &di->usb_chg.psy); 2070 &di->usb_chg.psy);
1650 } 2071 }
1651 } else { 2072 } else {
1652 /* For ABB cut2.0 and onwards we have an IRQ, 2073 /*
2074 * For ABB cut2.0 and onwards we have an IRQ,
1653 * USB_LINK_STATUS that will be triggered when the USB 2075 * USB_LINK_STATUS that will be triggered when the USB
1654 * link status changes. The exception is USB connected 2076 * link status changes. The exception is USB connected
1655 * during startup. Then we don't get a 2077 * during startup. Then we don't get a
@@ -1670,6 +2092,29 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
1670} 2092}
1671 2093
1672/** 2094/**
2095 * ab8500_charger_usb_link_attach_work() - work to detect USB type
2096 * @work: pointer to the work_struct structure
2097 *
2098 * Detect the type of USB plugged
2099 */
2100static void ab8500_charger_usb_link_attach_work(struct work_struct *work)
2101{
2102 struct ab8500_charger *di =
2103 container_of(work, struct ab8500_charger, attach_work.work);
2104 int ret;
2105
2106 /* Update maximum input current if USB enumeration is not detected */
2107 if (!di->usb.charger_online) {
2108 ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
2109 if (ret)
2110 return;
2111 }
2112
2113 ab8500_charger_set_usb_connected(di, true);
2114 ab8500_power_supply_changed(di, &di->usb_chg.psy);
2115}
2116
2117/**
1673 * ab8500_charger_usb_link_status_work() - work to detect USB type 2118 * ab8500_charger_usb_link_status_work() - work to detect USB type
1674 * @work: pointer to the work_struct structure 2119 * @work: pointer to the work_struct structure
1675 * 2120 *
@@ -1677,7 +2122,9 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
1677 */ 2122 */
1678static void ab8500_charger_usb_link_status_work(struct work_struct *work) 2123static void ab8500_charger_usb_link_status_work(struct work_struct *work)
1679{ 2124{
2125 int detected_chargers;
1680 int ret; 2126 int ret;
2127 u8 val;
1681 2128
1682 struct ab8500_charger *di = container_of(work, 2129 struct ab8500_charger *di = container_of(work,
1683 struct ab8500_charger, usb_link_status_work); 2130 struct ab8500_charger, usb_link_status_work);
@@ -1687,31 +2134,95 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work)
1687 * synchronously, we have the check if is 2134 * synchronously, we have the check if is
1688 * connected by reading the status register 2135 * connected by reading the status register
1689 */ 2136 */
1690 ret = ab8500_charger_detect_chargers(di); 2137 detected_chargers = ab8500_charger_detect_chargers(di, false);
1691 if (ret < 0) 2138 if (detected_chargers < 0)
1692 return; 2139 return;
1693 2140
1694 if (!(ret & USB_PW_CONN)) { 2141 /*
1695 di->vbus_detected = 0; 2142 * Some chargers that breaks the USB spec is
2143 * identified as invalid by AB8500 and it refuse
2144 * to start the charging process. but by jumping
2145 * thru a few hoops it can be forced to start.
2146 */
2147 ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
2148 AB8500_USB_LINE_STAT_REG, &val);
2149 if (ret >= 0)
2150 dev_dbg(di->dev, "UsbLineStatus register = 0x%02x\n", val);
2151 else
2152 dev_dbg(di->dev, "Error reading USB link status\n");
2153
2154 if (detected_chargers & USB_PW_CONN) {
2155 if (((val & AB8500_USB_LINK_STATUS) >> 3) == USB_STAT_NOT_VALID_LINK &&
2156 di->invalid_charger_detect_state == 0) {
2157 dev_dbg(di->dev, "Invalid charger detected, state= 0\n");
2158 /*Enable charger*/
2159 abx500_mask_and_set_register_interruptible(di->dev,
2160 AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, 0x01, 0x01);
2161 /*Enable charger detection*/
2162 abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
2163 AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x01);
2164 di->invalid_charger_detect_state = 1;
2165 /*exit and wait for new link status interrupt.*/
2166 return;
2167
2168 }
2169 if (di->invalid_charger_detect_state == 1) {
2170 dev_dbg(di->dev, "Invalid charger detected, state= 1\n");
2171 /*Stop charger detection*/
2172 abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
2173 AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x00);
2174 /*Check link status*/
2175 ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
2176 AB8500_USB_LINE_STAT_REG, &val);
2177 dev_dbg(di->dev, "USB link status= 0x%02x\n",
2178 (val & AB8500_USB_LINK_STATUS) >> 3);
2179 di->invalid_charger_detect_state = 2;
2180 }
2181 } else {
2182 di->invalid_charger_detect_state = 0;
2183 }
2184
2185 if (!(detected_chargers & USB_PW_CONN)) {
2186 di->vbus_detected = false;
1696 ab8500_charger_set_usb_connected(di, false); 2187 ab8500_charger_set_usb_connected(di, false);
1697 ab8500_power_supply_changed(di, &di->usb_chg.psy); 2188 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1698 } else { 2189 return;
1699 di->vbus_detected = 1; 2190 }
1700 ret = ab8500_charger_read_usb_type(di);
1701 if (!ret) {
1702 /* Update maximum input current */
1703 ret = ab8500_charger_set_vbus_in_curr(di,
1704 di->max_usb_in_curr);
1705 if (ret)
1706 return;
1707 2191
1708 ab8500_charger_set_usb_connected(di, true); 2192 dev_dbg(di->dev,"%s di->vbus_detected = true\n",__func__);
1709 ab8500_power_supply_changed(di, &di->usb_chg.psy); 2193 di->vbus_detected = true;
1710 } else if (ret == -ENXIO) { 2194 ret = ab8500_charger_read_usb_type(di);
2195 if (ret) {
2196 if (ret == -ENXIO) {
1711 /* No valid charger type detected */ 2197 /* No valid charger type detected */
1712 ab8500_charger_set_usb_connected(di, false); 2198 ab8500_charger_set_usb_connected(di, false);
1713 ab8500_power_supply_changed(di, &di->usb_chg.psy); 2199 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1714 } 2200 }
2201 return;
2202 }
2203
2204 if (di->usb_device_is_unrecognised) {
2205 dev_dbg(di->dev,
2206 "Potential Legacy Charger device. "
2207 "Delay work for %d msec for USB enum "
2208 "to finish",
2209 WAIT_ACA_RID_ENUMERATION);
2210 queue_delayed_work(di->charger_wq,
2211 &di->attach_work,
2212 msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
2213 } else if (di->is_aca_rid == 1) {
2214 /* Only wait once */
2215 di->is_aca_rid++;
2216 dev_dbg(di->dev,
2217 "%s Wait %d msec for USB enum to finish",
2218 __func__, WAIT_ACA_RID_ENUMERATION);
2219 queue_delayed_work(di->charger_wq,
2220 &di->attach_work,
2221 msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
2222 } else {
2223 queue_delayed_work(di->charger_wq,
2224 &di->attach_work,
2225 0);
1715 } 2226 }
1716} 2227}
1717 2228
@@ -1721,24 +2232,20 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
1721 unsigned long flags; 2232 unsigned long flags;
1722 2233
1723 struct ab8500_charger *di = container_of(work, 2234 struct ab8500_charger *di = container_of(work,
1724 struct ab8500_charger, usb_state_changed_work); 2235 struct ab8500_charger, usb_state_changed_work.work);
1725 2236
1726 if (!di->vbus_detected) 2237 if (!di->vbus_detected) {
2238 dev_dbg(di->dev,
2239 "%s !di->vbus_detected\n",
2240 __func__);
1727 return; 2241 return;
2242 }
1728 2243
1729 spin_lock_irqsave(&di->usb_state.usb_lock, flags); 2244 spin_lock_irqsave(&di->usb_state.usb_lock, flags);
1730 di->usb_state.usb_changed = false; 2245 di->usb_state.state = di->usb_state.state_tmp;
2246 di->usb_state.usb_current = di->usb_state.usb_current_tmp;
1731 spin_unlock_irqrestore(&di->usb_state.usb_lock, flags); 2247 spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
1732 2248
1733 /*
1734 * wait for some time until you get updates from the usb stack
1735 * and negotiations are completed
1736 */
1737 msleep(250);
1738
1739 if (di->usb_state.usb_changed)
1740 return;
1741
1742 dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n", 2249 dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
1743 __func__, di->usb_state.state, di->usb_state.usb_current); 2250 __func__, di->usb_state.state, di->usb_state.usb_current);
1744 2251
@@ -1892,6 +2399,10 @@ static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
1892 dev_dbg(di->dev, "Main charger unplugged\n"); 2399 dev_dbg(di->dev, "Main charger unplugged\n");
1893 queue_work(di->charger_wq, &di->ac_work); 2400 queue_work(di->charger_wq, &di->ac_work);
1894 2401
2402 cancel_delayed_work_sync(&di->ac_charger_attached_work);
2403 mutex_lock(&di->charger_attached_mutex);
2404 mutex_unlock(&di->charger_attached_mutex);
2405
1895 return IRQ_HANDLED; 2406 return IRQ_HANDLED;
1896} 2407}
1897 2408
@@ -1909,6 +2420,11 @@ static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
1909 dev_dbg(di->dev, "Main charger plugged\n"); 2420 dev_dbg(di->dev, "Main charger plugged\n");
1910 queue_work(di->charger_wq, &di->ac_work); 2421 queue_work(di->charger_wq, &di->ac_work);
1911 2422
2423 mutex_lock(&di->charger_attached_mutex);
2424 mutex_unlock(&di->charger_attached_mutex);
2425 queue_delayed_work(di->charger_wq,
2426 &di->ac_charger_attached_work,
2427 HZ);
1912 return IRQ_HANDLED; 2428 return IRQ_HANDLED;
1913} 2429}
1914 2430
@@ -1971,6 +2487,21 @@ static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
1971 return IRQ_HANDLED; 2487 return IRQ_HANDLED;
1972} 2488}
1973 2489
2490static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
2491{
2492 struct ab8500_charger *di = container_of(work,
2493 struct ab8500_charger, vbus_drop_end_work.work);
2494
2495 di->flags.vbus_drop_end = false;
2496
2497 /* Reset the drop counter */
2498 abx500_set_register_interruptible(di->dev,
2499 AB8500_CHARGER, AB8500_CHARGER_CTRL, 0x01);
2500
2501 if (di->usb.charger_connected)
2502 ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
2503}
2504
1974/** 2505/**
1975 * ab8500_charger_vbusdetf_handler() - VBUS falling detected 2506 * ab8500_charger_vbusdetf_handler() - VBUS falling detected
1976 * @irq: interrupt number 2507 * @irq: interrupt number
@@ -1982,6 +2513,7 @@ static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
1982{ 2513{
1983 struct ab8500_charger *di = _di; 2514 struct ab8500_charger *di = _di;
1984 2515
2516 di->vbus_detected = false;
1985 dev_dbg(di->dev, "VBUS falling detected\n"); 2517 dev_dbg(di->dev, "VBUS falling detected\n");
1986 queue_work(di->charger_wq, &di->detect_usb_type_work); 2518 queue_work(di->charger_wq, &di->detect_usb_type_work);
1987 2519
@@ -2001,6 +2533,7 @@ static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
2001 2533
2002 di->vbus_detected = true; 2534 di->vbus_detected = true;
2003 dev_dbg(di->dev, "VBUS rising detected\n"); 2535 dev_dbg(di->dev, "VBUS rising detected\n");
2536
2004 queue_work(di->charger_wq, &di->detect_usb_type_work); 2537 queue_work(di->charger_wq, &di->detect_usb_type_work);
2005 2538
2006 return IRQ_HANDLED; 2539 return IRQ_HANDLED;
@@ -2109,6 +2642,25 @@ static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
2109} 2642}
2110 2643
2111/** 2644/**
2645 * ab8500_charger_vbuschdropend_handler() - VBUS drop removed
2646 * @irq: interrupt number
2647 * @_di: pointer to the ab8500_charger structure
2648 *
2649 * Returns IRQ status(IRQ_HANDLED)
2650 */
2651static irqreturn_t ab8500_charger_vbuschdropend_handler(int irq, void *_di)
2652{
2653 struct ab8500_charger *di = _di;
2654
2655 dev_dbg(di->dev, "VBUS charger drop ended\n");
2656 di->flags.vbus_drop_end = true;
2657 queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work,
2658 round_jiffies(30 * HZ));
2659
2660 return IRQ_HANDLED;
2661}
2662
2663/**
2112 * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected 2664 * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
2113 * @irq: interrupt number 2665 * @irq: interrupt number
2114 * @_di: pointer to the ab8500_charger structure 2666 * @_di: pointer to the ab8500_charger structure
@@ -2148,6 +2700,7 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
2148 union power_supply_propval *val) 2700 union power_supply_propval *val)
2149{ 2701{
2150 struct ab8500_charger *di; 2702 struct ab8500_charger *di;
2703 int ret;
2151 2704
2152 di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy)); 2705 di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
2153 2706
@@ -2169,7 +2722,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
2169 val->intval = di->ac.charger_connected; 2722 val->intval = di->ac.charger_connected;
2170 break; 2723 break;
2171 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 2724 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
2172 di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di); 2725 ret = ab8500_charger_get_ac_voltage(di);
2726 if (ret >= 0)
2727 di->ac.charger_voltage = ret;
2728 /* On error, use previous value */
2173 val->intval = di->ac.charger_voltage * 1000; 2729 val->intval = di->ac.charger_voltage * 1000;
2174 break; 2730 break;
2175 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 2731 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2181,7 +2737,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
2181 val->intval = di->ac.cv_active; 2737 val->intval = di->ac.cv_active;
2182 break; 2738 break;
2183 case POWER_SUPPLY_PROP_CURRENT_NOW: 2739 case POWER_SUPPLY_PROP_CURRENT_NOW:
2184 val->intval = ab8500_charger_get_ac_current(di) * 1000; 2740 ret = ab8500_charger_get_ac_current(di);
2741 if (ret >= 0)
2742 di->ac.charger_current = ret;
2743 val->intval = di->ac.charger_current * 1000;
2185 break; 2744 break;
2186 default: 2745 default:
2187 return -EINVAL; 2746 return -EINVAL;
@@ -2208,6 +2767,7 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
2208 union power_supply_propval *val) 2767 union power_supply_propval *val)
2209{ 2768{
2210 struct ab8500_charger *di; 2769 struct ab8500_charger *di;
2770 int ret;
2211 2771
2212 di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy)); 2772 di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
2213 2773
@@ -2231,7 +2791,9 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
2231 val->intval = di->usb.charger_connected; 2791 val->intval = di->usb.charger_connected;
2232 break; 2792 break;
2233 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 2793 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
2234 di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di); 2794 ret = ab8500_charger_get_vbus_voltage(di);
2795 if (ret >= 0)
2796 di->usb.charger_voltage = ret;
2235 val->intval = di->usb.charger_voltage * 1000; 2797 val->intval = di->usb.charger_voltage * 1000;
2236 break; 2798 break;
2237 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 2799 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2243,7 +2805,10 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
2243 val->intval = di->usb.cv_active; 2805 val->intval = di->usb.cv_active;
2244 break; 2806 break;
2245 case POWER_SUPPLY_PROP_CURRENT_NOW: 2807 case POWER_SUPPLY_PROP_CURRENT_NOW:
2246 val->intval = ab8500_charger_get_usb_current(di) * 1000; 2808 ret = ab8500_charger_get_usb_current(di);
2809 if (ret >= 0)
2810 di->usb.charger_current = ret;
2811 val->intval = di->usb.charger_current * 1000;
2247 break; 2812 break;
2248 case POWER_SUPPLY_PROP_CURRENT_AVG: 2813 case POWER_SUPPLY_PROP_CURRENT_AVG:
2249 /* 2814 /*
@@ -2293,13 +2858,23 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
2293 } 2858 }
2294 } 2859 }
2295 2860
2296 /* VBUS OVV set to 6.3V and enable automatic current limitiation */ 2861 if (is_ab9540_2p0(di->parent) || is_ab8505_2p0(di->parent))
2297 ret = abx500_set_register_interruptible(di->dev, 2862 ret = abx500_mask_and_set_register_interruptible(di->dev,
2298 AB8500_CHARGER, 2863 AB8500_CHARGER,
2299 AB8500_USBCH_CTRL2_REG, 2864 AB8500_USBCH_CTRL2_REG,
2300 VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA); 2865 VBUS_AUTO_IN_CURR_LIM_ENA,
2866 VBUS_AUTO_IN_CURR_LIM_ENA);
2867 else
2868 /*
2869 * VBUS OVV set to 6.3V and enable automatic current limitation
2870 */
2871 ret = abx500_set_register_interruptible(di->dev,
2872 AB8500_CHARGER,
2873 AB8500_USBCH_CTRL2_REG,
2874 VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
2301 if (ret) { 2875 if (ret) {
2302 dev_err(di->dev, "failed to set VBUS OVV\n"); 2876 dev_err(di->dev,
2877 "failed to set automatic current limitation\n");
2303 goto out; 2878 goto out;
2304 } 2879 }
2305 2880
@@ -2355,12 +2930,26 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
2355 goto out; 2930 goto out;
2356 } 2931 }
2357 2932
2933 /* Set charger watchdog timeout */
2934 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
2935 AB8500_CH_WD_TIMER_REG, WD_TIMER);
2936 if (ret) {
2937 dev_err(di->dev, "failed to set charger watchdog timeout\n");
2938 goto out;
2939 }
2940
2941 ret = ab8500_charger_led_en(di, false);
2942 if (ret < 0) {
2943 dev_err(di->dev, "failed to disable LED\n");
2944 goto out;
2945 }
2946
2358 /* Backup battery voltage and current */ 2947 /* Backup battery voltage and current */
2359 ret = abx500_set_register_interruptible(di->dev, 2948 ret = abx500_set_register_interruptible(di->dev,
2360 AB8500_RTC, 2949 AB8500_RTC,
2361 AB8500_RTC_BACKUP_CHG_REG, 2950 AB8500_RTC_BACKUP_CHG_REG,
2362 di->bat->bkup_bat_v | 2951 di->bm->bkup_bat_v |
2363 di->bat->bkup_bat_i); 2952 di->bm->bkup_bat_i);
2364 if (ret) { 2953 if (ret) {
2365 dev_err(di->dev, "failed to setup backup battery charging\n"); 2954 dev_err(di->dev, "failed to setup backup battery charging\n");
2366 goto out; 2955 goto out;
@@ -2394,6 +2983,7 @@ static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
2394 {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler}, 2983 {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
2395 {"VBUS_OVV", ab8500_charger_vbusovv_handler}, 2984 {"VBUS_OVV", ab8500_charger_vbusovv_handler},
2396 {"CH_WD_EXP", ab8500_charger_chwdexp_handler}, 2985 {"CH_WD_EXP", ab8500_charger_chwdexp_handler},
2986 {"VBUS_CH_DROP_END", ab8500_charger_vbuschdropend_handler},
2397}; 2987};
2398 2988
2399static int ab8500_charger_usb_notifier_call(struct notifier_block *nb, 2989static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
@@ -2404,6 +2994,9 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
2404 enum ab8500_usb_state bm_usb_state; 2994 enum ab8500_usb_state bm_usb_state;
2405 unsigned mA = *((unsigned *)power); 2995 unsigned mA = *((unsigned *)power);
2406 2996
2997 if (!di)
2998 return NOTIFY_DONE;
2999
2407 if (event != USB_EVENT_VBUS) { 3000 if (event != USB_EVENT_VBUS) {
2408 dev_dbg(di->dev, "not a standard host, returning\n"); 3001 dev_dbg(di->dev, "not a standard host, returning\n");
2409 return NOTIFY_DONE; 3002 return NOTIFY_DONE;
@@ -2427,13 +3020,15 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
2427 __func__, bm_usb_state, mA); 3020 __func__, bm_usb_state, mA);
2428 3021
2429 spin_lock(&di->usb_state.usb_lock); 3022 spin_lock(&di->usb_state.usb_lock);
2430 di->usb_state.usb_changed = true; 3023 di->usb_state.state_tmp = bm_usb_state;
3024 di->usb_state.usb_current_tmp = mA;
2431 spin_unlock(&di->usb_state.usb_lock); 3025 spin_unlock(&di->usb_state.usb_lock);
2432 3026
2433 di->usb_state.state = bm_usb_state; 3027 /*
2434 di->usb_state.usb_current = mA; 3028 * wait for some time until you get updates from the usb stack
2435 3029 * and negotiations are completed
2436 queue_work(di->charger_wq, &di->usb_state_changed_work); 3030 */
3031 queue_delayed_work(di->charger_wq, &di->usb_state_changed_work, HZ/2);
2437 3032
2438 return NOTIFY_OK; 3033 return NOTIFY_OK;
2439} 3034}
@@ -2473,6 +3068,9 @@ static int ab8500_charger_resume(struct platform_device *pdev)
2473 &di->check_hw_failure_work, 0); 3068 &di->check_hw_failure_work, 0);
2474 } 3069 }
2475 3070
3071 if (di->flags.vbus_drop_end)
3072 queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work, 0);
3073
2476 return 0; 3074 return 0;
2477} 3075}
2478 3076
@@ -2485,6 +3083,23 @@ static int ab8500_charger_suspend(struct platform_device *pdev,
2485 if (delayed_work_pending(&di->check_hw_failure_work)) 3083 if (delayed_work_pending(&di->check_hw_failure_work))
2486 cancel_delayed_work(&di->check_hw_failure_work); 3084 cancel_delayed_work(&di->check_hw_failure_work);
2487 3085
3086 if (delayed_work_pending(&di->vbus_drop_end_work))
3087 cancel_delayed_work(&di->vbus_drop_end_work);
3088
3089 flush_delayed_work(&di->attach_work);
3090 flush_delayed_work(&di->usb_charger_attached_work);
3091 flush_delayed_work(&di->ac_charger_attached_work);
3092 flush_delayed_work(&di->check_usbchgnotok_work);
3093 flush_delayed_work(&di->check_vbat_work);
3094 flush_delayed_work(&di->kick_wd_work);
3095
3096 flush_work(&di->usb_link_status_work);
3097 flush_work(&di->ac_work);
3098 flush_work(&di->detect_usb_type_work);
3099
3100 if (atomic_read(&di->current_stepping_sessions))
3101 return -EAGAIN;
3102
2488 return 0; 3103 return 0;
2489} 3104}
2490#else 3105#else
@@ -2509,9 +3124,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
2509 free_irq(irq, di); 3124 free_irq(irq, di);
2510 } 3125 }
2511 3126
2512 /* disable the regulator */
2513 regulator_put(di->regu);
2514
2515 /* Backup battery voltage and current disable */ 3127 /* Backup battery voltage and current disable */
2516 ret = abx500_mask_and_set_register_interruptible(di->dev, 3128 ret = abx500_mask_and_set_register_interruptible(di->dev,
2517 AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0); 3129 AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
@@ -2525,8 +3137,12 @@ static int ab8500_charger_remove(struct platform_device *pdev)
2525 destroy_workqueue(di->charger_wq); 3137 destroy_workqueue(di->charger_wq);
2526 3138
2527 flush_scheduled_work(); 3139 flush_scheduled_work();
2528 power_supply_unregister(&di->usb_chg.psy); 3140 if(di->usb_chg.enabled)
2529 power_supply_unregister(&di->ac_chg.psy); 3141 power_supply_unregister(&di->usb_chg.psy);
3142#if !defined(CONFIG_CHARGER_PM2301)
3143 if(di->ac_chg.enabled)
3144 power_supply_unregister(&di->ac_chg.psy);
3145#endif
2530 platform_set_drvdata(pdev, NULL); 3146 platform_set_drvdata(pdev, NULL);
2531 3147
2532 return 0; 3148 return 0;
@@ -2541,32 +3157,31 @@ static char *supply_interface[] = {
2541static int ab8500_charger_probe(struct platform_device *pdev) 3157static int ab8500_charger_probe(struct platform_device *pdev)
2542{ 3158{
2543 struct device_node *np = pdev->dev.of_node; 3159 struct device_node *np = pdev->dev.of_node;
3160 struct abx500_bm_data *plat = pdev->dev.platform_data;
2544 struct ab8500_charger *di; 3161 struct ab8500_charger *di;
2545 int irq, i, charger_status, ret = 0; 3162 int irq, i, charger_status, ret = 0, ch_stat;
2546 3163
2547 di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL); 3164 di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
2548 if (!di) { 3165 if (!di) {
2549 dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__); 3166 dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
2550 return -ENOMEM; 3167 return -ENOMEM;
2551 } 3168 }
2552 di->bat = pdev->mfd_cell->platform_data; 3169
2553 if (!di->bat) { 3170 if (!plat) {
2554 if (np) { 3171 dev_err(&pdev->dev, "no battery management data supplied\n");
2555 ret = bmdevs_of_probe(&pdev->dev, np, &di->bat); 3172 return -EINVAL;
2556 if (ret) { 3173 }
2557 dev_err(&pdev->dev, 3174 di->bm = plat;
2558 "failed to get battery information\n"); 3175
2559 return ret; 3176 if (np) {
2560 } 3177 ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
2561 di->autopower_cfg = of_property_read_bool(np, "autopower_cfg"); 3178 if (ret) {
2562 } else { 3179 dev_err(&pdev->dev, "failed to get battery information\n");
2563 dev_err(&pdev->dev, "missing dt node for ab8500_charger\n"); 3180 return ret;
2564 return -EINVAL;
2565 } 3181 }
2566 } else { 3182 di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
2567 dev_info(&pdev->dev, "falling back to legacy platform data\n"); 3183 } else
2568 di->autopower_cfg = false; 3184 di->autopower_cfg = false;
2569 }
2570 3185
2571 /* get parent data */ 3186 /* get parent data */
2572 di->dev = &pdev->dev; 3187 di->dev = &pdev->dev;
@@ -2575,8 +3190,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2575 3190
2576 /* initialize lock */ 3191 /* initialize lock */
2577 spin_lock_init(&di->usb_state.usb_lock); 3192 spin_lock_init(&di->usb_state.usb_lock);
3193 mutex_init(&di->usb_ipt_crnt_lock);
2578 3194
2579 di->autopower = false; 3195 di->autopower = false;
3196 di->invalid_charger_detect_state = 0;
2580 3197
2581 /* AC supply */ 3198 /* AC supply */
2582 /* power_supply base class */ 3199 /* power_supply base class */
@@ -2595,6 +3212,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2595 ARRAY_SIZE(ab8500_charger_voltage_map) - 1]; 3212 ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
2596 di->ac_chg.max_out_curr = ab8500_charger_current_map[ 3213 di->ac_chg.max_out_curr = ab8500_charger_current_map[
2597 ARRAY_SIZE(ab8500_charger_current_map) - 1]; 3214 ARRAY_SIZE(ab8500_charger_current_map) - 1];
3215 di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
3216 di->ac_chg.enabled = di->bm->ac_enabled;
3217 di->ac_chg.external = false;
2598 3218
2599 /* USB supply */ 3219 /* USB supply */
2600 /* power_supply base class */ 3220 /* power_supply base class */
@@ -2613,7 +3233,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2613 ARRAY_SIZE(ab8500_charger_voltage_map) - 1]; 3233 ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
2614 di->usb_chg.max_out_curr = ab8500_charger_current_map[ 3234 di->usb_chg.max_out_curr = ab8500_charger_current_map[
2615 ARRAY_SIZE(ab8500_charger_current_map) - 1]; 3235 ARRAY_SIZE(ab8500_charger_current_map) - 1];
2616 3236 di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
3237 di->usb_chg.enabled = di->bm->usb_enabled;
3238 di->usb_chg.external = false;
2617 3239
2618 /* Create a work queue for the charger */ 3240 /* Create a work queue for the charger */
2619 di->charger_wq = 3241 di->charger_wq =
@@ -2623,12 +3245,19 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2623 return -ENOMEM; 3245 return -ENOMEM;
2624 } 3246 }
2625 3247
3248 mutex_init(&di->charger_attached_mutex);
3249
2626 /* Init work for HW failure check */ 3250 /* Init work for HW failure check */
2627 INIT_DEFERRABLE_WORK(&di->check_hw_failure_work, 3251 INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
2628 ab8500_charger_check_hw_failure_work); 3252 ab8500_charger_check_hw_failure_work);
2629 INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work, 3253 INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
2630 ab8500_charger_check_usbchargernotok_work); 3254 ab8500_charger_check_usbchargernotok_work);
2631 3255
3256 INIT_DELAYED_WORK(&di->ac_charger_attached_work,
3257 ab8500_charger_ac_attached_work);
3258 INIT_DELAYED_WORK(&di->usb_charger_attached_work,
3259 ab8500_charger_usb_attached_work);
3260
2632 /* 3261 /*
2633 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog 3262 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
2634 * logic. That means we have to continously kick the charger 3263 * logic. That means we have to continously kick the charger
@@ -2644,6 +3273,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2644 INIT_DEFERRABLE_WORK(&di->check_vbat_work, 3273 INIT_DEFERRABLE_WORK(&di->check_vbat_work,
2645 ab8500_charger_check_vbat_work); 3274 ab8500_charger_check_vbat_work);
2646 3275
3276 INIT_DELAYED_WORK(&di->attach_work,
3277 ab8500_charger_usb_link_attach_work);
3278
3279 INIT_DELAYED_WORK(&di->usb_state_changed_work,
3280 ab8500_charger_usb_state_changed_work);
3281
3282 INIT_DELAYED_WORK(&di->vbus_drop_end_work,
3283 ab8500_charger_vbus_drop_end_work);
3284
2647 /* Init work for charger detection */ 3285 /* Init work for charger detection */
2648 INIT_WORK(&di->usb_link_status_work, 3286 INIT_WORK(&di->usb_link_status_work,
2649 ab8500_charger_usb_link_status_work); 3287 ab8500_charger_usb_link_status_work);
@@ -2651,9 +3289,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2651 INIT_WORK(&di->detect_usb_type_work, 3289 INIT_WORK(&di->detect_usb_type_work,
2652 ab8500_charger_detect_usb_type_work); 3290 ab8500_charger_detect_usb_type_work);
2653 3291
2654 INIT_WORK(&di->usb_state_changed_work,
2655 ab8500_charger_usb_state_changed_work);
2656
2657 /* Init work for checking HW status */ 3292 /* Init work for checking HW status */
2658 INIT_WORK(&di->check_main_thermal_prot_work, 3293 INIT_WORK(&di->check_main_thermal_prot_work,
2659 ab8500_charger_check_main_thermal_prot_work); 3294 ab8500_charger_check_main_thermal_prot_work);
@@ -2665,7 +3300,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2665 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW 3300 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
2666 * interrupts during charging 3301 * interrupts during charging
2667 */ 3302 */
2668 di->regu = regulator_get(di->dev, "vddadc"); 3303 di->regu = devm_regulator_get(di->dev, "vddadc");
2669 if (IS_ERR(di->regu)) { 3304 if (IS_ERR(di->regu)) {
2670 ret = PTR_ERR(di->regu); 3305 ret = PTR_ERR(di->regu);
2671 dev_err(di->dev, "failed to get vddadc regulator\n"); 3306 dev_err(di->dev, "failed to get vddadc regulator\n");
@@ -2677,21 +3312,25 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2677 ret = ab8500_charger_init_hw_registers(di); 3312 ret = ab8500_charger_init_hw_registers(di);
2678 if (ret) { 3313 if (ret) {
2679 dev_err(di->dev, "failed to initialize ABB registers\n"); 3314 dev_err(di->dev, "failed to initialize ABB registers\n");
2680 goto free_regulator; 3315 goto free_charger_wq;
2681 } 3316 }
2682 3317
2683 /* Register AC charger class */ 3318 /* Register AC charger class */
2684 ret = power_supply_register(di->dev, &di->ac_chg.psy); 3319 if(di->ac_chg.enabled) {
2685 if (ret) { 3320 ret = power_supply_register(di->dev, &di->ac_chg.psy);
2686 dev_err(di->dev, "failed to register AC charger\n"); 3321 if (ret) {
2687 goto free_regulator; 3322 dev_err(di->dev, "failed to register AC charger\n");
3323 goto free_charger_wq;
3324 }
2688 } 3325 }
2689 3326
2690 /* Register USB charger class */ 3327 /* Register USB charger class */
2691 ret = power_supply_register(di->dev, &di->usb_chg.psy); 3328 if(di->usb_chg.enabled) {
2692 if (ret) { 3329 ret = power_supply_register(di->dev, &di->usb_chg.psy);
2693 dev_err(di->dev, "failed to register USB charger\n"); 3330 if (ret) {
2694 goto free_ac; 3331 dev_err(di->dev, "failed to register USB charger\n");
3332 goto free_ac;
3333 }
2695 } 3334 }
2696 3335
2697 di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2); 3336 di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
@@ -2708,7 +3347,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2708 } 3347 }
2709 3348
2710 /* Identify the connected charger types during startup */ 3349 /* Identify the connected charger types during startup */
2711 charger_status = ab8500_charger_detect_chargers(di); 3350 charger_status = ab8500_charger_detect_chargers(di, true);
2712 if (charger_status & AC_PW_CONN) { 3351 if (charger_status & AC_PW_CONN) {
2713 di->ac.charger_connected = 1; 3352 di->ac.charger_connected = 1;
2714 di->ac_conn = true; 3353 di->ac_conn = true;
@@ -2717,7 +3356,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2717 } 3356 }
2718 3357
2719 if (charger_status & USB_PW_CONN) { 3358 if (charger_status & USB_PW_CONN) {
2720 dev_dbg(di->dev, "VBUS Detect during startup\n");
2721 di->vbus_detected = true; 3359 di->vbus_detected = true;
2722 di->vbus_detected_start = true; 3360 di->vbus_detected_start = true;
2723 queue_work(di->charger_wq, 3361 queue_work(di->charger_wq,
@@ -2742,6 +3380,23 @@ static int ab8500_charger_probe(struct platform_device *pdev)
2742 3380
2743 platform_set_drvdata(pdev, di); 3381 platform_set_drvdata(pdev, di);
2744 3382
3383 mutex_lock(&di->charger_attached_mutex);
3384
3385 ch_stat = ab8500_charger_detect_chargers(di, false);
3386
3387 if ((ch_stat & AC_PW_CONN) == AC_PW_CONN) {
3388 queue_delayed_work(di->charger_wq,
3389 &di->ac_charger_attached_work,
3390 HZ);
3391 }
3392 if ((ch_stat & USB_PW_CONN) == USB_PW_CONN) {
3393 queue_delayed_work(di->charger_wq,
3394 &di->usb_charger_attached_work,
3395 HZ);
3396 }
3397
3398 mutex_unlock(&di->charger_attached_mutex);
3399
2745 return ret; 3400 return ret;
2746 3401
2747free_irq: 3402free_irq:
@@ -2755,11 +3410,11 @@ free_irq:
2755put_usb_phy: 3410put_usb_phy:
2756 usb_put_phy(di->usb_phy); 3411 usb_put_phy(di->usb_phy);
2757free_usb: 3412free_usb:
2758 power_supply_unregister(&di->usb_chg.psy); 3413 if(di->usb_chg.enabled)
3414 power_supply_unregister(&di->usb_chg.psy);
2759free_ac: 3415free_ac:
2760 power_supply_unregister(&di->ac_chg.psy); 3416 if(di->ac_chg.enabled)
2761free_regulator: 3417 power_supply_unregister(&di->ac_chg.psy);
2762 regulator_put(di->regu);
2763free_charger_wq: 3418free_charger_wq:
2764 destroy_workqueue(di->charger_wq); 3419 destroy_workqueue(di->charger_wq);
2765 return ret; 3420 return ret;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index b3bf178c3462..25dae4c4b0ef 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -32,6 +32,7 @@
32#include <linux/mfd/abx500/ab8500.h> 32#include <linux/mfd/abx500/ab8500.h>
33#include <linux/mfd/abx500/ab8500-bm.h> 33#include <linux/mfd/abx500/ab8500-bm.h>
34#include <linux/mfd/abx500/ab8500-gpadc.h> 34#include <linux/mfd/abx500/ab8500-gpadc.h>
35#include <linux/kernel.h>
35 36
36#define MILLI_TO_MICRO 1000 37#define MILLI_TO_MICRO 1000
37#define FG_LSB_IN_MA 1627 38#define FG_LSB_IN_MA 1627
@@ -42,7 +43,7 @@
42 43
43#define NBR_AVG_SAMPLES 20 44#define NBR_AVG_SAMPLES 20
44 45
45#define LOW_BAT_CHECK_INTERVAL (2 * HZ) 46#define LOW_BAT_CHECK_INTERVAL (HZ / 16) /* 62.5 ms */
46 47
47#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */ 48#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
48#define BATT_OK_MIN 2360 /* mV */ 49#define BATT_OK_MIN 2360 /* mV */
@@ -113,6 +114,13 @@ struct ab8500_fg_avg_cap {
113 int sum; 114 int sum;
114}; 115};
115 116
117struct ab8500_fg_cap_scaling {
118 bool enable;
119 int cap_to_scale[2];
120 int disable_cap_level;
121 int scaled_cap;
122};
123
116struct ab8500_fg_battery_capacity { 124struct ab8500_fg_battery_capacity {
117 int max_mah_design; 125 int max_mah_design;
118 int max_mah; 126 int max_mah;
@@ -123,6 +131,7 @@ struct ab8500_fg_battery_capacity {
123 int prev_percent; 131 int prev_percent;
124 int prev_level; 132 int prev_level;
125 int user_mah; 133 int user_mah;
134 struct ab8500_fg_cap_scaling cap_scale;
126}; 135};
127 136
128struct ab8500_fg_flags { 137struct ab8500_fg_flags {
@@ -160,6 +169,8 @@ struct inst_curr_result_list {
160 * @recovery_cnt: Counter for recovery mode 169 * @recovery_cnt: Counter for recovery mode
161 * @high_curr_cnt: Counter for high current mode 170 * @high_curr_cnt: Counter for high current mode
162 * @init_cnt: Counter for init mode 171 * @init_cnt: Counter for init mode
172 * @low_bat_cnt Counter for number of consecutive low battery measures
173 * @nbr_cceoc_irq_cnt Counter for number of CCEOC irqs received since enabled
163 * @recovery_needed: Indicate if recovery is needed 174 * @recovery_needed: Indicate if recovery is needed
164 * @high_curr_mode: Indicate if we're in high current mode 175 * @high_curr_mode: Indicate if we're in high current mode
165 * @init_capacity: Indicate if initial capacity measuring should be done 176 * @init_capacity: Indicate if initial capacity measuring should be done
@@ -167,13 +178,14 @@ struct inst_curr_result_list {
167 * @calib_state State during offset calibration 178 * @calib_state State during offset calibration
168 * @discharge_state: Current discharge state 179 * @discharge_state: Current discharge state
169 * @charge_state: Current charge state 180 * @charge_state: Current charge state
181 * @ab8500_fg_started Completion struct used for the instant current start
170 * @ab8500_fg_complete Completion struct used for the instant current reading 182 * @ab8500_fg_complete Completion struct used for the instant current reading
171 * @flags: Structure for information about events triggered 183 * @flags: Structure for information about events triggered
172 * @bat_cap: Structure for battery capacity specific parameters 184 * @bat_cap: Structure for battery capacity specific parameters
173 * @avg_cap: Average capacity filter 185 * @avg_cap: Average capacity filter
174 * @parent: Pointer to the struct ab8500 186 * @parent: Pointer to the struct ab8500
175 * @gpadc: Pointer to the struct gpadc 187 * @gpadc: Pointer to the struct gpadc
176 * @bat: Pointer to the abx500_bm platform data 188 * @bm: Platform specific battery management information
177 * @fg_psy: Structure that holds the FG specific battery properties 189 * @fg_psy: Structure that holds the FG specific battery properties
178 * @fg_wq: Work queue for running the FG algorithm 190 * @fg_wq: Work queue for running the FG algorithm
179 * @fg_periodic_work: Work to run the FG algorithm periodically 191 * @fg_periodic_work: Work to run the FG algorithm periodically
@@ -199,6 +211,8 @@ struct ab8500_fg {
199 int recovery_cnt; 211 int recovery_cnt;
200 int high_curr_cnt; 212 int high_curr_cnt;
201 int init_cnt; 213 int init_cnt;
214 int low_bat_cnt;
215 int nbr_cceoc_irq_cnt;
202 bool recovery_needed; 216 bool recovery_needed;
203 bool high_curr_mode; 217 bool high_curr_mode;
204 bool init_capacity; 218 bool init_capacity;
@@ -206,13 +220,14 @@ struct ab8500_fg {
206 enum ab8500_fg_calibration_state calib_state; 220 enum ab8500_fg_calibration_state calib_state;
207 enum ab8500_fg_discharge_state discharge_state; 221 enum ab8500_fg_discharge_state discharge_state;
208 enum ab8500_fg_charge_state charge_state; 222 enum ab8500_fg_charge_state charge_state;
223 struct completion ab8500_fg_started;
209 struct completion ab8500_fg_complete; 224 struct completion ab8500_fg_complete;
210 struct ab8500_fg_flags flags; 225 struct ab8500_fg_flags flags;
211 struct ab8500_fg_battery_capacity bat_cap; 226 struct ab8500_fg_battery_capacity bat_cap;
212 struct ab8500_fg_avg_cap avg_cap; 227 struct ab8500_fg_avg_cap avg_cap;
213 struct ab8500 *parent; 228 struct ab8500 *parent;
214 struct ab8500_gpadc *gpadc; 229 struct ab8500_gpadc *gpadc;
215 struct abx500_bm_data *bat; 230 struct abx500_bm_data *bm;
216 struct power_supply fg_psy; 231 struct power_supply fg_psy;
217 struct workqueue_struct *fg_wq; 232 struct workqueue_struct *fg_wq;
218 struct delayed_work fg_periodic_work; 233 struct delayed_work fg_periodic_work;
@@ -355,7 +370,7 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
355 /* 370 /*
356 * We want to know if we're in low current mode 371 * We want to know if we're in low current mode
357 */ 372 */
358 if (curr > -di->bat->fg_params->high_curr_threshold) 373 if (curr > -di->bm->fg_params->high_curr_threshold)
359 return true; 374 return true;
360 else 375 else
361 return false; 376 return false;
@@ -484,8 +499,9 @@ static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
484 di->flags.fg_enabled = true; 499 di->flags.fg_enabled = true;
485 } else { 500 } else {
486 /* Clear any pending read requests */ 501 /* Clear any pending read requests */
487 ret = abx500_set_register_interruptible(di->dev, 502 ret = abx500_mask_and_set_register_interruptible(di->dev,
488 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0); 503 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
504 (RESET_ACCU | READ_REQ), 0);
489 if (ret) 505 if (ret)
490 goto cc_err; 506 goto cc_err;
491 507
@@ -523,13 +539,14 @@ cc_err:
523 * Note: This is part "one" and has to be called before 539 * Note: This is part "one" and has to be called before
524 * ab8500_fg_inst_curr_finalize() 540 * ab8500_fg_inst_curr_finalize()
525 */ 541 */
526 int ab8500_fg_inst_curr_start(struct ab8500_fg *di) 542int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
527{ 543{
528 u8 reg_val; 544 u8 reg_val;
529 int ret; 545 int ret;
530 546
531 mutex_lock(&di->cc_lock); 547 mutex_lock(&di->cc_lock);
532 548
549 di->nbr_cceoc_irq_cnt = 0;
533 ret = abx500_get_register_interruptible(di->dev, AB8500_RTC, 550 ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
534 AB8500_RTC_CC_CONF_REG, &reg_val); 551 AB8500_RTC_CC_CONF_REG, &reg_val);
535 if (ret < 0) 552 if (ret < 0)
@@ -557,6 +574,7 @@ cc_err:
557 } 574 }
558 575
559 /* Return and WFI */ 576 /* Return and WFI */
577 INIT_COMPLETION(di->ab8500_fg_started);
560 INIT_COMPLETION(di->ab8500_fg_complete); 578 INIT_COMPLETION(di->ab8500_fg_complete);
561 enable_irq(di->irq); 579 enable_irq(di->irq);
562 580
@@ -568,6 +586,17 @@ fail:
568} 586}
569 587
570/** 588/**
589 * ab8500_fg_inst_curr_started() - check if fg conversion has started
590 * @di: pointer to the ab8500_fg structure
591 *
592 * Returns 1 if conversion started, 0 if still waiting
593 */
594int ab8500_fg_inst_curr_started(struct ab8500_fg *di)
595{
596 return completion_done(&di->ab8500_fg_started);
597}
598
599/**
571 * ab8500_fg_inst_curr_done() - check if fg conversion is done 600 * ab8500_fg_inst_curr_done() - check if fg conversion is done
572 * @di: pointer to the ab8500_fg structure 601 * @di: pointer to the ab8500_fg structure
573 * 602 *
@@ -595,13 +624,15 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
595 int timeout; 624 int timeout;
596 625
597 if (!completion_done(&di->ab8500_fg_complete)) { 626 if (!completion_done(&di->ab8500_fg_complete)) {
598 timeout = wait_for_completion_timeout(&di->ab8500_fg_complete, 627 timeout = wait_for_completion_timeout(
628 &di->ab8500_fg_complete,
599 INS_CURR_TIMEOUT); 629 INS_CURR_TIMEOUT);
600 dev_dbg(di->dev, "Finalize time: %d ms\n", 630 dev_dbg(di->dev, "Finalize time: %d ms\n",
601 ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ); 631 ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
602 if (!timeout) { 632 if (!timeout) {
603 ret = -ETIME; 633 ret = -ETIME;
604 disable_irq(di->irq); 634 disable_irq(di->irq);
635 di->nbr_cceoc_irq_cnt = 0;
605 dev_err(di->dev, "completion timed out [%d]\n", 636 dev_err(di->dev, "completion timed out [%d]\n",
606 __LINE__); 637 __LINE__);
607 goto fail; 638 goto fail;
@@ -609,6 +640,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
609 } 640 }
610 641
611 disable_irq(di->irq); 642 disable_irq(di->irq);
643 di->nbr_cceoc_irq_cnt = 0;
612 644
613 ret = abx500_mask_and_set_register_interruptible(di->dev, 645 ret = abx500_mask_and_set_register_interruptible(di->dev,
614 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 646 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
@@ -647,7 +679,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
647 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm 679 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
648 */ 680 */
649 val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) / 681 val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
650 (1000 * di->bat->fg_res); 682 (1000 * di->bm->fg_res);
651 683
652 if (di->turn_off_fg) { 684 if (di->turn_off_fg) {
653 dev_dbg(di->dev, "%s Disable FG\n", __func__); 685 dev_dbg(di->dev, "%s Disable FG\n", __func__);
@@ -683,6 +715,7 @@ fail:
683int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di) 715int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
684{ 716{
685 int ret; 717 int ret;
718 int timeout;
686 int res = 0; 719 int res = 0;
687 720
688 ret = ab8500_fg_inst_curr_start(di); 721 ret = ab8500_fg_inst_curr_start(di);
@@ -691,13 +724,33 @@ int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
691 return 0; 724 return 0;
692 } 725 }
693 726
727 /* Wait for CC to actually start */
728 if (!completion_done(&di->ab8500_fg_started)) {
729 timeout = wait_for_completion_timeout(
730 &di->ab8500_fg_started,
731 INS_CURR_TIMEOUT);
732 dev_dbg(di->dev, "Start time: %d ms\n",
733 ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
734 if (!timeout) {
735 ret = -ETIME;
736 dev_err(di->dev, "completion timed out [%d]\n",
737 __LINE__);
738 goto fail;
739 }
740 }
741
694 ret = ab8500_fg_inst_curr_finalize(di, &res); 742 ret = ab8500_fg_inst_curr_finalize(di, &res);
695 if (ret) { 743 if (ret) {
696 dev_err(di->dev, "Failed to finalize fg_inst\n"); 744 dev_err(di->dev, "Failed to finalize fg_inst\n");
697 return 0; 745 return 0;
698 } 746 }
699 747
748 dev_dbg(di->dev, "%s instant current: %d", __func__, res);
700 return res; 749 return res;
750fail:
751 disable_irq(di->irq);
752 mutex_unlock(&di->cc_lock);
753 return ret;
701} 754}
702 755
703/** 756/**
@@ -750,19 +803,16 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
750 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm 803 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
751 */ 804 */
752 di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) / 805 di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
753 (100 * di->bat->fg_res); 806 (100 * di->bm->fg_res);
754 807
755 /* 808 /*
756 * Convert to unit value in mA 809 * Convert to unit value in mA
757 * Full scale input voltage is 810 * by dividing by the conversion
758 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
759 * Given a 250ms conversion cycle time the LSB corresponds
760 * to 112.9 nAh. Convert to current by dividing by the conversion
761 * time in hours (= samples / (3600 * 4)h) 811 * time in hours (= samples / (3600 * 4)h)
762 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm 812 * and multiply with 1000
763 */ 813 */
764 di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) / 814 di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
765 (1000 * di->bat->fg_res * (di->fg_samples / 4)); 815 (1000 * di->bm->fg_res * (di->fg_samples / 4));
766 816
767 di->flags.conv_done = true; 817 di->flags.conv_done = true;
768 818
@@ -770,6 +820,8 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
770 820
771 queue_work(di->fg_wq, &di->fg_work); 821 queue_work(di->fg_wq, &di->fg_work);
772 822
823 dev_dbg(di->dev, "fg_res: %d, fg_samples: %d, gasg: %d, accu_charge: %d \n",
824 di->bm->fg_res, di->fg_samples, val, di->accu_charge);
773 return; 825 return;
774exit: 826exit:
775 dev_err(di->dev, 827 dev_err(di->dev,
@@ -814,8 +866,8 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
814 struct abx500_v_to_cap *tbl; 866 struct abx500_v_to_cap *tbl;
815 int cap = 0; 867 int cap = 0;
816 868
817 tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl, 869 tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
818 tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements; 870 tbl_size = di->bm->bat_type[di->bm->batt_id].n_v_cap_tbl_elements;
819 871
820 for (i = 0; i < tbl_size; ++i) { 872 for (i = 0; i < tbl_size; ++i) {
821 if (voltage > tbl[i].voltage) 873 if (voltage > tbl[i].voltage)
@@ -866,8 +918,8 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
866 struct batres_vs_temp *tbl; 918 struct batres_vs_temp *tbl;
867 int resist = 0; 919 int resist = 0;
868 920
869 tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl; 921 tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
870 tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements; 922 tbl_size = di->bm->bat_type[di->bm->batt_id].n_batres_tbl_elements;
871 923
872 for (i = 0; i < tbl_size; ++i) { 924 for (i = 0; i < tbl_size; ++i) {
873 if (di->bat_temp / 10 > tbl[i].temp) 925 if (di->bat_temp / 10 > tbl[i].temp)
@@ -888,11 +940,11 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
888 940
889 dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d" 941 dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
890 " fg resistance %d, total: %d (mOhm)\n", 942 " fg resistance %d, total: %d (mOhm)\n",
891 __func__, di->bat_temp, resist, di->bat->fg_res / 10, 943 __func__, di->bat_temp, resist, di->bm->fg_res / 10,
892 (di->bat->fg_res / 10) + resist); 944 (di->bm->fg_res / 10) + resist);
893 945
894 /* fg_res variable is in 0.1mOhm */ 946 /* fg_res variable is in 0.1mOhm */
895 resist += di->bat->fg_res / 10; 947 resist += di->bm->fg_res / 10;
896 948
897 return resist; 949 return resist;
898} 950}
@@ -915,7 +967,7 @@ static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
915 do { 967 do {
916 vbat += ab8500_fg_bat_voltage(di); 968 vbat += ab8500_fg_bat_voltage(di);
917 i++; 969 i++;
918 msleep(5); 970 usleep_range(5000, 6000);
919 } while (!ab8500_fg_inst_curr_done(di)); 971 } while (!ab8500_fg_inst_curr_done(di));
920 972
921 ab8500_fg_inst_curr_finalize(di, &di->inst_curr); 973 ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
@@ -1108,16 +1160,16 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
1108{ 1160{
1109 int ret, percent; 1161 int ret, percent;
1110 1162
1111 percent = di->bat_cap.permille / 10; 1163 percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
1112 1164
1113 if (percent <= di->bat->cap_levels->critical || 1165 if (percent <= di->bm->cap_levels->critical ||
1114 di->flags.low_bat) 1166 di->flags.low_bat)
1115 ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; 1167 ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
1116 else if (percent <= di->bat->cap_levels->low) 1168 else if (percent <= di->bm->cap_levels->low)
1117 ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW; 1169 ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
1118 else if (percent <= di->bat->cap_levels->normal) 1170 else if (percent <= di->bm->cap_levels->normal)
1119 ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; 1171 ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
1120 else if (percent <= di->bat->cap_levels->high) 1172 else if (percent <= di->bm->cap_levels->high)
1121 ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; 1173 ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
1122 else 1174 else
1123 ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL; 1175 ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
@@ -1126,6 +1178,99 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
1126} 1178}
1127 1179
1128/** 1180/**
1181 * ab8500_fg_calculate_scaled_capacity() - Capacity scaling
1182 * @di: pointer to the ab8500_fg structure
1183 *
1184 * Calculates the capacity to be shown to upper layers. Scales the capacity
1185 * to have 100% as a reference from the actual capacity upon removal of charger
1186 * when charging is in maintenance mode.
1187 */
1188static int ab8500_fg_calculate_scaled_capacity(struct ab8500_fg *di)
1189{
1190 struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
1191 int capacity = di->bat_cap.prev_percent;
1192
1193 if (!cs->enable)
1194 return capacity;
1195
1196 /*
1197 * As long as we are in fully charge mode scale the capacity
1198 * to show 100%.
1199 */
1200 if (di->flags.fully_charged) {
1201 cs->cap_to_scale[0] = 100;
1202 cs->cap_to_scale[1] =
1203 max(capacity, di->bm->fg_params->maint_thres);
1204 dev_dbg(di->dev, "Scale cap with %d/%d\n",
1205 cs->cap_to_scale[0], cs->cap_to_scale[1]);
1206 }
1207
1208 /* Calculates the scaled capacity. */
1209 if ((cs->cap_to_scale[0] != cs->cap_to_scale[1])
1210 && (cs->cap_to_scale[1] > 0))
1211 capacity = min(100,
1212 DIV_ROUND_CLOSEST(di->bat_cap.prev_percent *
1213 cs->cap_to_scale[0],
1214 cs->cap_to_scale[1]));
1215
1216 if (di->flags.charging) {
1217 if (capacity < cs->disable_cap_level) {
1218 cs->disable_cap_level = capacity;
1219 dev_dbg(di->dev, "Cap to stop scale lowered %d%%\n",
1220 cs->disable_cap_level);
1221 } else if (!di->flags.fully_charged) {
1222 if (di->bat_cap.prev_percent >=
1223 cs->disable_cap_level) {
1224 dev_dbg(di->dev, "Disabling scaled capacity\n");
1225 cs->enable = false;
1226 capacity = di->bat_cap.prev_percent;
1227 } else {
1228 dev_dbg(di->dev,
1229 "Waiting in cap to level %d%%\n",
1230 cs->disable_cap_level);
1231 capacity = cs->disable_cap_level;
1232 }
1233 }
1234 }
1235
1236 return capacity;
1237}
1238
1239/**
1240 * ab8500_fg_update_cap_scalers() - Capacity scaling
1241 * @di: pointer to the ab8500_fg structure
1242 *
1243 * To be called when state change from charge<->discharge to update
1244 * the capacity scalers.
1245 */
1246static void ab8500_fg_update_cap_scalers(struct ab8500_fg *di)
1247{
1248 struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
1249
1250 if (!cs->enable)
1251 return;
1252 if (di->flags.charging) {
1253 di->bat_cap.cap_scale.disable_cap_level =
1254 di->bat_cap.cap_scale.scaled_cap;
1255 dev_dbg(di->dev, "Cap to stop scale at charge %d%%\n",
1256 di->bat_cap.cap_scale.disable_cap_level);
1257 } else {
1258 if (cs->scaled_cap != 100) {
1259 cs->cap_to_scale[0] = cs->scaled_cap;
1260 cs->cap_to_scale[1] = di->bat_cap.prev_percent;
1261 } else {
1262 cs->cap_to_scale[0] = 100;
1263 cs->cap_to_scale[1] =
1264 max(di->bat_cap.prev_percent,
1265 di->bm->fg_params->maint_thres);
1266 }
1267
1268 dev_dbg(di->dev, "Cap to scale at discharge %d/%d\n",
1269 cs->cap_to_scale[0], cs->cap_to_scale[1]);
1270 }
1271}
1272
1273/**
1129 * ab8500_fg_check_capacity_limits() - Check if capacity has changed 1274 * ab8500_fg_check_capacity_limits() - Check if capacity has changed
1130 * @di: pointer to the ab8500_fg structure 1275 * @di: pointer to the ab8500_fg structure
1131 * @init: capacity is allowed to go up in init mode 1276 * @init: capacity is allowed to go up in init mode
@@ -1136,6 +1281,7 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
1136static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init) 1281static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
1137{ 1282{
1138 bool changed = false; 1283 bool changed = false;
1284 int percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
1139 1285
1140 di->bat_cap.level = ab8500_fg_capacity_level(di); 1286 di->bat_cap.level = ab8500_fg_capacity_level(di);
1141 1287
@@ -1167,33 +1313,41 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
1167 dev_dbg(di->dev, "Battery low, set capacity to 0\n"); 1313 dev_dbg(di->dev, "Battery low, set capacity to 0\n");
1168 di->bat_cap.prev_percent = 0; 1314 di->bat_cap.prev_percent = 0;
1169 di->bat_cap.permille = 0; 1315 di->bat_cap.permille = 0;
1316 percent = 0;
1170 di->bat_cap.prev_mah = 0; 1317 di->bat_cap.prev_mah = 0;
1171 di->bat_cap.mah = 0; 1318 di->bat_cap.mah = 0;
1172 changed = true; 1319 changed = true;
1173 } else if (di->flags.fully_charged) { 1320 } else if (di->flags.fully_charged) {
1174 /* 1321 /*
1175 * We report 100% if algorithm reported fully charged 1322 * We report 100% if algorithm reported fully charged
1176 * unless capacity drops too much 1323 * and show 100% during maintenance charging (scaling).
1177 */ 1324 */
1178 if (di->flags.force_full) { 1325 if (di->flags.force_full) {
1179 di->bat_cap.prev_percent = di->bat_cap.permille / 10; 1326 di->bat_cap.prev_percent = percent;
1180 di->bat_cap.prev_mah = di->bat_cap.mah; 1327 di->bat_cap.prev_mah = di->bat_cap.mah;
1181 } else if (!di->flags.force_full && 1328
1182 di->bat_cap.prev_percent != 1329 changed = true;
1183 (di->bat_cap.permille) / 10 && 1330
1184 (di->bat_cap.permille / 10) < 1331 if (!di->bat_cap.cap_scale.enable &&
1185 di->bat->fg_params->maint_thres) { 1332 di->bm->capacity_scaling) {
1333 di->bat_cap.cap_scale.enable = true;
1334 di->bat_cap.cap_scale.cap_to_scale[0] = 100;
1335 di->bat_cap.cap_scale.cap_to_scale[1] =
1336 di->bat_cap.prev_percent;
1337 di->bat_cap.cap_scale.disable_cap_level = 100;
1338 }
1339 } else if (di->bat_cap.prev_percent != percent) {
1186 dev_dbg(di->dev, 1340 dev_dbg(di->dev,
1187 "battery reported full " 1341 "battery reported full "
1188 "but capacity dropping: %d\n", 1342 "but capacity dropping: %d\n",
1189 di->bat_cap.permille / 10); 1343 percent);
1190 di->bat_cap.prev_percent = di->bat_cap.permille / 10; 1344 di->bat_cap.prev_percent = percent;
1191 di->bat_cap.prev_mah = di->bat_cap.mah; 1345 di->bat_cap.prev_mah = di->bat_cap.mah;
1192 1346
1193 changed = true; 1347 changed = true;
1194 } 1348 }
1195 } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) { 1349 } else if (di->bat_cap.prev_percent != percent) {
1196 if (di->bat_cap.permille / 10 == 0) { 1350 if (percent == 0) {
1197 /* 1351 /*
1198 * We will not report 0% unless we've got 1352 * We will not report 0% unless we've got
1199 * the LOW_BAT IRQ, no matter what the FG 1353 * the LOW_BAT IRQ, no matter what the FG
@@ -1203,11 +1357,11 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
1203 di->bat_cap.permille = 1; 1357 di->bat_cap.permille = 1;
1204 di->bat_cap.prev_mah = 1; 1358 di->bat_cap.prev_mah = 1;
1205 di->bat_cap.mah = 1; 1359 di->bat_cap.mah = 1;
1360 percent = 1;
1206 1361
1207 changed = true; 1362 changed = true;
1208 } else if (!(!di->flags.charging && 1363 } else if (!(!di->flags.charging &&
1209 (di->bat_cap.permille / 10) > 1364 percent > di->bat_cap.prev_percent) || init) {
1210 di->bat_cap.prev_percent) || init) {
1211 /* 1365 /*
1212 * We do not allow reported capacity to go up 1366 * We do not allow reported capacity to go up
1213 * unless we're charging or if we're in init 1367 * unless we're charging or if we're in init
@@ -1215,9 +1369,9 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
1215 dev_dbg(di->dev, 1369 dev_dbg(di->dev,
1216 "capacity changed from %d to %d (%d)\n", 1370 "capacity changed from %d to %d (%d)\n",
1217 di->bat_cap.prev_percent, 1371 di->bat_cap.prev_percent,
1218 di->bat_cap.permille / 10, 1372 percent,
1219 di->bat_cap.permille); 1373 di->bat_cap.permille);
1220 di->bat_cap.prev_percent = di->bat_cap.permille / 10; 1374 di->bat_cap.prev_percent = percent;
1221 di->bat_cap.prev_mah = di->bat_cap.mah; 1375 di->bat_cap.prev_mah = di->bat_cap.mah;
1222 1376
1223 changed = true; 1377 changed = true;
@@ -1225,12 +1379,20 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
1225 dev_dbg(di->dev, "capacity not allowed to go up since " 1379 dev_dbg(di->dev, "capacity not allowed to go up since "
1226 "no charger is connected: %d to %d (%d)\n", 1380 "no charger is connected: %d to %d (%d)\n",
1227 di->bat_cap.prev_percent, 1381 di->bat_cap.prev_percent,
1228 di->bat_cap.permille / 10, 1382 percent,
1229 di->bat_cap.permille); 1383 di->bat_cap.permille);
1230 } 1384 }
1231 } 1385 }
1232 1386
1233 if (changed) { 1387 if (changed) {
1388 if (di->bm->capacity_scaling) {
1389 di->bat_cap.cap_scale.scaled_cap =
1390 ab8500_fg_calculate_scaled_capacity(di);
1391
1392 dev_info(di->dev, "capacity=%d (%d)\n",
1393 di->bat_cap.prev_percent,
1394 di->bat_cap.cap_scale.scaled_cap);
1395 }
1234 power_supply_changed(&di->fg_psy); 1396 power_supply_changed(&di->fg_psy);
1235 if (di->flags.fully_charged && di->flags.force_full) { 1397 if (di->flags.fully_charged && di->flags.force_full) {
1236 dev_dbg(di->dev, "Battery full, notifying.\n"); 1398 dev_dbg(di->dev, "Battery full, notifying.\n");
@@ -1284,7 +1446,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
1284 switch (di->charge_state) { 1446 switch (di->charge_state) {
1285 case AB8500_FG_CHARGE_INIT: 1447 case AB8500_FG_CHARGE_INIT:
1286 di->fg_samples = SEC_TO_SAMPLE( 1448 di->fg_samples = SEC_TO_SAMPLE(
1287 di->bat->fg_params->accu_charging); 1449 di->bm->fg_params->accu_charging);
1288 1450
1289 ab8500_fg_coulomb_counter(di, true); 1451 ab8500_fg_coulomb_counter(di, true);
1290 ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT); 1452 ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
@@ -1296,7 +1458,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
1296 * Read the FG and calculate the new capacity 1458 * Read the FG and calculate the new capacity
1297 */ 1459 */
1298 mutex_lock(&di->cc_lock); 1460 mutex_lock(&di->cc_lock);
1299 if (!di->flags.conv_done) { 1461 if (!di->flags.conv_done && !di->flags.force_full) {
1300 /* Wasn't the CC IRQ that got us here */ 1462 /* Wasn't the CC IRQ that got us here */
1301 mutex_unlock(&di->cc_lock); 1463 mutex_unlock(&di->cc_lock);
1302 dev_dbg(di->dev, "%s CC conv not done\n", 1464 dev_dbg(di->dev, "%s CC conv not done\n",
@@ -1346,8 +1508,8 @@ static bool check_sysfs_capacity(struct ab8500_fg *di)
1346 cap_permille = ab8500_fg_convert_mah_to_permille(di, 1508 cap_permille = ab8500_fg_convert_mah_to_permille(di,
1347 di->bat_cap.user_mah); 1509 di->bat_cap.user_mah);
1348 1510
1349 lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10; 1511 lower = di->bat_cap.permille - di->bm->fg_params->user_cap_limit * 10;
1350 upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10; 1512 upper = di->bat_cap.permille + di->bm->fg_params->user_cap_limit * 10;
1351 1513
1352 if (lower < 0) 1514 if (lower < 0)
1353 lower = 0; 1515 lower = 0;
@@ -1387,7 +1549,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1387 case AB8500_FG_DISCHARGE_INIT: 1549 case AB8500_FG_DISCHARGE_INIT:
1388 /* We use the FG IRQ to work on */ 1550 /* We use the FG IRQ to work on */
1389 di->init_cnt = 0; 1551 di->init_cnt = 0;
1390 di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer); 1552 di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
1391 ab8500_fg_coulomb_counter(di, true); 1553 ab8500_fg_coulomb_counter(di, true);
1392 ab8500_fg_discharge_state_to(di, 1554 ab8500_fg_discharge_state_to(di,
1393 AB8500_FG_DISCHARGE_INITMEASURING); 1555 AB8500_FG_DISCHARGE_INITMEASURING);
@@ -1400,18 +1562,17 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1400 * samples to get an initial capacity. 1562 * samples to get an initial capacity.
1401 * Then go to READOUT 1563 * Then go to READOUT
1402 */ 1564 */
1403 sleep_time = di->bat->fg_params->init_timer; 1565 sleep_time = di->bm->fg_params->init_timer;
1404 1566
1405 /* Discard the first [x] seconds */ 1567 /* Discard the first [x] seconds */
1406 if (di->init_cnt > 1568 if (di->init_cnt > di->bm->fg_params->init_discard_time) {
1407 di->bat->fg_params->init_discard_time) {
1408 ab8500_fg_calc_cap_discharge_voltage(di, true); 1569 ab8500_fg_calc_cap_discharge_voltage(di, true);
1409 1570
1410 ab8500_fg_check_capacity_limits(di, true); 1571 ab8500_fg_check_capacity_limits(di, true);
1411 } 1572 }
1412 1573
1413 di->init_cnt += sleep_time; 1574 di->init_cnt += sleep_time;
1414 if (di->init_cnt > di->bat->fg_params->init_total_time) 1575 if (di->init_cnt > di->bm->fg_params->init_total_time)
1415 ab8500_fg_discharge_state_to(di, 1576 ab8500_fg_discharge_state_to(di,
1416 AB8500_FG_DISCHARGE_READOUT_INIT); 1577 AB8500_FG_DISCHARGE_READOUT_INIT);
1417 1578
@@ -1426,7 +1587,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1426 /* Intentional fallthrough */ 1587 /* Intentional fallthrough */
1427 1588
1428 case AB8500_FG_DISCHARGE_RECOVERY: 1589 case AB8500_FG_DISCHARGE_RECOVERY:
1429 sleep_time = di->bat->fg_params->recovery_sleep_timer; 1590 sleep_time = di->bm->fg_params->recovery_sleep_timer;
1430 1591
1431 /* 1592 /*
1432 * We should check the power consumption 1593 * We should check the power consumption
@@ -1438,9 +1599,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1438 1599
1439 if (ab8500_fg_is_low_curr(di, di->inst_curr)) { 1600 if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
1440 if (di->recovery_cnt > 1601 if (di->recovery_cnt >
1441 di->bat->fg_params->recovery_total_time) { 1602 di->bm->fg_params->recovery_total_time) {
1442 di->fg_samples = SEC_TO_SAMPLE( 1603 di->fg_samples = SEC_TO_SAMPLE(
1443 di->bat->fg_params->accu_high_curr); 1604 di->bm->fg_params->accu_high_curr);
1444 ab8500_fg_coulomb_counter(di, true); 1605 ab8500_fg_coulomb_counter(di, true);
1445 ab8500_fg_discharge_state_to(di, 1606 ab8500_fg_discharge_state_to(di,
1446 AB8500_FG_DISCHARGE_READOUT); 1607 AB8500_FG_DISCHARGE_READOUT);
@@ -1453,7 +1614,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1453 di->recovery_cnt += sleep_time; 1614 di->recovery_cnt += sleep_time;
1454 } else { 1615 } else {
1455 di->fg_samples = SEC_TO_SAMPLE( 1616 di->fg_samples = SEC_TO_SAMPLE(
1456 di->bat->fg_params->accu_high_curr); 1617 di->bm->fg_params->accu_high_curr);
1457 ab8500_fg_coulomb_counter(di, true); 1618 ab8500_fg_coulomb_counter(di, true);
1458 ab8500_fg_discharge_state_to(di, 1619 ab8500_fg_discharge_state_to(di,
1459 AB8500_FG_DISCHARGE_READOUT); 1620 AB8500_FG_DISCHARGE_READOUT);
@@ -1462,7 +1623,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1462 1623
1463 case AB8500_FG_DISCHARGE_READOUT_INIT: 1624 case AB8500_FG_DISCHARGE_READOUT_INIT:
1464 di->fg_samples = SEC_TO_SAMPLE( 1625 di->fg_samples = SEC_TO_SAMPLE(
1465 di->bat->fg_params->accu_high_curr); 1626 di->bm->fg_params->accu_high_curr);
1466 ab8500_fg_coulomb_counter(di, true); 1627 ab8500_fg_coulomb_counter(di, true);
1467 ab8500_fg_discharge_state_to(di, 1628 ab8500_fg_discharge_state_to(di,
1468 AB8500_FG_DISCHARGE_READOUT); 1629 AB8500_FG_DISCHARGE_READOUT);
@@ -1480,7 +1641,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1480 1641
1481 if (di->recovery_needed) { 1642 if (di->recovery_needed) {
1482 ab8500_fg_discharge_state_to(di, 1643 ab8500_fg_discharge_state_to(di,
1483 AB8500_FG_DISCHARGE_RECOVERY); 1644 AB8500_FG_DISCHARGE_INIT_RECOVERY);
1484 1645
1485 queue_delayed_work(di->fg_wq, 1646 queue_delayed_work(di->fg_wq,
1486 &di->fg_periodic_work, 0); 1647 &di->fg_periodic_work, 0);
@@ -1509,9 +1670,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1509 } 1670 }
1510 1671
1511 di->high_curr_cnt += 1672 di->high_curr_cnt +=
1512 di->bat->fg_params->accu_high_curr; 1673 di->bm->fg_params->accu_high_curr;
1513 if (di->high_curr_cnt > 1674 if (di->high_curr_cnt >
1514 di->bat->fg_params->high_curr_time) 1675 di->bm->fg_params->high_curr_time)
1515 di->recovery_needed = true; 1676 di->recovery_needed = true;
1516 1677
1517 ab8500_fg_calc_cap_discharge_fg(di); 1678 ab8500_fg_calc_cap_discharge_fg(di);
@@ -1523,12 +1684,10 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
1523 1684
1524 case AB8500_FG_DISCHARGE_WAKEUP: 1685 case AB8500_FG_DISCHARGE_WAKEUP:
1525 ab8500_fg_coulomb_counter(di, true); 1686 ab8500_fg_coulomb_counter(di, true);
1526 di->inst_curr = ab8500_fg_inst_curr_blocking(di);
1527
1528 ab8500_fg_calc_cap_discharge_voltage(di, true); 1687 ab8500_fg_calc_cap_discharge_voltage(di, true);
1529 1688
1530 di->fg_samples = SEC_TO_SAMPLE( 1689 di->fg_samples = SEC_TO_SAMPLE(
1531 di->bat->fg_params->accu_high_curr); 1690 di->bm->fg_params->accu_high_curr);
1532 ab8500_fg_coulomb_counter(di, true); 1691 ab8500_fg_coulomb_counter(di, true);
1533 ab8500_fg_discharge_state_to(di, 1692 ab8500_fg_discharge_state_to(di,
1534 AB8500_FG_DISCHARGE_READOUT); 1693 AB8500_FG_DISCHARGE_READOUT);
@@ -1641,8 +1800,6 @@ static void ab8500_fg_periodic_work(struct work_struct *work)
1641 fg_periodic_work.work); 1800 fg_periodic_work.work);
1642 1801
1643 if (di->init_capacity) { 1802 if (di->init_capacity) {
1644 /* A dummy read that will return 0 */
1645 di->inst_curr = ab8500_fg_inst_curr_blocking(di);
1646 /* Get an initial capacity calculation */ 1803 /* Get an initial capacity calculation */
1647 ab8500_fg_calc_cap_discharge_voltage(di, true); 1804 ab8500_fg_calc_cap_discharge_voltage(di, true);
1648 ab8500_fg_check_capacity_limits(di, true); 1805 ab8500_fg_check_capacity_limits(di, true);
@@ -1684,24 +1841,26 @@ static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
1684 * If we have had a battery over-voltage situation, 1841 * If we have had a battery over-voltage situation,
1685 * check ovv-bit to see if it should be reset. 1842 * check ovv-bit to see if it should be reset.
1686 */ 1843 */
1687 if (di->flags.bat_ovv) { 1844 ret = abx500_get_register_interruptible(di->dev,
1688 ret = abx500_get_register_interruptible(di->dev, 1845 AB8500_CHARGER, AB8500_CH_STAT_REG,
1689 AB8500_CHARGER, AB8500_CH_STAT_REG, 1846 &reg_value);
1690 &reg_value); 1847 if (ret < 0) {
1691 if (ret < 0) { 1848 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1692 dev_err(di->dev, "%s ab8500 read failed\n", __func__); 1849 return;
1693 return; 1850 }
1694 } 1851 if ((reg_value & BATT_OVV) == BATT_OVV) {
1695 if ((reg_value & BATT_OVV) != BATT_OVV) { 1852 if (!di->flags.bat_ovv) {
1696 dev_dbg(di->dev, "Battery recovered from OVV\n"); 1853 dev_dbg(di->dev, "Battery OVV\n");
1697 di->flags.bat_ovv = false; 1854 di->flags.bat_ovv = true;
1698 power_supply_changed(&di->fg_psy); 1855 power_supply_changed(&di->fg_psy);
1699 return;
1700 } 1856 }
1701
1702 /* Not yet recovered from ovv, reschedule this test */ 1857 /* Not yet recovered from ovv, reschedule this test */
1703 queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 1858 queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
1704 round_jiffies(HZ)); 1859 HZ);
1860 } else {
1861 dev_dbg(di->dev, "Battery recovered from OVV\n");
1862 di->flags.bat_ovv = false;
1863 power_supply_changed(&di->fg_psy);
1705 } 1864 }
1706} 1865}
1707 1866
@@ -1721,26 +1880,30 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
1721 vbat = ab8500_fg_bat_voltage(di); 1880 vbat = ab8500_fg_bat_voltage(di);
1722 1881
1723 /* Check if LOW_BAT still fulfilled */ 1882 /* Check if LOW_BAT still fulfilled */
1724 if (vbat < di->bat->fg_params->lowbat_threshold) { 1883 if (vbat < di->bm->fg_params->lowbat_threshold) {
1725 di->flags.low_bat = true; 1884 /* Is it time to shut down? */
1726 dev_warn(di->dev, "Battery voltage still LOW\n"); 1885 if (di->low_bat_cnt < 1) {
1727 1886 di->flags.low_bat = true;
1728 /* 1887 dev_warn(di->dev, "Shut down pending...\n");
1729 * We need to re-schedule this check to be able to detect 1888 } else {
1730 * if the voltage increases again during charging 1889 /*
1731 */ 1890 * Else we need to re-schedule this check to be able to detect
1732 queue_delayed_work(di->fg_wq, &di->fg_low_bat_work, 1891 * if the voltage increases again during charging or
1733 round_jiffies(LOW_BAT_CHECK_INTERVAL)); 1892 * due to decreasing load.
1893 */
1894 di->low_bat_cnt--;
1895 dev_warn(di->dev, "Battery voltage still LOW\n");
1896 queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
1897 round_jiffies(LOW_BAT_CHECK_INTERVAL));
1898 }
1734 } else { 1899 } else {
1735 di->flags.low_bat = false; 1900 di->flags.low_bat_delay = false;
1901 di->low_bat_cnt = 10;
1736 dev_warn(di->dev, "Battery voltage OK again\n"); 1902 dev_warn(di->dev, "Battery voltage OK again\n");
1737 } 1903 }
1738 1904
1739 /* This is needed to dispatch LOW_BAT */ 1905 /* This is needed to dispatch LOW_BAT */
1740 ab8500_fg_check_capacity_limits(di, false); 1906 ab8500_fg_check_capacity_limits(di, false);
1741
1742 /* Set this flag to check if LOW_BAT IRQ still occurs */
1743 di->flags.low_bat_delay = false;
1744} 1907}
1745 1908
1746/** 1909/**
@@ -1779,8 +1942,8 @@ static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
1779 int ret; 1942 int ret;
1780 int new_val; 1943 int new_val;
1781 1944
1782 sel0 = di->bat->fg_params->battok_falling_th_sel0; 1945 sel0 = di->bm->fg_params->battok_falling_th_sel0;
1783 sel1 = di->bat->fg_params->battok_raising_th_sel1; 1946 sel1 = di->bm->fg_params->battok_raising_th_sel1;
1784 1947
1785 cbp_sel0 = ab8500_fg_battok_calc(di, sel0); 1948 cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
1786 cbp_sel1 = ab8500_fg_battok_calc(di, sel1); 1949 cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
@@ -1828,7 +1991,13 @@ static void ab8500_fg_instant_work(struct work_struct *work)
1828static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di) 1991static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
1829{ 1992{
1830 struct ab8500_fg *di = _di; 1993 struct ab8500_fg *di = _di;
1831 complete(&di->ab8500_fg_complete); 1994 if (!di->nbr_cceoc_irq_cnt) {
1995 di->nbr_cceoc_irq_cnt++;
1996 complete(&di->ab8500_fg_started);
1997 } else {
1998 di->nbr_cceoc_irq_cnt = 0;
1999 complete(&di->ab8500_fg_complete);
2000 }
1832 return IRQ_HANDLED; 2001 return IRQ_HANDLED;
1833} 2002}
1834 2003
@@ -1875,8 +2044,6 @@ static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
1875 struct ab8500_fg *di = _di; 2044 struct ab8500_fg *di = _di;
1876 2045
1877 dev_dbg(di->dev, "Battery OVV\n"); 2046 dev_dbg(di->dev, "Battery OVV\n");
1878 di->flags.bat_ovv = true;
1879 power_supply_changed(&di->fg_psy);
1880 2047
1881 /* Schedule a new HW failure check */ 2048 /* Schedule a new HW failure check */
1882 queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0); 2049 queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);
@@ -1895,6 +2062,7 @@ static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
1895{ 2062{
1896 struct ab8500_fg *di = _di; 2063 struct ab8500_fg *di = _di;
1897 2064
2065 /* Initiate handling in ab8500_fg_low_bat_work() if not already initiated. */
1898 if (!di->flags.low_bat_delay) { 2066 if (!di->flags.low_bat_delay) {
1899 dev_warn(di->dev, "Battery voltage is below LOW threshold\n"); 2067 dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
1900 di->flags.low_bat_delay = true; 2068 di->flags.low_bat_delay = true;
@@ -1963,7 +2131,7 @@ static int ab8500_fg_get_property(struct power_supply *psy,
1963 di->bat_cap.max_mah); 2131 di->bat_cap.max_mah);
1964 break; 2132 break;
1965 case POWER_SUPPLY_PROP_ENERGY_NOW: 2133 case POWER_SUPPLY_PROP_ENERGY_NOW:
1966 if (di->flags.batt_unknown && !di->bat->chg_unknown_bat && 2134 if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
1967 di->flags.batt_id_received) 2135 di->flags.batt_id_received)
1968 val->intval = ab8500_fg_convert_mah_to_uwh(di, 2136 val->intval = ab8500_fg_convert_mah_to_uwh(di,
1969 di->bat_cap.max_mah); 2137 di->bat_cap.max_mah);
@@ -1978,21 +2146,23 @@ static int ab8500_fg_get_property(struct power_supply *psy,
1978 val->intval = di->bat_cap.max_mah; 2146 val->intval = di->bat_cap.max_mah;
1979 break; 2147 break;
1980 case POWER_SUPPLY_PROP_CHARGE_NOW: 2148 case POWER_SUPPLY_PROP_CHARGE_NOW:
1981 if (di->flags.batt_unknown && !di->bat->chg_unknown_bat && 2149 if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
1982 di->flags.batt_id_received) 2150 di->flags.batt_id_received)
1983 val->intval = di->bat_cap.max_mah; 2151 val->intval = di->bat_cap.max_mah;
1984 else 2152 else
1985 val->intval = di->bat_cap.prev_mah; 2153 val->intval = di->bat_cap.prev_mah;
1986 break; 2154 break;
1987 case POWER_SUPPLY_PROP_CAPACITY: 2155 case POWER_SUPPLY_PROP_CAPACITY:
1988 if (di->flags.batt_unknown && !di->bat->chg_unknown_bat && 2156 if (di->bm->capacity_scaling)
2157 val->intval = di->bat_cap.cap_scale.scaled_cap;
2158 else if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
1989 di->flags.batt_id_received) 2159 di->flags.batt_id_received)
1990 val->intval = 100; 2160 val->intval = 100;
1991 else 2161 else
1992 val->intval = di->bat_cap.prev_percent; 2162 val->intval = di->bat_cap.prev_percent;
1993 break; 2163 break;
1994 case POWER_SUPPLY_PROP_CAPACITY_LEVEL: 2164 case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
1995 if (di->flags.batt_unknown && !di->bat->chg_unknown_bat && 2165 if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
1996 di->flags.batt_id_received) 2166 di->flags.batt_id_received)
1997 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; 2167 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
1998 else 2168 else
@@ -2049,6 +2219,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
2049 break; 2219 break;
2050 di->flags.charging = false; 2220 di->flags.charging = false;
2051 di->flags.fully_charged = false; 2221 di->flags.fully_charged = false;
2222 if (di->bm->capacity_scaling)
2223 ab8500_fg_update_cap_scalers(di);
2052 queue_work(di->fg_wq, &di->fg_work); 2224 queue_work(di->fg_wq, &di->fg_work);
2053 break; 2225 break;
2054 case POWER_SUPPLY_STATUS_FULL: 2226 case POWER_SUPPLY_STATUS_FULL:
@@ -2061,10 +2233,13 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
2061 queue_work(di->fg_wq, &di->fg_work); 2233 queue_work(di->fg_wq, &di->fg_work);
2062 break; 2234 break;
2063 case POWER_SUPPLY_STATUS_CHARGING: 2235 case POWER_SUPPLY_STATUS_CHARGING:
2064 if (di->flags.charging) 2236 if (di->flags.charging &&
2237 !di->flags.fully_charged)
2065 break; 2238 break;
2066 di->flags.charging = true; 2239 di->flags.charging = true;
2067 di->flags.fully_charged = false; 2240 di->flags.fully_charged = false;
2241 if (di->bm->capacity_scaling)
2242 ab8500_fg_update_cap_scalers(di);
2068 queue_work(di->fg_wq, &di->fg_work); 2243 queue_work(di->fg_wq, &di->fg_work);
2069 break; 2244 break;
2070 }; 2245 };
@@ -2075,10 +2250,11 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
2075 case POWER_SUPPLY_PROP_TECHNOLOGY: 2250 case POWER_SUPPLY_PROP_TECHNOLOGY:
2076 switch (ext->type) { 2251 switch (ext->type) {
2077 case POWER_SUPPLY_TYPE_BATTERY: 2252 case POWER_SUPPLY_TYPE_BATTERY:
2078 if (!di->flags.batt_id_received) { 2253 if (!di->flags.batt_id_received &&
2254 di->bm->batt_id != BATTERY_UNKNOWN) {
2079 const struct abx500_battery_type *b; 2255 const struct abx500_battery_type *b;
2080 2256
2081 b = &(di->bat->bat_type[di->bat->batt_id]); 2257 b = &(di->bm->bat_type[di->bm->batt_id]);
2082 2258
2083 di->flags.batt_id_received = true; 2259 di->flags.batt_id_received = true;
2084 2260
@@ -2104,8 +2280,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
2104 case POWER_SUPPLY_PROP_TEMP: 2280 case POWER_SUPPLY_PROP_TEMP:
2105 switch (ext->type) { 2281 switch (ext->type) {
2106 case POWER_SUPPLY_TYPE_BATTERY: 2282 case POWER_SUPPLY_TYPE_BATTERY:
2107 if (di->flags.batt_id_received) 2283 if (di->flags.batt_id_received)
2108 di->bat_temp = ret.intval; 2284 di->bat_temp = ret.intval;
2109 break; 2285 break;
2110 default: 2286 default:
2111 break; 2287 break;
@@ -2155,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
2155 AB8500_SYS_CTRL2_BLOCK, 2331 AB8500_SYS_CTRL2_BLOCK,
2156 AB8500_LOW_BAT_REG, 2332 AB8500_LOW_BAT_REG,
2157 ab8500_volt_to_regval( 2333 ab8500_volt_to_regval(
2158 di->bat->fg_params->lowbat_threshold) << 1 | 2334 di->bm->fg_params->lowbat_threshold) << 1 |
2159 LOW_BAT_ENABLE); 2335 LOW_BAT_ENABLE);
2160 if (ret) { 2336 if (ret) {
2161 dev_err(di->dev, "%s write failed\n", __func__); 2337 dev_err(di->dev, "%s write failed\n", __func__);
@@ -2395,6 +2571,11 @@ static int ab8500_fg_suspend(struct platform_device *pdev,
2395 struct ab8500_fg *di = platform_get_drvdata(pdev); 2571 struct ab8500_fg *di = platform_get_drvdata(pdev);
2396 2572
2397 flush_delayed_work(&di->fg_periodic_work); 2573 flush_delayed_work(&di->fg_periodic_work);
2574 flush_work(&di->fg_work);
2575 flush_work(&di->fg_acc_cur_work);
2576 flush_delayed_work(&di->fg_reinit_work);
2577 flush_delayed_work(&di->fg_low_bat_work);
2578 flush_delayed_work(&di->fg_check_hw_failure_work);
2398 2579
2399 /* 2580 /*
2400 * If the FG is enabled we will disable it before going to suspend 2581 * If the FG is enabled we will disable it before going to suspend
@@ -2448,6 +2629,7 @@ static char *supply_interface[] = {
2448static int ab8500_fg_probe(struct platform_device *pdev) 2629static int ab8500_fg_probe(struct platform_device *pdev)
2449{ 2630{
2450 struct device_node *np = pdev->dev.of_node; 2631 struct device_node *np = pdev->dev.of_node;
2632 struct abx500_bm_data *plat = pdev->dev.platform_data;
2451 struct ab8500_fg *di; 2633 struct ab8500_fg *di;
2452 int i, irq; 2634 int i, irq;
2453 int ret = 0; 2635 int ret = 0;
@@ -2457,21 +2639,19 @@ static int ab8500_fg_probe(struct platform_device *pdev)
2457 dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__); 2639 dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
2458 return -ENOMEM; 2640 return -ENOMEM;
2459 } 2641 }
2460 di->bat = pdev->mfd_cell->platform_data; 2642
2461 if (!di->bat) { 2643 if (!plat) {
2462 if (np) { 2644 dev_err(&pdev->dev, "no battery management data supplied\n");
2463 ret = bmdevs_of_probe(&pdev->dev, np, &di->bat); 2645 return -EINVAL;
2464 if (ret) { 2646 }
2465 dev_err(&pdev->dev, 2647 di->bm = plat;
2466 "failed to get battery information\n"); 2648
2467 return ret; 2649 if (np) {
2468 } 2650 ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
2469 } else { 2651 if (ret) {
2470 dev_err(&pdev->dev, "missing dt node for ab8500_fg\n"); 2652 dev_err(&pdev->dev, "failed to get battery information\n");
2471 return -EINVAL; 2653 return ret;
2472 } 2654 }
2473 } else {
2474 dev_info(&pdev->dev, "falling back to legacy platform data\n");
2475 } 2655 }
2476 2656
2477 mutex_init(&di->cc_lock); 2657 mutex_init(&di->cc_lock);
@@ -2491,11 +2671,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
2491 di->fg_psy.external_power_changed = ab8500_fg_external_power_changed; 2671 di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
2492 2672
2493 di->bat_cap.max_mah_design = MILLI_TO_MICRO * 2673 di->bat_cap.max_mah_design = MILLI_TO_MICRO *
2494 di->bat->bat_type[di->bat->batt_id].charge_full_design; 2674 di->bm->bat_type[di->bm->batt_id].charge_full_design;
2495 2675
2496 di->bat_cap.max_mah = di->bat_cap.max_mah_design; 2676 di->bat_cap.max_mah = di->bat_cap.max_mah_design;
2497 2677
2498 di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage; 2678 di->vbat_nom = di->bm->bat_type[di->bm->batt_id].nominal_voltage;
2499 2679
2500 di->init_capacity = true; 2680 di->init_capacity = true;
2501 2681
@@ -2531,6 +2711,12 @@ static int ab8500_fg_probe(struct platform_device *pdev)
2531 INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work, 2711 INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
2532 ab8500_fg_check_hw_failure_work); 2712 ab8500_fg_check_hw_failure_work);
2533 2713
2714 /* Reset battery low voltage flag */
2715 di->flags.low_bat = false;
2716
2717 /* Initialize low battery counter */
2718 di->low_bat_cnt = 10;
2719
2534 /* Initialize OVV, and other registers */ 2720 /* Initialize OVV, and other registers */
2535 ret = ab8500_fg_init_hw_registers(di); 2721 ret = ab8500_fg_init_hw_registers(di);
2536 if (ret) { 2722 if (ret) {
@@ -2549,10 +2735,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
2549 goto free_inst_curr_wq; 2735 goto free_inst_curr_wq;
2550 } 2736 }
2551 2737
2552 di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer); 2738 di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
2553 ab8500_fg_coulomb_counter(di, true); 2739 ab8500_fg_coulomb_counter(di, true);
2554 2740
2555 /* Initialize completion used to notify completion of inst current */ 2741 /*
2742 * Initialize completion used to notify completion and start
2743 * of inst current
2744 */
2745 init_completion(&di->ab8500_fg_started);
2556 init_completion(&di->ab8500_fg_complete); 2746 init_completion(&di->ab8500_fg_complete);
2557 2747
2558 /* Register interrupts */ 2748 /* Register interrupts */
@@ -2572,6 +2762,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
2572 } 2762 }
2573 di->irq = platform_get_irq_byname(pdev, "CCEOC"); 2763 di->irq = platform_get_irq_byname(pdev, "CCEOC");
2574 disable_irq(di->irq); 2764 disable_irq(di->irq);
2765 di->nbr_cceoc_irq_cnt = 0;
2575 2766
2576 platform_set_drvdata(pdev, di); 2767 platform_set_drvdata(pdev, di);
2577 2768
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 297089146064..f043c0851a76 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -33,9 +33,6 @@
33/* End-of-charge criteria counter */ 33/* End-of-charge criteria counter */
34#define EOC_COND_CNT 10 34#define EOC_COND_CNT 10
35 35
36/* Recharge criteria counter */
37#define RCH_COND_CNT 3
38
39#define to_abx500_chargalg_device_info(x) container_of((x), \ 36#define to_abx500_chargalg_device_info(x) container_of((x), \
40 struct abx500_chargalg, chargalg_psy); 37 struct abx500_chargalg, chargalg_psy);
41 38
@@ -196,7 +193,6 @@ enum maxim_ret {
196 * @dev: pointer to the structure device 193 * @dev: pointer to the structure device
197 * @charge_status: battery operating status 194 * @charge_status: battery operating status
198 * @eoc_cnt: counter used to determine end-of_charge 195 * @eoc_cnt: counter used to determine end-of_charge
199 * @rch_cnt: counter used to determine start of recharge
200 * @maintenance_chg: indicate if maintenance charge is active 196 * @maintenance_chg: indicate if maintenance charge is active
201 * @t_hyst_norm temperature hysteresis when the temperature has been 197 * @t_hyst_norm temperature hysteresis when the temperature has been
202 * over or under normal limits 198 * over or under normal limits
@@ -207,7 +203,7 @@ enum maxim_ret {
207 * @chg_info: information about connected charger types 203 * @chg_info: information about connected charger types
208 * @batt_data: data of the battery 204 * @batt_data: data of the battery
209 * @susp_status: current charger suspension status 205 * @susp_status: current charger suspension status
210 * @bat: pointer to the abx500_bm platform data 206 * @bm: Platform specific battery management information
211 * @chargalg_psy: structure that holds the battery properties exposed by 207 * @chargalg_psy: structure that holds the battery properties exposed by
212 * the charging algorithm 208 * the charging algorithm
213 * @events: structure for information about events triggered 209 * @events: structure for information about events triggered
@@ -223,7 +219,6 @@ struct abx500_chargalg {
223 struct device *dev; 219 struct device *dev;
224 int charge_status; 220 int charge_status;
225 int eoc_cnt; 221 int eoc_cnt;
226 int rch_cnt;
227 bool maintenance_chg; 222 bool maintenance_chg;
228 int t_hyst_norm; 223 int t_hyst_norm;
229 int t_hyst_lowhigh; 224 int t_hyst_lowhigh;
@@ -232,7 +227,7 @@ struct abx500_chargalg {
232 struct abx500_chargalg_charger_info chg_info; 227 struct abx500_chargalg_charger_info chg_info;
233 struct abx500_chargalg_battery_data batt_data; 228 struct abx500_chargalg_battery_data batt_data;
234 struct abx500_chargalg_suspension_status susp_status; 229 struct abx500_chargalg_suspension_status susp_status;
235 struct abx500_bm_data *bat; 230 struct abx500_bm_data *bm;
236 struct power_supply chargalg_psy; 231 struct power_supply chargalg_psy;
237 struct ux500_charger *ac_chg; 232 struct ux500_charger *ac_chg;
238 struct ux500_charger *usb_chg; 233 struct ux500_charger *usb_chg;
@@ -367,13 +362,13 @@ static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
367 case AC_CHG: 362 case AC_CHG:
368 timer_expiration = 363 timer_expiration =
369 round_jiffies(jiffies + 364 round_jiffies(jiffies +
370 (di->bat->main_safety_tmr_h * 3600 * HZ)); 365 (di->bm->main_safety_tmr_h * 3600 * HZ));
371 break; 366 break;
372 367
373 case USB_CHG: 368 case USB_CHG:
374 timer_expiration = 369 timer_expiration =
375 round_jiffies(jiffies + 370 round_jiffies(jiffies +
376 (di->bat->usb_safety_tmr_h * 3600 * HZ)); 371 (di->bm->usb_safety_tmr_h * 3600 * HZ));
377 break; 372 break;
378 373
379 default: 374 default:
@@ -450,8 +445,18 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
450{ 445{
451 /* Check if charger exists and kick watchdog if charging */ 446 /* Check if charger exists and kick watchdog if charging */
452 if (di->ac_chg && di->ac_chg->ops.kick_wd && 447 if (di->ac_chg && di->ac_chg->ops.kick_wd &&
453 di->chg_info.online_chg & AC_CHG) 448 di->chg_info.online_chg & AC_CHG) {
449 /*
450 * If AB charger watchdog expired, pm2xxx charging
451 * gets disabled. To be safe, kick both AB charger watchdog
452 * and pm2xxx watchdog.
453 */
454 if (di->ac_chg->external &&
455 di->usb_chg && di->usb_chg->ops.kick_wd)
456 di->usb_chg->ops.kick_wd(di->usb_chg);
457
454 return di->ac_chg->ops.kick_wd(di->ac_chg); 458 return di->ac_chg->ops.kick_wd(di->ac_chg);
459 }
455 else if (di->usb_chg && di->usb_chg->ops.kick_wd && 460 else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
456 di->chg_info.online_chg & USB_CHG) 461 di->chg_info.online_chg & USB_CHG)
457 return di->usb_chg->ops.kick_wd(di->usb_chg); 462 return di->usb_chg->ops.kick_wd(di->usb_chg);
@@ -608,6 +613,8 @@ static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
608static void abx500_chargalg_start_charging(struct abx500_chargalg *di, 613static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
609 int vset, int iset) 614 int vset, int iset)
610{ 615{
616 bool start_chargalg_wd = true;
617
611 switch (di->chg_info.charger_type) { 618 switch (di->chg_info.charger_type) {
612 case AC_CHG: 619 case AC_CHG:
613 dev_dbg(di->dev, 620 dev_dbg(di->dev,
@@ -625,8 +632,12 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
625 632
626 default: 633 default:
627 dev_err(di->dev, "Unknown charger to charge from\n"); 634 dev_err(di->dev, "Unknown charger to charge from\n");
635 start_chargalg_wd = false;
628 break; 636 break;
629 } 637 }
638
639 if (start_chargalg_wd && !delayed_work_pending(&di->chargalg_wd_work))
640 queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
630} 641}
631 642
632/** 643/**
@@ -638,32 +649,32 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
638 */ 649 */
639static void abx500_chargalg_check_temp(struct abx500_chargalg *di) 650static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
640{ 651{
641 if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) && 652 if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
642 di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) { 653 di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
643 /* Temp OK! */ 654 /* Temp OK! */
644 di->events.btemp_underover = false; 655 di->events.btemp_underover = false;
645 di->events.btemp_lowhigh = false; 656 di->events.btemp_lowhigh = false;
646 di->t_hyst_norm = 0; 657 di->t_hyst_norm = 0;
647 di->t_hyst_lowhigh = 0; 658 di->t_hyst_lowhigh = 0;
648 } else { 659 } else {
649 if (((di->batt_data.temp >= di->bat->temp_high) && 660 if (((di->batt_data.temp >= di->bm->temp_high) &&
650 (di->batt_data.temp < 661 (di->batt_data.temp <
651 (di->bat->temp_over - di->t_hyst_lowhigh))) || 662 (di->bm->temp_over - di->t_hyst_lowhigh))) ||
652 ((di->batt_data.temp > 663 ((di->batt_data.temp >
653 (di->bat->temp_under + di->t_hyst_lowhigh)) && 664 (di->bm->temp_under + di->t_hyst_lowhigh)) &&
654 (di->batt_data.temp <= di->bat->temp_low))) { 665 (di->batt_data.temp <= di->bm->temp_low))) {
655 /* TEMP minor!!!!! */ 666 /* TEMP minor!!!!! */
656 di->events.btemp_underover = false; 667 di->events.btemp_underover = false;
657 di->events.btemp_lowhigh = true; 668 di->events.btemp_lowhigh = true;
658 di->t_hyst_norm = di->bat->temp_hysteresis; 669 di->t_hyst_norm = di->bm->temp_hysteresis;
659 di->t_hyst_lowhigh = 0; 670 di->t_hyst_lowhigh = 0;
660 } else if (di->batt_data.temp <= di->bat->temp_under || 671 } else if (di->batt_data.temp <= di->bm->temp_under ||
661 di->batt_data.temp >= di->bat->temp_over) { 672 di->batt_data.temp >= di->bm->temp_over) {
662 /* TEMP major!!!!! */ 673 /* TEMP major!!!!! */
663 di->events.btemp_underover = true; 674 di->events.btemp_underover = true;
664 di->events.btemp_lowhigh = false; 675 di->events.btemp_lowhigh = false;
665 di->t_hyst_norm = 0; 676 di->t_hyst_norm = 0;
666 di->t_hyst_lowhigh = di->bat->temp_hysteresis; 677 di->t_hyst_lowhigh = di->bm->temp_hysteresis;
667 } else { 678 } else {
668 /* Within hysteresis */ 679 /* Within hysteresis */
669 dev_dbg(di->dev, "Within hysteresis limit temp: %d " 680 dev_dbg(di->dev, "Within hysteresis limit temp: %d "
@@ -682,12 +693,12 @@ static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
682 */ 693 */
683static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di) 694static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
684{ 695{
685 if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max) 696 if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
686 di->chg_info.usb_chg_ok = false; 697 di->chg_info.usb_chg_ok = false;
687 else 698 else
688 di->chg_info.usb_chg_ok = true; 699 di->chg_info.usb_chg_ok = true;
689 700
690 if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max) 701 if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
691 di->chg_info.ac_chg_ok = false; 702 di->chg_info.ac_chg_ok = false;
692 else 703 else
693 di->chg_info.ac_chg_ok = true; 704 di->chg_info.ac_chg_ok = true;
@@ -707,10 +718,10 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
707 if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING && 718 if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
708 di->charge_state == STATE_NORMAL && 719 di->charge_state == STATE_NORMAL &&
709 !di->maintenance_chg && (di->batt_data.volt >= 720 !di->maintenance_chg && (di->batt_data.volt >=
710 di->bat->bat_type[di->bat->batt_id].termination_vol || 721 di->bm->bat_type[di->bm->batt_id].termination_vol ||
711 di->events.usb_cv_active || di->events.ac_cv_active) && 722 di->events.usb_cv_active || di->events.ac_cv_active) &&
712 di->batt_data.avg_curr < 723 di->batt_data.avg_curr <
713 di->bat->bat_type[di->bat->batt_id].termination_curr && 724 di->bm->bat_type[di->bm->batt_id].termination_curr &&
714 di->batt_data.avg_curr > 0) { 725 di->batt_data.avg_curr > 0) {
715 if (++di->eoc_cnt >= EOC_COND_CNT) { 726 if (++di->eoc_cnt >= EOC_COND_CNT) {
716 di->eoc_cnt = 0; 727 di->eoc_cnt = 0;
@@ -733,12 +744,12 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
733static void init_maxim_chg_curr(struct abx500_chargalg *di) 744static void init_maxim_chg_curr(struct abx500_chargalg *di)
734{ 745{
735 di->ccm.original_iset = 746 di->ccm.original_iset =
736 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl; 747 di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
737 di->ccm.current_iset = 748 di->ccm.current_iset =
738 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl; 749 di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
739 di->ccm.test_delta_i = di->bat->maxi->charger_curr_step; 750 di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
740 di->ccm.max_current = di->bat->maxi->chg_curr; 751 di->ccm.max_current = di->bm->maxi->chg_curr;
741 di->ccm.condition_cnt = di->bat->maxi->wait_cycles; 752 di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
742 di->ccm.level = 0; 753 di->ccm.level = 0;
743} 754}
744 755
@@ -755,7 +766,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
755{ 766{
756 int delta_i; 767 int delta_i;
757 768
758 if (!di->bat->maxi->ena_maxi) 769 if (!di->bm->maxi->ena_maxi)
759 return MAXIM_RET_NOACTION; 770 return MAXIM_RET_NOACTION;
760 771
761 delta_i = di->ccm.original_iset - di->batt_data.inst_curr; 772 delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
@@ -766,7 +777,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
766 if (di->ccm.wait_cnt == 0) { 777 if (di->ccm.wait_cnt == 0) {
767 dev_dbg(di->dev, "lowering current\n"); 778 dev_dbg(di->dev, "lowering current\n");
768 di->ccm.wait_cnt++; 779 di->ccm.wait_cnt++;
769 di->ccm.condition_cnt = di->bat->maxi->wait_cycles; 780 di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
770 di->ccm.max_current = 781 di->ccm.max_current =
771 di->ccm.current_iset - di->ccm.test_delta_i; 782 di->ccm.current_iset - di->ccm.test_delta_i;
772 di->ccm.current_iset = di->ccm.max_current; 783 di->ccm.current_iset = di->ccm.max_current;
@@ -791,7 +802,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
791 if (di->ccm.current_iset == di->ccm.original_iset) 802 if (di->ccm.current_iset == di->ccm.original_iset)
792 return MAXIM_RET_NOACTION; 803 return MAXIM_RET_NOACTION;
793 804
794 di->ccm.condition_cnt = di->bat->maxi->wait_cycles; 805 di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
795 di->ccm.current_iset = di->ccm.original_iset; 806 di->ccm.current_iset = di->ccm.original_iset;
796 di->ccm.level = 0; 807 di->ccm.level = 0;
797 808
@@ -803,7 +814,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
803 di->ccm.max_current) { 814 di->ccm.max_current) {
804 if (di->ccm.condition_cnt-- == 0) { 815 if (di->ccm.condition_cnt-- == 0) {
805 /* Increse the iset with cco.test_delta_i */ 816 /* Increse the iset with cco.test_delta_i */
806 di->ccm.condition_cnt = di->bat->maxi->wait_cycles; 817 di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
807 di->ccm.current_iset += di->ccm.test_delta_i; 818 di->ccm.current_iset += di->ccm.test_delta_i;
808 di->ccm.level++; 819 di->ccm.level++;
809 dev_dbg(di->dev, " Maximization needed, increase" 820 dev_dbg(di->dev, " Maximization needed, increase"
@@ -818,7 +829,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
818 return MAXIM_RET_NOACTION; 829 return MAXIM_RET_NOACTION;
819 } 830 }
820 } else { 831 } else {
821 di->ccm.condition_cnt = di->bat->maxi->wait_cycles; 832 di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
822 return MAXIM_RET_NOACTION; 833 return MAXIM_RET_NOACTION;
823 } 834 }
824} 835}
@@ -838,7 +849,7 @@ static void handle_maxim_chg_curr(struct abx500_chargalg *di)
838 break; 849 break;
839 case MAXIM_RET_IBAT_TOO_HIGH: 850 case MAXIM_RET_IBAT_TOO_HIGH:
840 result = abx500_chargalg_update_chg_curr(di, 851 result = abx500_chargalg_update_chg_curr(di,
841 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl); 852 di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
842 if (result) 853 if (result)
843 dev_err(di->dev, "failed to set chg curr\n"); 854 dev_err(di->dev, "failed to set chg curr\n");
844 break; 855 break;
@@ -858,6 +869,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
858 union power_supply_propval ret; 869 union power_supply_propval ret;
859 int i, j; 870 int i, j;
860 bool psy_found = false; 871 bool psy_found = false;
872 bool capacity_updated = false;
861 873
862 psy = (struct power_supply *)data; 874 psy = (struct power_supply *)data;
863 ext = dev_get_drvdata(dev); 875 ext = dev_get_drvdata(dev);
@@ -870,6 +882,16 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
870 if (!psy_found) 882 if (!psy_found)
871 return 0; 883 return 0;
872 884
885 /*
886 * If external is not registering 'POWER_SUPPLY_PROP_CAPACITY' to its
887 * property because of handling that sysfs entry on its own, this is
888 * the place to get the battery capacity.
889 */
890 if (!ext->get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
891 di->batt_data.percent = ret.intval;
892 capacity_updated = true;
893 }
894
873 /* Go through all properties for the psy */ 895 /* Go through all properties for the psy */
874 for (j = 0; j < ext->num_properties; j++) { 896 for (j = 0; j < ext->num_properties; j++) {
875 enum power_supply_property prop; 897 enum power_supply_property prop;
@@ -1154,7 +1176,8 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
1154 } 1176 }
1155 break; 1177 break;
1156 case POWER_SUPPLY_PROP_CAPACITY: 1178 case POWER_SUPPLY_PROP_CAPACITY:
1157 di->batt_data.percent = ret.intval; 1179 if (!capacity_updated)
1180 di->batt_data.percent = ret.intval;
1158 break; 1181 break;
1159 default: 1182 default:
1160 break; 1183 break;
@@ -1210,7 +1233,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1210 * this way 1233 * this way
1211 */ 1234 */
1212 if (!charger_status || 1235 if (!charger_status ||
1213 (di->events.batt_unknown && !di->bat->chg_unknown_bat)) { 1236 (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
1214 if (di->charge_state != STATE_HANDHELD) { 1237 if (di->charge_state != STATE_HANDHELD) {
1215 di->events.safety_timer_expired = false; 1238 di->events.safety_timer_expired = false;
1216 abx500_chargalg_state_to(di, STATE_HANDHELD_INIT); 1239 abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
@@ -1394,8 +1417,8 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1394 1417
1395 case STATE_NORMAL_INIT: 1418 case STATE_NORMAL_INIT:
1396 abx500_chargalg_start_charging(di, 1419 abx500_chargalg_start_charging(di,
1397 di->bat->bat_type[di->bat->batt_id].normal_vol_lvl, 1420 di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
1398 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl); 1421 di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
1399 abx500_chargalg_state_to(di, STATE_NORMAL); 1422 abx500_chargalg_state_to(di, STATE_NORMAL);
1400 abx500_chargalg_start_safety_timer(di); 1423 abx500_chargalg_start_safety_timer(di);
1401 abx500_chargalg_stop_maintenance_timer(di); 1424 abx500_chargalg_stop_maintenance_timer(di);
@@ -1411,7 +1434,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1411 handle_maxim_chg_curr(di); 1434 handle_maxim_chg_curr(di);
1412 if (di->charge_status == POWER_SUPPLY_STATUS_FULL && 1435 if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
1413 di->maintenance_chg) { 1436 di->maintenance_chg) {
1414 if (di->bat->no_maintenance) 1437 if (di->bm->no_maintenance)
1415 abx500_chargalg_state_to(di, 1438 abx500_chargalg_state_to(di,
1416 STATE_WAIT_FOR_RECHARGE_INIT); 1439 STATE_WAIT_FOR_RECHARGE_INIT);
1417 else 1440 else
@@ -1424,28 +1447,25 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1424 case STATE_WAIT_FOR_RECHARGE_INIT: 1447 case STATE_WAIT_FOR_RECHARGE_INIT:
1425 abx500_chargalg_hold_charging(di); 1448 abx500_chargalg_hold_charging(di);
1426 abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE); 1449 abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
1427 di->rch_cnt = RCH_COND_CNT;
1428 /* Intentional fallthrough */ 1450 /* Intentional fallthrough */
1429 1451
1430 case STATE_WAIT_FOR_RECHARGE: 1452 case STATE_WAIT_FOR_RECHARGE:
1431 if (di->batt_data.volt <= 1453 if (di->batt_data.percent <=
1432 di->bat->bat_type[di->bat->batt_id].recharge_vol) { 1454 di->bm->bat_type[di->bm->batt_id].
1433 if (di->rch_cnt-- == 0) 1455 recharge_cap)
1434 abx500_chargalg_state_to(di, STATE_NORMAL_INIT); 1456 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1435 } else
1436 di->rch_cnt = RCH_COND_CNT;
1437 break; 1457 break;
1438 1458
1439 case STATE_MAINTENANCE_A_INIT: 1459 case STATE_MAINTENANCE_A_INIT:
1440 abx500_chargalg_stop_safety_timer(di); 1460 abx500_chargalg_stop_safety_timer(di);
1441 abx500_chargalg_start_maintenance_timer(di, 1461 abx500_chargalg_start_maintenance_timer(di,
1442 di->bat->bat_type[ 1462 di->bm->bat_type[
1443 di->bat->batt_id].maint_a_chg_timer_h); 1463 di->bm->batt_id].maint_a_chg_timer_h);
1444 abx500_chargalg_start_charging(di, 1464 abx500_chargalg_start_charging(di,
1445 di->bat->bat_type[ 1465 di->bm->bat_type[
1446 di->bat->batt_id].maint_a_vol_lvl, 1466 di->bm->batt_id].maint_a_vol_lvl,
1447 di->bat->bat_type[ 1467 di->bm->bat_type[
1448 di->bat->batt_id].maint_a_cur_lvl); 1468 di->bm->batt_id].maint_a_cur_lvl);
1449 abx500_chargalg_state_to(di, STATE_MAINTENANCE_A); 1469 abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
1450 power_supply_changed(&di->chargalg_psy); 1470 power_supply_changed(&di->chargalg_psy);
1451 /* Intentional fallthrough*/ 1471 /* Intentional fallthrough*/
@@ -1459,13 +1479,13 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1459 1479
1460 case STATE_MAINTENANCE_B_INIT: 1480 case STATE_MAINTENANCE_B_INIT:
1461 abx500_chargalg_start_maintenance_timer(di, 1481 abx500_chargalg_start_maintenance_timer(di,
1462 di->bat->bat_type[ 1482 di->bm->bat_type[
1463 di->bat->batt_id].maint_b_chg_timer_h); 1483 di->bm->batt_id].maint_b_chg_timer_h);
1464 abx500_chargalg_start_charging(di, 1484 abx500_chargalg_start_charging(di,
1465 di->bat->bat_type[ 1485 di->bm->bat_type[
1466 di->bat->batt_id].maint_b_vol_lvl, 1486 di->bm->batt_id].maint_b_vol_lvl,
1467 di->bat->bat_type[ 1487 di->bm->bat_type[
1468 di->bat->batt_id].maint_b_cur_lvl); 1488 di->bm->batt_id].maint_b_cur_lvl);
1469 abx500_chargalg_state_to(di, STATE_MAINTENANCE_B); 1489 abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
1470 power_supply_changed(&di->chargalg_psy); 1490 power_supply_changed(&di->chargalg_psy);
1471 /* Intentional fallthrough*/ 1491 /* Intentional fallthrough*/
@@ -1479,10 +1499,10 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1479 1499
1480 case STATE_TEMP_LOWHIGH_INIT: 1500 case STATE_TEMP_LOWHIGH_INIT:
1481 abx500_chargalg_start_charging(di, 1501 abx500_chargalg_start_charging(di,
1482 di->bat->bat_type[ 1502 di->bm->bat_type[
1483 di->bat->batt_id].low_high_vol_lvl, 1503 di->bm->batt_id].low_high_vol_lvl,
1484 di->bat->bat_type[ 1504 di->bm->bat_type[
1485 di->bat->batt_id].low_high_cur_lvl); 1505 di->bm->batt_id].low_high_cur_lvl);
1486 abx500_chargalg_stop_maintenance_timer(di); 1506 abx500_chargalg_stop_maintenance_timer(di);
1487 di->charge_status = POWER_SUPPLY_STATUS_CHARGING; 1507 di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
1488 abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH); 1508 abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
@@ -1543,11 +1563,11 @@ static void abx500_chargalg_periodic_work(struct work_struct *work)
1543 if (di->chg_info.conn_chg) 1563 if (di->chg_info.conn_chg)
1544 queue_delayed_work(di->chargalg_wq, 1564 queue_delayed_work(di->chargalg_wq,
1545 &di->chargalg_periodic_work, 1565 &di->chargalg_periodic_work,
1546 di->bat->interval_charging * HZ); 1566 di->bm->interval_charging * HZ);
1547 else 1567 else
1548 queue_delayed_work(di->chargalg_wq, 1568 queue_delayed_work(di->chargalg_wq,
1549 &di->chargalg_periodic_work, 1569 &di->chargalg_periodic_work,
1550 di->bat->interval_not_charging * HZ); 1570 di->bm->interval_not_charging * HZ);
1551} 1571}
1552 1572
1553/** 1573/**
@@ -1614,10 +1634,13 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
1614 if (di->events.batt_ovv) { 1634 if (di->events.batt_ovv) {
1615 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; 1635 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
1616 } else if (di->events.btemp_underover) { 1636 } else if (di->events.btemp_underover) {
1617 if (di->batt_data.temp <= di->bat->temp_under) 1637 if (di->batt_data.temp <= di->bm->temp_under)
1618 val->intval = POWER_SUPPLY_HEALTH_COLD; 1638 val->intval = POWER_SUPPLY_HEALTH_COLD;
1619 else 1639 else
1620 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; 1640 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
1641 } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
1642 di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
1643 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
1621 } else { 1644 } else {
1622 val->intval = POWER_SUPPLY_HEALTH_GOOD; 1645 val->intval = POWER_SUPPLY_HEALTH_GOOD;
1623 } 1646 }
@@ -1631,6 +1654,25 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
1631/* Exposure to the sysfs interface */ 1654/* Exposure to the sysfs interface */
1632 1655
1633/** 1656/**
1657 * abx500_chargalg_sysfs_show() - sysfs show operations
1658 * @kobj: pointer to the struct kobject
1659 * @attr: pointer to the struct attribute
1660 * @buf: buffer that holds the parameter to send to userspace
1661 *
1662 * Returns a buffer to be displayed in user space
1663 */
1664static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
1665 struct attribute *attr, char *buf)
1666{
1667 struct abx500_chargalg *di = container_of(kobj,
1668 struct abx500_chargalg, chargalg_kobject);
1669
1670 return sprintf(buf, "%d\n",
1671 di->susp_status.ac_suspended &&
1672 di->susp_status.usb_suspended);
1673}
1674
1675/**
1634 * abx500_chargalg_sysfs_charger() - sysfs store operations 1676 * abx500_chargalg_sysfs_charger() - sysfs store operations
1635 * @kobj: pointer to the struct kobject 1677 * @kobj: pointer to the struct kobject
1636 * @attr: pointer to the struct attribute 1678 * @attr: pointer to the struct attribute
@@ -1698,7 +1740,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
1698static struct attribute abx500_chargalg_en_charger = \ 1740static struct attribute abx500_chargalg_en_charger = \
1699{ 1741{
1700 .name = "chargalg", 1742 .name = "chargalg",
1701 .mode = S_IWUGO, 1743 .mode = S_IRUGO | S_IWUSR,
1702}; 1744};
1703 1745
1704static struct attribute *abx500_chargalg_chg[] = { 1746static struct attribute *abx500_chargalg_chg[] = {
@@ -1707,6 +1749,7 @@ static struct attribute *abx500_chargalg_chg[] = {
1707}; 1749};
1708 1750
1709static const struct sysfs_ops abx500_chargalg_sysfs_ops = { 1751static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
1752 .show = abx500_chargalg_sysfs_show,
1710 .store = abx500_chargalg_sysfs_charger, 1753 .store = abx500_chargalg_sysfs_charger,
1711}; 1754};
1712 1755
@@ -1806,6 +1849,7 @@ static char *supply_interface[] = {
1806static int abx500_chargalg_probe(struct platform_device *pdev) 1849static int abx500_chargalg_probe(struct platform_device *pdev)
1807{ 1850{
1808 struct device_node *np = pdev->dev.of_node; 1851 struct device_node *np = pdev->dev.of_node;
1852 struct abx500_bm_data *plat = pdev->dev.platform_data;
1809 struct abx500_chargalg *di; 1853 struct abx500_chargalg *di;
1810 int ret = 0; 1854 int ret = 0;
1811 1855
@@ -1814,21 +1858,19 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
1814 dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__); 1858 dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__);
1815 return -ENOMEM; 1859 return -ENOMEM;
1816 } 1860 }
1817 di->bat = pdev->mfd_cell->platform_data; 1861
1818 if (!di->bat) { 1862 if (!plat) {
1819 if (np) { 1863 dev_err(&pdev->dev, "no battery management data supplied\n");
1820 ret = bmdevs_of_probe(&pdev->dev, np, &di->bat); 1864 return -EINVAL;
1821 if (ret) { 1865 }
1822 dev_err(&pdev->dev, 1866 di->bm = plat;
1823 "failed to get battery information\n"); 1867
1824 return ret; 1868 if (np) {
1825 } 1869 ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
1826 } else { 1870 if (ret) {
1827 dev_err(&pdev->dev, "missing dt node for ab8500_chargalg\n"); 1871 dev_err(&pdev->dev, "failed to get battery information\n");
1828 return -EINVAL; 1872 return ret;
1829 } 1873 }
1830 } else {
1831 dev_info(&pdev->dev, "falling back to legacy platform data\n");
1832 } 1874 }
1833 1875
1834 /* get device struct */ 1876 /* get device struct */
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index ee842b37f462..ca91396fc48e 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -28,7 +28,6 @@
28 * http://www.ti.com/product/bq24155 28 * http://www.ti.com/product/bq24155
29 */ 29 */
30 30
31#include <linux/version.h>
32#include <linux/kernel.h> 31#include <linux/kernel.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/param.h> 33#include <linux/param.h>
@@ -734,12 +733,10 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
734 int charger = 0; 733 int charger = 0;
735 int boost = 0; 734 int boost = 0;
736 735
737 if (mode == BQ2415X_MODE_HOST_CHARGER ||
738 mode == BQ2415X_MODE_DEDICATED_CHARGER)
739 charger = 1;
740
741 if (mode == BQ2415X_MODE_BOOST) 736 if (mode == BQ2415X_MODE_BOOST)
742 boost = 1; 737 boost = 1;
738 else if (mode != BQ2415X_MODE_OFF)
739 charger = 1;
743 740
744 if (!charger) 741 if (!charger)
745 ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE); 742 ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
@@ -751,6 +748,10 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
751 return ret; 748 return ret;
752 749
753 switch (mode) { 750 switch (mode) {
751 case BQ2415X_MODE_OFF:
752 dev_dbg(bq->dev, "changing mode to: Offline\n");
753 ret = bq2415x_set_current_limit(bq, 100);
754 break;
754 case BQ2415X_MODE_NONE: 755 case BQ2415X_MODE_NONE:
755 dev_dbg(bq->dev, "changing mode to: N/A\n"); 756 dev_dbg(bq->dev, "changing mode to: N/A\n");
756 ret = bq2415x_set_current_limit(bq, 100); 757 ret = bq2415x_set_current_limit(bq, 100);
@@ -843,7 +844,7 @@ static void bq2415x_timer_error(struct bq2415x_device *bq, const char *msg)
843 dev_err(bq->dev, "%s\n", msg); 844 dev_err(bq->dev, "%s\n", msg);
844 if (bq->automode > 0) 845 if (bq->automode > 0)
845 bq->automode = 0; 846 bq->automode = 0;
846 bq2415x_set_mode(bq, BQ2415X_MODE_NONE); 847 bq2415x_set_mode(bq, BQ2415X_MODE_OFF);
847 bq2415x_set_autotimer(bq, 0); 848 bq2415x_set_autotimer(bq, 0);
848} 849}
849 850
@@ -1136,6 +1137,10 @@ static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
1136 return -ENOSYS; 1137 return -ENOSYS;
1137 bq->automode = 1; 1138 bq->automode = 1;
1138 mode = bq->reported_mode; 1139 mode = bq->reported_mode;
1140 } else if (strncmp(buf, "off", 3) == 0) {
1141 if (bq->automode > 0)
1142 bq->automode = 0;
1143 mode = BQ2415X_MODE_OFF;
1139 } else if (strncmp(buf, "none", 4) == 0) { 1144 } else if (strncmp(buf, "none", 4) == 0) {
1140 if (bq->automode > 0) 1145 if (bq->automode > 0)
1141 bq->automode = 0; 1146 bq->automode = 0;
@@ -1183,6 +1188,9 @@ static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
1183 ret += sprintf(buf+ret, "auto ("); 1188 ret += sprintf(buf+ret, "auto (");
1184 1189
1185 switch (bq->mode) { 1190 switch (bq->mode) {
1191 case BQ2415X_MODE_OFF:
1192 ret += sprintf(buf+ret, "off");
1193 break;
1186 case BQ2415X_MODE_NONE: 1194 case BQ2415X_MODE_NONE:
1187 ret += sprintf(buf+ret, "none"); 1195 ret += sprintf(buf+ret, "none");
1188 break; 1196 break;
@@ -1217,6 +1225,8 @@ static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
1217 return -EINVAL; 1225 return -EINVAL;
1218 1226
1219 switch (bq->reported_mode) { 1227 switch (bq->reported_mode) {
1228 case BQ2415X_MODE_OFF:
1229 return sprintf(buf, "off\n");
1220 case BQ2415X_MODE_NONE: 1230 case BQ2415X_MODE_NONE:
1221 return sprintf(buf, "none\n"); 1231 return sprintf(buf, "none\n");
1222 case BQ2415X_MODE_HOST_CHARGER: 1232 case BQ2415X_MODE_HOST_CHARGER:
@@ -1523,7 +1533,7 @@ static int bq2415x_probe(struct i2c_client *client,
1523 goto error_1; 1533 goto error_1;
1524 } 1534 }
1525 1535
1526 bq = kzalloc(sizeof(*bq), GFP_KERNEL); 1536 bq = devm_kzalloc(&client->dev, sizeof(*bq), GFP_KERNEL);
1527 if (!bq) { 1537 if (!bq) {
1528 dev_err(&client->dev, "failed to allocate device data\n"); 1538 dev_err(&client->dev, "failed to allocate device data\n");
1529 ret = -ENOMEM; 1539 ret = -ENOMEM;
@@ -1536,8 +1546,8 @@ static int bq2415x_probe(struct i2c_client *client,
1536 bq->dev = &client->dev; 1546 bq->dev = &client->dev;
1537 bq->chip = id->driver_data; 1547 bq->chip = id->driver_data;
1538 bq->name = name; 1548 bq->name = name;
1539 bq->mode = BQ2415X_MODE_NONE; 1549 bq->mode = BQ2415X_MODE_OFF;
1540 bq->reported_mode = BQ2415X_MODE_NONE; 1550 bq->reported_mode = BQ2415X_MODE_OFF;
1541 bq->autotimer = 0; 1551 bq->autotimer = 0;
1542 bq->automode = 0; 1552 bq->automode = 0;
1543 1553
@@ -1549,19 +1559,19 @@ static int bq2415x_probe(struct i2c_client *client,
1549 ret = bq2415x_power_supply_init(bq); 1559 ret = bq2415x_power_supply_init(bq);
1550 if (ret) { 1560 if (ret) {
1551 dev_err(bq->dev, "failed to register power supply: %d\n", ret); 1561 dev_err(bq->dev, "failed to register power supply: %d\n", ret);
1552 goto error_3; 1562 goto error_2;
1553 } 1563 }
1554 1564
1555 ret = bq2415x_sysfs_init(bq); 1565 ret = bq2415x_sysfs_init(bq);
1556 if (ret) { 1566 if (ret) {
1557 dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret); 1567 dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
1558 goto error_4; 1568 goto error_3;
1559 } 1569 }
1560 1570
1561 ret = bq2415x_set_defaults(bq); 1571 ret = bq2415x_set_defaults(bq);
1562 if (ret) { 1572 if (ret) {
1563 dev_err(bq->dev, "failed to set default values: %d\n", ret); 1573 dev_err(bq->dev, "failed to set default values: %d\n", ret);
1564 goto error_5; 1574 goto error_4;
1565 } 1575 }
1566 1576
1567 if (bq->init_data.set_mode_hook) { 1577 if (bq->init_data.set_mode_hook) {
@@ -1585,12 +1595,10 @@ static int bq2415x_probe(struct i2c_client *client,
1585 dev_info(bq->dev, "driver registered\n"); 1595 dev_info(bq->dev, "driver registered\n");
1586 return 0; 1596 return 0;
1587 1597
1588error_5:
1589 bq2415x_sysfs_exit(bq);
1590error_4: 1598error_4:
1591 bq2415x_power_supply_exit(bq); 1599 bq2415x_sysfs_exit(bq);
1592error_3: 1600error_3:
1593 kfree(bq); 1601 bq2415x_power_supply_exit(bq);
1594error_2: 1602error_2:
1595 kfree(name); 1603 kfree(name);
1596error_1: 1604error_1:
@@ -1622,7 +1630,6 @@ static int bq2415x_remove(struct i2c_client *client)
1622 dev_info(bq->dev, "driver unregistered\n"); 1630 dev_info(bq->dev, "driver unregistered\n");
1623 1631
1624 kfree(bq->name); 1632 kfree(bq->name);
1625 kfree(bq);
1626 1633
1627 return 0; 1634 return 0;
1628} 1635}
@@ -1652,18 +1659,7 @@ static struct i2c_driver bq2415x_driver = {
1652 .remove = bq2415x_remove, 1659 .remove = bq2415x_remove,
1653 .id_table = bq2415x_i2c_id_table, 1660 .id_table = bq2415x_i2c_id_table,
1654}; 1661};
1655 1662module_i2c_driver(bq2415x_driver);
1656static int __init bq2415x_init(void)
1657{
1658 return i2c_add_driver(&bq2415x_driver);
1659}
1660module_init(bq2415x_init);
1661
1662static void __exit bq2415x_exit(void)
1663{
1664 i2c_del_driver(&bq2415x_driver);
1665}
1666module_exit(bq2415x_exit);
1667 1663
1668MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); 1664MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
1669MODULE_DESCRIPTION("bq2415x charger driver"); 1665MODULE_DESCRIPTION("bq2415x charger driver");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 36b34efdafc9..8ccf5d7d0add 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -299,7 +299,7 @@ static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
299} 299}
300 300
301/* 301/*
302 * Return the battery temperature in tenths of degree Celsius 302 * Return the battery temperature in tenths of degree Kelvin
303 * Or < 0 if something fails. 303 * Or < 0 if something fails.
304 */ 304 */
305static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di) 305static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
@@ -312,10 +312,8 @@ static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
312 return temp; 312 return temp;
313 } 313 }
314 314
315 if (bq27xxx_is_chip_version_higher(di)) 315 if (!bq27xxx_is_chip_version_higher(di))
316 temp -= 2731; 316 temp = 5 * temp / 2;
317 else
318 temp = ((temp * 5) - 5463) / 2;
319 317
320 return temp; 318 return temp;
321} 319}
@@ -448,7 +446,6 @@ static void bq27x00_update(struct bq27x00_device_info *di)
448 cache.temperature = bq27x00_battery_read_temperature(di); 446 cache.temperature = bq27x00_battery_read_temperature(di);
449 if (!is_bq27425) 447 if (!is_bq27425)
450 cache.cycle_count = bq27x00_battery_read_cyct(di); 448 cache.cycle_count = bq27x00_battery_read_cyct(di);
451 cache.cycle_count = bq27x00_battery_read_cyct(di);
452 cache.power_avg = 449 cache.power_avg =
453 bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG); 450 bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
454 451
@@ -642,6 +639,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
642 break; 639 break;
643 case POWER_SUPPLY_PROP_TEMP: 640 case POWER_SUPPLY_PROP_TEMP:
644 ret = bq27x00_simple_value(di->cache.temperature, val); 641 ret = bq27x00_simple_value(di->cache.temperature, val);
642 if (ret == 0)
643 val->intval -= 2731;
645 break; 644 break;
646 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: 645 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
647 ret = bq27x00_simple_value(di->cache.time_to_empty, val); 646 ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -696,7 +695,6 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
696 int ret; 695 int ret;
697 696
698 di->bat.type = POWER_SUPPLY_TYPE_BATTERY; 697 di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
699 di->chip = BQ27425;
700 if (di->chip == BQ27425) { 698 if (di->chip == BQ27425) {
701 di->bat.properties = bq27425_battery_props; 699 di->bat.properties = bq27425_battery_props;
702 di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props); 700 di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 6ba047f5ac2c..8acc3f8d303c 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -669,15 +669,21 @@ static void _setup_polling(struct work_struct *work)
669 WARN(cm_wq == NULL, "charger-manager: workqueue not initialized" 669 WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
670 ". try it later. %s\n", __func__); 670 ". try it later. %s\n", __func__);
671 671
672 /*
673 * Use mod_delayed_work() iff the next polling interval should
674 * occur before the currently scheduled one. If @cm_monitor_work
675 * isn't active, the end result is the same, so no need to worry
676 * about stale @next_polling.
677 */
672 _next_polling = jiffies + polling_jiffy; 678 _next_polling = jiffies + polling_jiffy;
673 679
674 if (!delayed_work_pending(&cm_monitor_work) || 680 if (time_before(_next_polling, next_polling)) {
675 (delayed_work_pending(&cm_monitor_work) &&
676 time_after(next_polling, _next_polling))) {
677 next_polling = jiffies + polling_jiffy;
678 mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy); 681 mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
682 next_polling = _next_polling;
683 } else {
684 if (queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy))
685 next_polling = _next_polling;
679 } 686 }
680
681out: 687out:
682 mutex_unlock(&cm_list_mtx); 688 mutex_unlock(&cm_list_mtx);
683} 689}
@@ -751,8 +757,7 @@ static void misc_event_handler(struct charger_manager *cm,
751 if (cm_suspended) 757 if (cm_suspended)
752 device_set_wakeup_capable(cm->dev, true); 758 device_set_wakeup_capable(cm->dev, true);
753 759
754 if (!delayed_work_pending(&cm_monitor_work) && 760 if (is_polling_required(cm) && cm->desc->polling_interval_ms)
755 is_polling_required(cm) && cm->desc->polling_interval_ms)
756 schedule_work(&setup_polling); 761 schedule_work(&setup_polling);
757 uevent_notify(cm, default_event_names[type]); 762 uevent_notify(cm, default_event_names[type]);
758} 763}
@@ -1170,8 +1175,7 @@ static int charger_extcon_notifier(struct notifier_block *self,
1170 * when charger cable is attached. 1175 * when charger cable is attached.
1171 */ 1176 */
1172 if (cable->attached && is_polling_required(cable->cm)) { 1177 if (cable->attached && is_polling_required(cable->cm)) {
1173 if (work_pending(&setup_polling)) 1178 cancel_work_sync(&setup_polling);
1174 cancel_work_sync(&setup_polling);
1175 schedule_work(&setup_polling); 1179 schedule_work(&setup_polling);
1176 } 1180 }
1177 1181
@@ -1215,6 +1219,55 @@ static int charger_extcon_init(struct charger_manager *cm,
1215 return ret; 1219 return ret;
1216} 1220}
1217 1221
1222/**
 1223 * charger_manager_register_extcon - Register extcon device to receive state
1224 * of charger cable.
1225 * @cm: the Charger Manager representing the battery.
1226 *
 1227 * This function supports the EXTCON(External Connector) subsystem to detect the
1228 * state of charger cables for enabling or disabling charger(regulator) and
1229 * select the charger cable for charging among a number of external cable
1230 * according to policy of H/W board.
1231 */
1232static int charger_manager_register_extcon(struct charger_manager *cm)
1233{
1234 struct charger_desc *desc = cm->desc;
1235 struct charger_regulator *charger;
1236 int ret = 0;
1237 int i;
1238 int j;
1239
1240 for (i = 0; i < desc->num_charger_regulators; i++) {
1241 charger = &desc->charger_regulators[i];
1242
1243 charger->consumer = regulator_get(cm->dev,
1244 charger->regulator_name);
1245 if (charger->consumer == NULL) {
1246 dev_err(cm->dev, "Cannot find charger(%s)n",
1247 charger->regulator_name);
1248 ret = -EINVAL;
1249 goto err;
1250 }
1251 charger->cm = cm;
1252
1253 for (j = 0; j < charger->num_cables; j++) {
1254 struct charger_cable *cable = &charger->cables[j];
1255
1256 ret = charger_extcon_init(cm, cable);
1257 if (ret < 0) {
1258 dev_err(cm->dev, "Cannot initialize charger(%s)n",
1259 charger->regulator_name);
1260 goto err;
1261 }
1262 cable->charger = charger;
1263 cable->cm = cm;
1264 }
1265 }
1266
1267err:
1268 return ret;
1269}
1270
1218/* help function of sysfs node to control charger(regulator) */ 1271/* help function of sysfs node to control charger(regulator) */
1219static ssize_t charger_name_show(struct device *dev, 1272static ssize_t charger_name_show(struct device *dev,
1220 struct device_attribute *attr, char *buf) 1273 struct device_attribute *attr, char *buf)
@@ -1274,7 +1327,7 @@ static ssize_t charger_externally_control_store(struct device *dev,
1274 1327
1275 for (i = 0; i < desc->num_charger_regulators; i++) { 1328 for (i = 0; i < desc->num_charger_regulators; i++) {
1276 if (&desc->charger_regulators[i] != charger && 1329 if (&desc->charger_regulators[i] != charger &&
1277 !desc->charger_regulators[i].externally_control) { 1330 !desc->charger_regulators[i].externally_control) {
1278 /* 1331 /*
1279 * At least, one charger is controlled by 1332 * At least, one charger is controlled by
1280 * charger-manager 1333 * charger-manager
@@ -1303,13 +1356,107 @@ static ssize_t charger_externally_control_store(struct device *dev,
1303 return count; 1356 return count;
1304} 1357}
1305 1358
1359/**
1360 * charger_manager_register_sysfs - Register sysfs entry for each charger
1361 * @cm: the Charger Manager representing the battery.
1362 *
 1363 * This function adds a sysfs entry for each charger(regulator) to control chargers from
1364 * user-space. If some development board use one more chargers for charging
 1365 * but only needs one charger in a specific case which is dependent on user
1366 * scenario or hardware restrictions, the user enter 1 or 0(zero) to '/sys/
1367 * class/power_supply/battery/charger.[index]/externally_control'. For example,
 1368 * if the user writes 1 to 'sys/class/power_supply/battery/charger.[index]/
 1369 * externally_control', this charger isn't controlled from charger-manager and
1370 * always stay off state of regulator.
1371 */
1372static int charger_manager_register_sysfs(struct charger_manager *cm)
1373{
1374 struct charger_desc *desc = cm->desc;
1375 struct charger_regulator *charger;
1376 int chargers_externally_control = 1;
1377 char buf[11];
1378 char *str;
1379 int ret = 0;
1380 int i;
1381
1382 /* Create sysfs entry to control charger(regulator) */
1383 for (i = 0; i < desc->num_charger_regulators; i++) {
1384 charger = &desc->charger_regulators[i];
1385
1386 snprintf(buf, 10, "charger.%d", i);
1387 str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
1388 if (!str) {
1389 dev_err(cm->dev, "Cannot allocate memory: %s\n",
1390 charger->regulator_name);
1391 ret = -ENOMEM;
1392 goto err;
1393 }
1394 strcpy(str, buf);
1395
1396 charger->attrs[0] = &charger->attr_name.attr;
1397 charger->attrs[1] = &charger->attr_state.attr;
1398 charger->attrs[2] = &charger->attr_externally_control.attr;
1399 charger->attrs[3] = NULL;
1400 charger->attr_g.name = str;
1401 charger->attr_g.attrs = charger->attrs;
1402
1403 sysfs_attr_init(&charger->attr_name.attr);
1404 charger->attr_name.attr.name = "name";
1405 charger->attr_name.attr.mode = 0444;
1406 charger->attr_name.show = charger_name_show;
1407
1408 sysfs_attr_init(&charger->attr_state.attr);
1409 charger->attr_state.attr.name = "state";
1410 charger->attr_state.attr.mode = 0444;
1411 charger->attr_state.show = charger_state_show;
1412
1413 sysfs_attr_init(&charger->attr_externally_control.attr);
1414 charger->attr_externally_control.attr.name
1415 = "externally_control";
1416 charger->attr_externally_control.attr.mode = 0644;
1417 charger->attr_externally_control.show
1418 = charger_externally_control_show;
1419 charger->attr_externally_control.store
1420 = charger_externally_control_store;
1421
1422 if (!desc->charger_regulators[i].externally_control ||
1423 !chargers_externally_control)
1424 chargers_externally_control = 0;
1425
1426 dev_info(cm->dev, "'%s' regulator's externally_control"
1427 "is %d\n", charger->regulator_name,
1428 charger->externally_control);
1429
1430 ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
1431 &charger->attr_g);
1432 if (ret < 0) {
1433 dev_err(cm->dev, "Cannot create sysfs entry"
1434 "of %s regulator\n",
1435 charger->regulator_name);
1436 ret = -EINVAL;
1437 goto err;
1438 }
1439 }
1440
1441 if (chargers_externally_control) {
1442 dev_err(cm->dev, "Cannot register regulator because "
1443 "charger-manager must need at least "
1444 "one charger for charging battery\n");
1445
1446 ret = -EINVAL;
1447 goto err;
1448 }
1449
1450err:
1451 return ret;
1452}
1453
1306static int charger_manager_probe(struct platform_device *pdev) 1454static int charger_manager_probe(struct platform_device *pdev)
1307{ 1455{
1308 struct charger_desc *desc = dev_get_platdata(&pdev->dev); 1456 struct charger_desc *desc = dev_get_platdata(&pdev->dev);
1309 struct charger_manager *cm; 1457 struct charger_manager *cm;
1310 int ret = 0, i = 0; 1458 int ret = 0, i = 0;
1311 int j = 0; 1459 int j = 0;
1312 int chargers_externally_control = 1;
1313 union power_supply_propval val; 1460 union power_supply_propval val;
1314 1461
1315 if (g_desc && !rtc_dev && g_desc->rtc_name) { 1462 if (g_desc && !rtc_dev && g_desc->rtc_name) {
@@ -1440,11 +1587,10 @@ static int charger_manager_probe(struct platform_device *pdev)
1440 1587
1441 memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default)); 1588 memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
1442 1589
1443 if (!desc->psy_name) { 1590 if (!desc->psy_name)
1444 strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX); 1591 strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
1445 } else { 1592 else
1446 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX); 1593 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
1447 }
1448 cm->charger_psy.name = cm->psy_name_buf; 1594 cm->charger_psy.name = cm->psy_name_buf;
1449 1595
1450 /* Allocate for psy properties because they may vary */ 1596 /* Allocate for psy properties because they may vary */
@@ -1496,105 +1642,19 @@ static int charger_manager_probe(struct platform_device *pdev)
1496 goto err_register; 1642 goto err_register;
1497 } 1643 }
1498 1644
1499 for (i = 0 ; i < desc->num_charger_regulators ; i++) { 1645 /* Register extcon device for charger cable */
1500 struct charger_regulator *charger 1646 ret = charger_manager_register_extcon(cm);
1501 = &desc->charger_regulators[i]; 1647 if (ret < 0) {
1502 char buf[11]; 1648 dev_err(&pdev->dev, "Cannot initialize extcon device\n");
1503 char *str; 1649 goto err_reg_extcon;
1504
1505 charger->consumer = regulator_get(&pdev->dev,
1506 charger->regulator_name);
1507 if (charger->consumer == NULL) {
1508 dev_err(&pdev->dev, "Cannot find charger(%s)n",
1509 charger->regulator_name);
1510 ret = -EINVAL;
1511 goto err_chg_get;
1512 }
1513 charger->cm = cm;
1514
1515 for (j = 0 ; j < charger->num_cables ; j++) {
1516 struct charger_cable *cable = &charger->cables[j];
1517
1518 ret = charger_extcon_init(cm, cable);
1519 if (ret < 0) {
1520 dev_err(&pdev->dev, "Cannot find charger(%s)n",
1521 charger->regulator_name);
1522 goto err_extcon;
1523 }
1524 cable->charger = charger;
1525 cable->cm = cm;
1526 }
1527
1528 /* Create sysfs entry to control charger(regulator) */
1529 snprintf(buf, 10, "charger.%d", i);
1530 str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
1531 if (!str) {
1532 for (i--; i >= 0; i--) {
1533 charger = &desc->charger_regulators[i];
1534 kfree(charger->attr_g.name);
1535 }
1536 ret = -ENOMEM;
1537
1538 goto err_extcon;
1539 }
1540 strcpy(str, buf);
1541
1542 charger->attrs[0] = &charger->attr_name.attr;
1543 charger->attrs[1] = &charger->attr_state.attr;
1544 charger->attrs[2] = &charger->attr_externally_control.attr;
1545 charger->attrs[3] = NULL;
1546 charger->attr_g.name = str;
1547 charger->attr_g.attrs = charger->attrs;
1548
1549 sysfs_attr_init(&charger->attr_name.attr);
1550 charger->attr_name.attr.name = "name";
1551 charger->attr_name.attr.mode = 0444;
1552 charger->attr_name.show = charger_name_show;
1553
1554 sysfs_attr_init(&charger->attr_state.attr);
1555 charger->attr_state.attr.name = "state";
1556 charger->attr_state.attr.mode = 0444;
1557 charger->attr_state.show = charger_state_show;
1558
1559 sysfs_attr_init(&charger->attr_externally_control.attr);
1560 charger->attr_externally_control.attr.name
1561 = "externally_control";
1562 charger->attr_externally_control.attr.mode = 0644;
1563 charger->attr_externally_control.show
1564 = charger_externally_control_show;
1565 charger->attr_externally_control.store
1566 = charger_externally_control_store;
1567
1568 if (!desc->charger_regulators[i].externally_control ||
1569 !chargers_externally_control) {
1570 chargers_externally_control = 0;
1571 }
1572 dev_info(&pdev->dev, "'%s' regulator's externally_control"
1573 "is %d\n", charger->regulator_name,
1574 charger->externally_control);
1575
1576 ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
1577 &charger->attr_g);
1578 if (ret < 0) {
1579 dev_info(&pdev->dev, "Cannot create sysfs entry"
1580 "of %s regulator\n",
1581 charger->regulator_name);
1582 }
1583 }
1584
1585 if (chargers_externally_control) {
1586 dev_err(&pdev->dev, "Cannot register regulator because "
1587 "charger-manager must need at least "
1588 "one charger for charging battery\n");
1589
1590 ret = -EINVAL;
1591 goto err_chg_enable;
1592 } 1650 }
1593 1651
1594 ret = try_charger_enable(cm, true); 1652 /* Register sysfs entry for charger(regulator) */
1595 if (ret) { 1653 ret = charger_manager_register_sysfs(cm);
1596 dev_err(&pdev->dev, "Cannot enable charger regulators\n"); 1654 if (ret < 0) {
1597 goto err_chg_enable; 1655 dev_err(&pdev->dev,
1656 "Cannot initialize sysfs entry of regulator\n");
1657 goto err_reg_sysfs;
1598 } 1658 }
1599 1659
1600 /* Add to the list */ 1660 /* Add to the list */
@@ -1613,27 +1673,28 @@ static int charger_manager_probe(struct platform_device *pdev)
1613 1673
1614 return 0; 1674 return 0;
1615 1675
1616err_chg_enable: 1676err_reg_sysfs:
1617 for (i = 0; i < desc->num_charger_regulators; i++) { 1677 for (i = 0; i < desc->num_charger_regulators; i++) {
1618 struct charger_regulator *charger; 1678 struct charger_regulator *charger;
1619 1679
1620 charger = &desc->charger_regulators[i]; 1680 charger = &desc->charger_regulators[i];
1621 sysfs_remove_group(&cm->charger_psy.dev->kobj, 1681 sysfs_remove_group(&cm->charger_psy.dev->kobj,
1622 &charger->attr_g); 1682 &charger->attr_g);
1683
1623 kfree(charger->attr_g.name); 1684 kfree(charger->attr_g.name);
1624 } 1685 }
1625err_extcon: 1686err_reg_extcon:
1626 for (i = 0 ; i < desc->num_charger_regulators ; i++) { 1687 for (i = 0; i < desc->num_charger_regulators; i++) {
1627 struct charger_regulator *charger 1688 struct charger_regulator *charger;
1628 = &desc->charger_regulators[i]; 1689
1629 for (j = 0 ; j < charger->num_cables ; j++) { 1690 charger = &desc->charger_regulators[i];
1691 for (j = 0; j < charger->num_cables; j++) {
1630 struct charger_cable *cable = &charger->cables[j]; 1692 struct charger_cable *cable = &charger->cables[j];
1631 extcon_unregister_interest(&cable->extcon_dev); 1693 extcon_unregister_interest(&cable->extcon_dev);
1632 } 1694 }
1633 } 1695
1634err_chg_get:
1635 for (i = 0 ; i < desc->num_charger_regulators ; i++)
1636 regulator_put(desc->charger_regulators[i].consumer); 1696 regulator_put(desc->charger_regulators[i].consumer);
1697 }
1637 1698
1638 power_supply_unregister(&cm->charger_psy); 1699 power_supply_unregister(&cm->charger_psy);
1639err_register: 1700err_register:
@@ -1661,10 +1722,8 @@ static int charger_manager_remove(struct platform_device *pdev)
1661 list_del(&cm->entry); 1722 list_del(&cm->entry);
1662 mutex_unlock(&cm_list_mtx); 1723 mutex_unlock(&cm_list_mtx);
1663 1724
1664 if (work_pending(&setup_polling)) 1725 cancel_work_sync(&setup_polling);
1665 cancel_work_sync(&setup_polling); 1726 cancel_delayed_work_sync(&cm_monitor_work);
1666 if (delayed_work_pending(&cm_monitor_work))
1667 cancel_delayed_work_sync(&cm_monitor_work);
1668 1727
1669 for (i = 0 ; i < desc->num_charger_regulators ; i++) { 1728 for (i = 0 ; i < desc->num_charger_regulators ; i++) {
1670 struct charger_regulator *charger 1729 struct charger_regulator *charger
@@ -1733,8 +1792,7 @@ static int cm_suspend_prepare(struct device *dev)
1733 cm_suspended = true; 1792 cm_suspended = true;
1734 } 1793 }
1735 1794
1736 if (delayed_work_pending(&cm->fullbatt_vchk_work)) 1795 cancel_delayed_work(&cm->fullbatt_vchk_work);
1737 cancel_delayed_work(&cm->fullbatt_vchk_work);
1738 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm); 1796 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
1739 cm->status_save_batt = is_batt_present(cm); 1797 cm->status_save_batt = is_batt_present(cm);
1740 1798
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 94762e67e22b..e8c5a391a498 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/notifier.h>
25 26
26#define DA9030_FAULT_LOG 0x0a 27#define DA9030_FAULT_LOG 0x0a
27#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7) 28#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7)
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index 3c5c2e459d73..08193feb3b08 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -337,7 +337,7 @@ static unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
337 if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1]) 337 if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
338 return DA9052_VC_TBL_REF_SZ - 1; 338 return DA9052_VC_TBL_REF_SZ - 1;
339 339
340 for (i = 0; i < DA9052_VC_TBL_REF_SZ; i++) { 340 for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) {
341 if ((adc_temp > vc_tbl_ref[i]) && 341 if ((adc_temp > vc_tbl_ref[i]) &&
342 (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1]))) 342 (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
343 return i; 343 return i;
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 2fa9b6bf1f3f..e7301b3ed623 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -7,6 +7,8 @@
7 * 7 *
8 * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il> 8 * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
9 * 9 *
10 * UEvent sending added by Evgeny Romanov <romanov@neurosoft.ru>
11 *
10 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
@@ -19,6 +21,7 @@
19#include <linux/errno.h> 21#include <linux/errno.h>
20#include <linux/swab.h> 22#include <linux/swab.h>
21#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/delay.h>
22#include <linux/idr.h> 25#include <linux/idr.h>
23#include <linux/power_supply.h> 26#include <linux/power_supply.h>
24#include <linux/slab.h> 27#include <linux/slab.h>
@@ -40,6 +43,8 @@
40 43
41#define DS2786_CURRENT_UNITS 25 44#define DS2786_CURRENT_UNITS 25
42 45
46#define DS278x_DELAY 1000
47
43struct ds278x_info; 48struct ds278x_info;
44 49
45struct ds278x_battery_ops { 50struct ds278x_battery_ops {
@@ -54,8 +59,11 @@ struct ds278x_info {
54 struct i2c_client *client; 59 struct i2c_client *client;
55 struct power_supply battery; 60 struct power_supply battery;
56 struct ds278x_battery_ops *ops; 61 struct ds278x_battery_ops *ops;
62 struct delayed_work bat_work;
57 int id; 63 int id;
58 int rsns; 64 int rsns;
65 int capacity;
66 int status; /* State Of Charge */
59}; 67};
60 68
61static DEFINE_IDR(battery_id); 69static DEFINE_IDR(battery_id);
@@ -220,6 +228,8 @@ static int ds278x_get_status(struct ds278x_info *info, int *status)
220 if (err) 228 if (err)
221 return err; 229 return err;
222 230
231 info->capacity = capacity;
232
223 if (capacity == 100) 233 if (capacity == 100)
224 *status = POWER_SUPPLY_STATUS_FULL; 234 *status = POWER_SUPPLY_STATUS_FULL;
225 else if (current_uA == 0) 235 else if (current_uA == 0)
@@ -267,6 +277,27 @@ static int ds278x_battery_get_property(struct power_supply *psy,
267 return ret; 277 return ret;
268} 278}
269 279
280static void ds278x_bat_update(struct ds278x_info *info)
281{
282 int old_status = info->status;
283 int old_capacity = info->capacity;
284
285 ds278x_get_status(info, &info->status);
286
287 if ((old_status != info->status) || (old_capacity != info->capacity))
288 power_supply_changed(&info->battery);
289}
290
291static void ds278x_bat_work(struct work_struct *work)
292{
293 struct ds278x_info *info;
294
295 info = container_of(work, struct ds278x_info, bat_work.work);
296 ds278x_bat_update(info);
297
298 schedule_delayed_work(&info->bat_work, DS278x_DELAY);
299}
300
270static enum power_supply_property ds278x_battery_props[] = { 301static enum power_supply_property ds278x_battery_props[] = {
271 POWER_SUPPLY_PROP_STATUS, 302 POWER_SUPPLY_PROP_STATUS,
272 POWER_SUPPLY_PROP_CAPACITY, 303 POWER_SUPPLY_PROP_CAPACITY,
@@ -295,10 +326,39 @@ static int ds278x_battery_remove(struct i2c_client *client)
295 idr_remove(&battery_id, info->id); 326 idr_remove(&battery_id, info->id);
296 mutex_unlock(&battery_lock); 327 mutex_unlock(&battery_lock);
297 328
329 cancel_delayed_work(&info->bat_work);
330
298 kfree(info); 331 kfree(info);
299 return 0; 332 return 0;
300} 333}
301 334
335#ifdef CONFIG_PM
336
337static int ds278x_suspend(struct i2c_client *client,
338 pm_message_t state)
339{
340 struct ds278x_info *info = i2c_get_clientdata(client);
341
342 cancel_delayed_work(&info->bat_work);
343 return 0;
344}
345
346static int ds278x_resume(struct i2c_client *client)
347{
348 struct ds278x_info *info = i2c_get_clientdata(client);
349
350 schedule_delayed_work(&info->bat_work, DS278x_DELAY);
351 return 0;
352}
353
354#else
355
356#define ds278x_suspend NULL
357#define ds278x_resume NULL
358
359#endif /* CONFIG_PM */
360
361
302enum ds278x_num_id { 362enum ds278x_num_id {
303 DS2782 = 0, 363 DS2782 = 0,
304 DS2786, 364 DS2786,
@@ -368,10 +428,17 @@ static int ds278x_battery_probe(struct i2c_client *client,
368 info->ops = &ds278x_ops[id->driver_data]; 428 info->ops = &ds278x_ops[id->driver_data];
369 ds278x_power_supply_init(&info->battery); 429 ds278x_power_supply_init(&info->battery);
370 430
431 info->capacity = 100;
432 info->status = POWER_SUPPLY_STATUS_FULL;
433
434 INIT_DELAYED_WORK(&info->bat_work, ds278x_bat_work);
435
371 ret = power_supply_register(&client->dev, &info->battery); 436 ret = power_supply_register(&client->dev, &info->battery);
372 if (ret) { 437 if (ret) {
373 dev_err(&client->dev, "failed to register battery\n"); 438 dev_err(&client->dev, "failed to register battery\n");
374 goto fail_register; 439 goto fail_register;
440 } else {
441 schedule_delayed_work(&info->bat_work, DS278x_DELAY);
375 } 442 }
376 443
377 return 0; 444 return 0;
@@ -401,6 +468,8 @@ static struct i2c_driver ds278x_battery_driver = {
401 }, 468 },
402 .probe = ds278x_battery_probe, 469 .probe = ds278x_battery_probe,
403 .remove = ds278x_battery_remove, 470 .remove = ds278x_battery_remove,
471 .suspend = ds278x_suspend,
472 .resume = ds278x_resume,
404 .id_table = ds278x_id, 473 .id_table = ds278x_id,
405}; 474};
406module_i2c_driver(ds278x_battery_driver); 475module_i2c_driver(ds278x_battery_driver);
diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/generic-adc-battery.c
index 32ce17e235c0..836816b82cbc 100644
--- a/drivers/power/generic-adc-battery.c
+++ b/drivers/power/generic-adc-battery.c
@@ -263,9 +263,6 @@ static int gab_probe(struct platform_device *pdev)
263 psy->external_power_changed = gab_ext_power_changed; 263 psy->external_power_changed = gab_ext_power_changed;
264 adc_bat->pdata = pdata; 264 adc_bat->pdata = pdata;
265 265
266 /* calculate the total number of channels */
267 chan = ARRAY_SIZE(gab_chan_name);
268
269 /* 266 /*
270 * copying the static properties and allocating extra memory for holding 267 * copying the static properties and allocating extra memory for holding
271 * the extra configurable properties received from platform data. 268 * the extra configurable properties received from platform data.
@@ -291,6 +288,7 @@ static int gab_probe(struct platform_device *pdev)
291 gab_chan_name[chan]); 288 gab_chan_name[chan]);
292 if (IS_ERR(adc_bat->channel[chan])) { 289 if (IS_ERR(adc_bat->channel[chan])) {
293 ret = PTR_ERR(adc_bat->channel[chan]); 290 ret = PTR_ERR(adc_bat->channel[chan]);
291 adc_bat->channel[chan] = NULL;
294 } else { 292 } else {
295 /* copying properties for supported channels only */ 293 /* copying properties for supported channels only */
296 memcpy(properties + sizeof(*(psy->properties)) * index, 294 memcpy(properties + sizeof(*(psy->properties)) * index,
@@ -344,8 +342,10 @@ err_gpio:
344gpio_req_fail: 342gpio_req_fail:
345 power_supply_unregister(psy); 343 power_supply_unregister(psy);
346err_reg_fail: 344err_reg_fail:
347 for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++) 345 for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
348 iio_channel_release(adc_bat->channel[chan]); 346 if (adc_bat->channel[chan])
347 iio_channel_release(adc_bat->channel[chan]);
348 }
349second_mem_fail: 349second_mem_fail:
350 kfree(psy->properties); 350 kfree(psy->properties);
351first_mem_fail: 351first_mem_fail:
@@ -365,8 +365,10 @@ static int gab_remove(struct platform_device *pdev)
365 gpio_free(pdata->gpio_charge_finished); 365 gpio_free(pdata->gpio_charge_finished);
366 } 366 }
367 367
368 for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++) 368 for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
369 iio_channel_release(adc_bat->channel[chan]); 369 if (adc_bat->channel[chan])
370 iio_channel_release(adc_bat->channel[chan]);
371 }
370 372
371 kfree(adc_bat->psy.properties); 373 kfree(adc_bat->psy.properties);
372 cancel_delayed_work(&adc_bat->bat_work); 374 cancel_delayed_work(&adc_bat->bat_work);
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
new file mode 100644
index 000000000000..c10f460f986f
--- /dev/null
+++ b/drivers/power/goldfish_battery.c
@@ -0,0 +1,236 @@
1/*
2 * Power supply driver for the goldfish emulator
3 *
4 * Copyright (C) 2008 Google, Inc.
5 * Copyright (C) 2012 Intel, Inc.
6 * Copyright (C) 2013 Intel, Inc.
7 * Author: Mike Lockwood <lockwood@android.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/module.h>
20#include <linux/err.h>
21#include <linux/platform_device.h>
22#include <linux/power_supply.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27
28struct goldfish_battery_data {
29 void __iomem *reg_base;
30 int irq;
31 spinlock_t lock;
32
33 struct power_supply battery;
34 struct power_supply ac;
35};
36
37#define GOLDFISH_BATTERY_READ(data, addr) \
38 (readl(data->reg_base + addr))
39#define GOLDFISH_BATTERY_WRITE(data, addr, x) \
40 (writel(x, data->reg_base + addr))
41
42/*
43 * Temporary variable used between goldfish_battery_probe() and
44 * goldfish_battery_open().
45 */
46static struct goldfish_battery_data *battery_data;
47
48enum {
49 /* status register */
50 BATTERY_INT_STATUS = 0x00,
51 /* set this to enable IRQ */
52 BATTERY_INT_ENABLE = 0x04,
53
54 BATTERY_AC_ONLINE = 0x08,
55 BATTERY_STATUS = 0x0C,
56 BATTERY_HEALTH = 0x10,
57 BATTERY_PRESENT = 0x14,
58 BATTERY_CAPACITY = 0x18,
59
60 BATTERY_STATUS_CHANGED = 1U << 0,
61 AC_STATUS_CHANGED = 1U << 1,
62 BATTERY_INT_MASK = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED,
63};
64
65
66static int goldfish_ac_get_property(struct power_supply *psy,
67 enum power_supply_property psp,
68 union power_supply_propval *val)
69{
70 struct goldfish_battery_data *data = container_of(psy,
71 struct goldfish_battery_data, ac);
72 int ret = 0;
73
74 switch (psp) {
75 case POWER_SUPPLY_PROP_ONLINE:
76 val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE);
77 break;
78 default:
79 ret = -EINVAL;
80 break;
81 }
82 return ret;
83}
84
85static int goldfish_battery_get_property(struct power_supply *psy,
86 enum power_supply_property psp,
87 union power_supply_propval *val)
88{
89 struct goldfish_battery_data *data = container_of(psy,
90 struct goldfish_battery_data, battery);
91 int ret = 0;
92
93 switch (psp) {
94 case POWER_SUPPLY_PROP_STATUS:
95 val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_STATUS);
96 break;
97 case POWER_SUPPLY_PROP_HEALTH:
98 val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_HEALTH);
99 break;
100 case POWER_SUPPLY_PROP_PRESENT:
101 val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_PRESENT);
102 break;
103 case POWER_SUPPLY_PROP_TECHNOLOGY:
104 val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
105 break;
106 case POWER_SUPPLY_PROP_CAPACITY:
107 val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY);
108 break;
109 default:
110 ret = -EINVAL;
111 break;
112 }
113
114 return ret;
115}
116
117static enum power_supply_property goldfish_battery_props[] = {
118 POWER_SUPPLY_PROP_STATUS,
119 POWER_SUPPLY_PROP_HEALTH,
120 POWER_SUPPLY_PROP_PRESENT,
121 POWER_SUPPLY_PROP_TECHNOLOGY,
122 POWER_SUPPLY_PROP_CAPACITY,
123};
124
125static enum power_supply_property goldfish_ac_props[] = {
126 POWER_SUPPLY_PROP_ONLINE,
127};
128
129static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id)
130{
131 unsigned long irq_flags;
132 struct goldfish_battery_data *data = dev_id;
133 uint32_t status;
134
135 spin_lock_irqsave(&data->lock, irq_flags);
136
137 /* read status flags, which will clear the interrupt */
138 status = GOLDFISH_BATTERY_READ(data, BATTERY_INT_STATUS);
139 status &= BATTERY_INT_MASK;
140
141 if (status & BATTERY_STATUS_CHANGED)
142 power_supply_changed(&data->battery);
143 if (status & AC_STATUS_CHANGED)
144 power_supply_changed(&data->ac);
145
146 spin_unlock_irqrestore(&data->lock, irq_flags);
147 return status ? IRQ_HANDLED : IRQ_NONE;
148}
149
150
151static int goldfish_battery_probe(struct platform_device *pdev)
152{
153 int ret;
154 struct resource *r;
155 struct goldfish_battery_data *data;
156
157 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
158 if (data == NULL)
159 return -ENOMEM;
160
161 spin_lock_init(&data->lock);
162
163 data->battery.properties = goldfish_battery_props;
164 data->battery.num_properties = ARRAY_SIZE(goldfish_battery_props);
165 data->battery.get_property = goldfish_battery_get_property;
166 data->battery.name = "battery";
167 data->battery.type = POWER_SUPPLY_TYPE_BATTERY;
168
169 data->ac.properties = goldfish_ac_props;
170 data->ac.num_properties = ARRAY_SIZE(goldfish_ac_props);
171 data->ac.get_property = goldfish_ac_get_property;
172 data->ac.name = "ac";
173 data->ac.type = POWER_SUPPLY_TYPE_MAINS;
174
175 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (r == NULL) {
177 dev_err(&pdev->dev, "platform_get_resource failed\n");
178 return -ENODEV;
179 }
180
181 data->reg_base = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
182 if (data->reg_base == NULL) {
183 dev_err(&pdev->dev, "unable to remap MMIO\n");
184 return -ENOMEM;
185 }
186
187 data->irq = platform_get_irq(pdev, 0);
188 if (data->irq < 0) {
189 dev_err(&pdev->dev, "platform_get_irq failed\n");
190 return -ENODEV;
191 }
192
193 ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt,
194 IRQF_SHARED, pdev->name, data);
195 if (ret)
196 return ret;
197
198 ret = power_supply_register(&pdev->dev, &data->ac);
199 if (ret)
200 return ret;
201
202 ret = power_supply_register(&pdev->dev, &data->battery);
203 if (ret) {
204 power_supply_unregister(&data->ac);
205 return ret;
206 }
207
208 platform_set_drvdata(pdev, data);
209 battery_data = data;
210
211 GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
212 return 0;
213}
214
215static int goldfish_battery_remove(struct platform_device *pdev)
216{
217 struct goldfish_battery_data *data = platform_get_drvdata(pdev);
218
219 power_supply_unregister(&data->battery);
220 power_supply_unregister(&data->ac);
221 battery_data = NULL;
222 return 0;
223}
224
225static struct platform_driver goldfish_battery_device = {
226 .probe = goldfish_battery_probe,
227 .remove = goldfish_battery_remove,
228 .driver = {
229 .name = "goldfish-battery"
230 }
231};
232module_platform_driver(goldfish_battery_device);
233
234MODULE_AUTHOR("Mike Lockwood lockwood@android.com");
235MODULE_LICENSE("GPL");
236MODULE_DESCRIPTION("Battery driver for the Goldfish emulator");
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index 4ee71a90e248..5ef41b819172 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -367,28 +367,28 @@ static int lp8727_battery_get_property(struct power_supply *psy,
367 return -EINVAL; 367 return -EINVAL;
368 368
369 if (pdata->get_batt_present) 369 if (pdata->get_batt_present)
370 val->intval = pchg->pdata->get_batt_present(); 370 val->intval = pdata->get_batt_present();
371 break; 371 break;
372 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 372 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
373 if (!pdata) 373 if (!pdata)
374 return -EINVAL; 374 return -EINVAL;
375 375
376 if (pdata->get_batt_level) 376 if (pdata->get_batt_level)
377 val->intval = pchg->pdata->get_batt_level(); 377 val->intval = pdata->get_batt_level();
378 break; 378 break;
379 case POWER_SUPPLY_PROP_CAPACITY: 379 case POWER_SUPPLY_PROP_CAPACITY:
380 if (!pdata) 380 if (!pdata)
381 return -EINVAL; 381 return -EINVAL;
382 382
383 if (pdata->get_batt_capacity) 383 if (pdata->get_batt_capacity)
384 val->intval = pchg->pdata->get_batt_capacity(); 384 val->intval = pdata->get_batt_capacity();
385 break; 385 break;
386 case POWER_SUPPLY_PROP_TEMP: 386 case POWER_SUPPLY_PROP_TEMP:
387 if (!pdata) 387 if (!pdata)
388 return -EINVAL; 388 return -EINVAL;
389 389
390 if (pdata->get_batt_temp) 390 if (pdata->get_batt_temp)
391 val->intval = pchg->pdata->get_batt_temp(); 391 val->intval = pdata->get_batt_temp();
392 break; 392 break;
393 default: 393 default:
394 break; 394 break;
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index 22b6407c9ca9..e33d6b2a7a56 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -367,7 +367,8 @@ static inline bool lp8788_is_valid_charger_register(u8 addr)
367 return addr >= LP8788_CHG_START && addr <= LP8788_CHG_END; 367 return addr >= LP8788_CHG_START && addr <= LP8788_CHG_END;
368} 368}
369 369
370static int lp8788_update_charger_params(struct lp8788_charger *pchg) 370static int lp8788_update_charger_params(struct platform_device *pdev,
371 struct lp8788_charger *pchg)
371{ 372{
372 struct lp8788 *lp = pchg->lp; 373 struct lp8788 *lp = pchg->lp;
373 struct lp8788_charger_platform_data *pdata = pchg->pdata; 374 struct lp8788_charger_platform_data *pdata = pchg->pdata;
@@ -376,7 +377,7 @@ static int lp8788_update_charger_params(struct lp8788_charger *pchg)
376 int ret; 377 int ret;
377 378
378 if (!pdata || !pdata->chg_params) { 379 if (!pdata || !pdata->chg_params) {
379 dev_info(lp->dev, "skip updating charger parameters\n"); 380 dev_info(&pdev->dev, "skip updating charger parameters\n");
380 return 0; 381 return 0;
381 } 382 }
382 383
@@ -537,7 +538,6 @@ err_free_irq:
537static int lp8788_irq_register(struct platform_device *pdev, 538static int lp8788_irq_register(struct platform_device *pdev,
538 struct lp8788_charger *pchg) 539 struct lp8788_charger *pchg)
539{ 540{
540 struct lp8788 *lp = pchg->lp;
541 const char *name[] = { 541 const char *name[] = {
542 LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ 542 LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ
543 }; 543 };
@@ -550,13 +550,13 @@ static int lp8788_irq_register(struct platform_device *pdev,
550 for (i = 0; i < ARRAY_SIZE(name); i++) { 550 for (i = 0; i < ARRAY_SIZE(name); i++) {
551 ret = lp8788_set_irqs(pdev, pchg, name[i]); 551 ret = lp8788_set_irqs(pdev, pchg, name[i]);
552 if (ret) { 552 if (ret) {
553 dev_warn(lp->dev, "irq setup failed: %s\n", name[i]); 553 dev_warn(&pdev->dev, "irq setup failed: %s\n", name[i]);
554 return ret; 554 return ret;
555 } 555 }
556 } 556 }
557 557
558 if (pchg->num_irqs > LP8788_MAX_CHG_IRQS) { 558 if (pchg->num_irqs > LP8788_MAX_CHG_IRQS) {
559 dev_err(lp->dev, "invalid total number of irqs: %d\n", 559 dev_err(&pdev->dev, "invalid total number of irqs: %d\n",
560 pchg->num_irqs); 560 pchg->num_irqs);
561 return -EINVAL; 561 return -EINVAL;
562 } 562 }
@@ -690,9 +690,10 @@ static int lp8788_charger_probe(struct platform_device *pdev)
690{ 690{
691 struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent); 691 struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
692 struct lp8788_charger *pchg; 692 struct lp8788_charger *pchg;
693 struct device *dev = &pdev->dev;
693 int ret; 694 int ret;
694 695
695 pchg = devm_kzalloc(lp->dev, sizeof(struct lp8788_charger), GFP_KERNEL); 696 pchg = devm_kzalloc(dev, sizeof(struct lp8788_charger), GFP_KERNEL);
696 if (!pchg) 697 if (!pchg)
697 return -ENOMEM; 698 return -ENOMEM;
698 699
@@ -700,7 +701,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
700 pchg->pdata = lp->pdata ? lp->pdata->chg_pdata : NULL; 701 pchg->pdata = lp->pdata ? lp->pdata->chg_pdata : NULL;
701 platform_set_drvdata(pdev, pchg); 702 platform_set_drvdata(pdev, pchg);
702 703
703 ret = lp8788_update_charger_params(pchg); 704 ret = lp8788_update_charger_params(pdev, pchg);
704 if (ret) 705 if (ret)
705 return ret; 706 return ret;
706 707
@@ -718,7 +719,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
718 719
719 ret = lp8788_irq_register(pdev, pchg); 720 ret = lp8788_irq_register(pdev, pchg);
720 if (ret) 721 if (ret)
721 dev_warn(lp->dev, "failed to register charger irq: %d\n", ret); 722 dev_warn(dev, "failed to register charger irq: %d\n", ret);
722 723
723 return 0; 724 return 0;
724} 725}
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 22cfe9cc4727..74a0bd9bc162 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -207,7 +207,7 @@ static int max17040_probe(struct i2c_client *client,
207 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) 207 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
208 return -EIO; 208 return -EIO;
209 209
210 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 210 chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
211 if (!chip) 211 if (!chip)
212 return -ENOMEM; 212 return -ENOMEM;
213 213
@@ -225,7 +225,6 @@ static int max17040_probe(struct i2c_client *client,
225 ret = power_supply_register(&client->dev, &chip->battery); 225 ret = power_supply_register(&client->dev, &chip->battery);
226 if (ret) { 226 if (ret) {
227 dev_err(&client->dev, "failed: power supply register\n"); 227 dev_err(&client->dev, "failed: power supply register\n");
228 kfree(chip);
229 return ret; 228 return ret;
230 } 229 }
231 230
@@ -244,7 +243,6 @@ static int max17040_remove(struct i2c_client *client)
244 243
245 power_supply_unregister(&chip->battery); 244 power_supply_unregister(&chip->battery);
246 cancel_delayed_work(&chip->work); 245 cancel_delayed_work(&chip->work);
247 kfree(chip);
248 return 0; 246 return 0;
249} 247}
250 248
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
new file mode 100644
index 000000000000..ed48d75bb786
--- /dev/null
+++ b/drivers/power/pm2301_charger.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright 2012 ST Ericsson.
3 *
4 * Power supply driver for ST Ericsson pm2xxx_charger charger
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/interrupt.h>
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18#include <linux/power_supply.h>
19#include <linux/completion.h>
20#include <linux/regulator/consumer.h>
21#include <linux/err.h>
22#include <linux/i2c.h>
23#include <linux/workqueue.h>
24#include <linux/kobject.h>
25#include <linux/mfd/abx500.h>
26#include <linux/mfd/abx500/ab8500.h>
27#include <linux/mfd/abx500/ab8500-bm.h>
28#include <linux/mfd/abx500/ab8500-gpadc.h>
29#include <linux/mfd/abx500/ux500_chargalg.h>
30#include <linux/pm2301_charger.h>
31#include <linux/gpio.h>
32
33#include "pm2301_charger.h"
34
35#define to_pm2xxx_charger_ac_device_info(x) container_of((x), \
36 struct pm2xxx_charger, ac_chg)
37
38static int pm2xxx_interrupt_registers[] = {
39 PM2XXX_REG_INT1,
40 PM2XXX_REG_INT2,
41 PM2XXX_REG_INT3,
42 PM2XXX_REG_INT4,
43 PM2XXX_REG_INT5,
44 PM2XXX_REG_INT6,
45};
46
47static enum power_supply_property pm2xxx_charger_ac_props[] = {
48 POWER_SUPPLY_PROP_HEALTH,
49 POWER_SUPPLY_PROP_PRESENT,
50 POWER_SUPPLY_PROP_ONLINE,
51 POWER_SUPPLY_PROP_VOLTAGE_AVG,
52};
53
54static int pm2xxx_charger_voltage_map[] = {
55 3500,
56 3525,
57 3550,
58 3575,
59 3600,
60 3625,
61 3650,
62 3675,
63 3700,
64 3725,
65 3750,
66 3775,
67 3800,
68 3825,
69 3850,
70 3875,
71 3900,
72 3925,
73 3950,
74 3975,
75 4000,
76 4025,
77 4050,
78 4075,
79 4100,
80 4125,
81 4150,
82 4175,
83 4200,
84 4225,
85 4250,
86 4275,
87 4300,
88};
89
90static int pm2xxx_charger_current_map[] = {
91 200,
92 200,
93 400,
94 600,
95 800,
96 1000,
97 1200,
98 1400,
99 1600,
100 1800,
101 2000,
102 2200,
103 2400,
104 2600,
105 2800,
106 3000,
107};
108
109static const struct i2c_device_id pm2xxx_ident[] = {
110 { "pm2301", 0 },
111 { }
112};
113
114static void set_lpn_pin(struct pm2xxx_charger *pm2)
115{
116 if (pm2->ac.charger_connected)
117 return;
118 gpio_set_value(pm2->lpn_pin, 1);
119
120 return;
121}
122
123static void clear_lpn_pin(struct pm2xxx_charger *pm2)
124{
125 if (pm2->ac.charger_connected)
126 return;
127 gpio_set_value(pm2->lpn_pin, 0);
128
129 return;
130}
131
132static int pm2xxx_reg_read(struct pm2xxx_charger *pm2, int reg, u8 *val)
133{
134 int ret;
135 /*
136 * When AC adaptor is unplugged, the host
137 * must put LPN high to be able to
138 * communicate by I2C with PM2301
139 * and receive I2C "acknowledge" from PM2301.
140 */
141 mutex_lock(&pm2->lock);
142 set_lpn_pin(pm2);
143
144 ret = i2c_smbus_read_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
145 1, val);
146 if (ret < 0)
147 dev_err(pm2->dev, "Error reading register at 0x%x\n", reg);
148 else
149 ret = 0;
150 clear_lpn_pin(pm2);
151 mutex_unlock(&pm2->lock);
152
153 return ret;
154}
155
156static int pm2xxx_reg_write(struct pm2xxx_charger *pm2, int reg, u8 val)
157{
158 int ret;
159 /*
160 * When AC adaptor is unplugged, the host
161 * must put LPN high to be able to
162 * communicate by I2C with PM2301
163 * and receive I2C "acknowledge" from PM2301.
164 */
165 mutex_lock(&pm2->lock);
166 set_lpn_pin(pm2);
167
168 ret = i2c_smbus_write_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
169 1, &val);
170 if (ret < 0)
171 dev_err(pm2->dev, "Error writing register at 0x%x\n", reg);
172 else
173 ret = 0;
174 clear_lpn_pin(pm2);
175 mutex_unlock(&pm2->lock);
176
177 return ret;
178}
179
180static int pm2xxx_charging_enable_mngt(struct pm2xxx_charger *pm2)
181{
182 int ret;
183
184 /* Enable charging */
185 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
186 (PM2XXX_CH_AUTO_RESUME_EN | PM2XXX_CHARGER_ENA));
187
188 return ret;
189}
190
191static int pm2xxx_charging_disable_mngt(struct pm2xxx_charger *pm2)
192{
193 int ret;
194
195 /* Disable charging */
196 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
197 (PM2XXX_CH_AUTO_RESUME_DIS | PM2XXX_CHARGER_DIS));
198
199 return ret;
200}
201
202static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
203{
204 queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
205
206 return 0;
207}
208
209
210int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
211{
212 queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
213
214 return 0;
215}
216
217static int pm2xxx_charger_ovv_mngt(struct pm2xxx_charger *pm2, int val)
218{
219 int ret = 0;
220
221 pm2->failure_input_ovv++;
222 if (pm2->failure_input_ovv < 4) {
223 ret = pm2xxx_charging_enable_mngt(pm2);
224 goto out;
225 } else {
226 pm2->failure_input_ovv = 0;
227 dev_err(pm2->dev, "Overvoltage detected\n");
228 pm2->flags.ovv = true;
229 power_supply_changed(&pm2->ac_chg.psy);
230 }
231
232out:
233 return ret;
234}
235
236static int pm2xxx_charger_wd_exp_mngt(struct pm2xxx_charger *pm2, int val)
237{
238 dev_dbg(pm2->dev , "20 minutes watchdog occured\n");
239
240 pm2->ac.wd_expired = true;
241 power_supply_changed(&pm2->ac_chg.psy);
242
243 return 0;
244}
245
246static int pm2xxx_charger_vbat_lsig_mngt(struct pm2xxx_charger *pm2, int val)
247{
248 switch (val) {
249 case PM2XXX_INT1_ITVBATLOWR:
250 dev_dbg(pm2->dev, "VBAT grows above VBAT_LOW level\n");
251 break;
252
253 case PM2XXX_INT1_ITVBATLOWF:
254 dev_dbg(pm2->dev, "VBAT drops below VBAT_LOW level\n");
255 break;
256
257 default:
258 dev_err(pm2->dev, "Unknown VBAT level\n");
259 }
260
261 return 0;
262}
263
264static int pm2xxx_charger_bat_disc_mngt(struct pm2xxx_charger *pm2, int val)
265{
266 dev_dbg(pm2->dev, "battery disconnected\n");
267
268 return 0;
269}
270
271static int pm2xxx_charger_detection(struct pm2xxx_charger *pm2, u8 *val)
272{
273 int ret;
274
275 ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT2, val);
276
277 if (ret < 0) {
278 dev_err(pm2->dev, "Charger detection failed\n");
279 goto out;
280 }
281
282 *val &= (PM2XXX_INT2_S_ITVPWR1PLUG | PM2XXX_INT2_S_ITVPWR2PLUG);
283
284out:
285 return ret;
286}
287
288static int pm2xxx_charger_itv_pwr_plug_mngt(struct pm2xxx_charger *pm2, int val)
289{
290
291 int ret;
292 u8 read_val;
293
294 /*
295 * Since we can't be sure that the events are received
296 * synchronously, we have the check if the main charger is
297 * connected by reading the interrupt source register.
298 */
299 ret = pm2xxx_charger_detection(pm2, &read_val);
300
301 if ((ret == 0) && read_val) {
302 pm2->ac.charger_connected = 1;
303 pm2->ac_conn = true;
304 queue_work(pm2->charger_wq, &pm2->ac_work);
305 }
306
307
308 return ret;
309}
310
311static int pm2xxx_charger_itv_pwr_unplug_mngt(struct pm2xxx_charger *pm2,
312 int val)
313{
314 pm2->ac.charger_connected = 0;
315 queue_work(pm2->charger_wq, &pm2->ac_work);
316
317 return 0;
318}
319
320static int pm2_int_reg0(void *pm2_data, int val)
321{
322 struct pm2xxx_charger *pm2 = pm2_data;
323 int ret = 0;
324
325 if (val & (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF)) {
326 ret = pm2xxx_charger_vbat_lsig_mngt(pm2, val &
327 (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF));
328 }
329
330 if (val & PM2XXX_INT1_ITVBATDISCONNECT) {
331 ret = pm2xxx_charger_bat_disc_mngt(pm2,
332 PM2XXX_INT1_ITVBATDISCONNECT);
333 }
334
335 return ret;
336}
337
338static int pm2_int_reg1(void *pm2_data, int val)
339{
340 struct pm2xxx_charger *pm2 = pm2_data;
341 int ret = 0;
342
343 if (val & (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG)) {
344 dev_dbg(pm2->dev , "Main charger plugged\n");
345 ret = pm2xxx_charger_itv_pwr_plug_mngt(pm2, val &
346 (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG));
347 }
348
349 if (val &
350 (PM2XXX_INT2_ITVPWR1UNPLUG | PM2XXX_INT2_ITVPWR2UNPLUG)) {
351 dev_dbg(pm2->dev , "Main charger unplugged\n");
352 ret = pm2xxx_charger_itv_pwr_unplug_mngt(pm2, val &
353 (PM2XXX_INT2_ITVPWR1UNPLUG |
354 PM2XXX_INT2_ITVPWR2UNPLUG));
355 }
356
357 return ret;
358}
359
360static int pm2_int_reg2(void *pm2_data, int val)
361{
362 struct pm2xxx_charger *pm2 = pm2_data;
363 int ret = 0;
364
365 if (val & PM2XXX_INT3_ITAUTOTIMEOUTWD)
366 ret = pm2xxx_charger_wd_exp_mngt(pm2, val);
367
368 if (val & (PM2XXX_INT3_ITCHPRECHARGEWD |
369 PM2XXX_INT3_ITCHCCWD | PM2XXX_INT3_ITCHCVWD)) {
370 dev_dbg(pm2->dev,
371 "Watchdog occured for precharge, CC and CV charge\n");
372 }
373
374 return ret;
375}
376
377static int pm2_int_reg3(void *pm2_data, int val)
378{
379 struct pm2xxx_charger *pm2 = pm2_data;
380 int ret = 0;
381
382 if (val & (PM2XXX_INT4_ITCHARGINGON)) {
383 dev_dbg(pm2->dev ,
384 "chargind operation has started\n");
385 }
386
387 if (val & (PM2XXX_INT4_ITVRESUME)) {
388 dev_dbg(pm2->dev,
389 "battery discharged down to VResume threshold\n");
390 }
391
392 if (val & (PM2XXX_INT4_ITBATTFULL)) {
393 dev_dbg(pm2->dev , "battery fully detected\n");
394 }
395
396 if (val & (PM2XXX_INT4_ITCVPHASE)) {
397 dev_dbg(pm2->dev, "CV phase enter with 0.5C charging\n");
398 }
399
400 if (val & (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV)) {
401 pm2->failure_case = VPWR_OVV;
402 ret = pm2xxx_charger_ovv_mngt(pm2, val &
403 (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV));
404 dev_dbg(pm2->dev, "VPWR/VSYSTEM overvoltage detected\n");
405 }
406
407 if (val & (PM2XXX_INT4_S_ITBATTEMPCOLD |
408 PM2XXX_INT4_S_ITBATTEMPHOT)) {
409 ret = pm2xxx_charger_batt_therm_mngt(pm2, val &
410 (PM2XXX_INT4_S_ITBATTEMPCOLD |
411 PM2XXX_INT4_S_ITBATTEMPHOT));
412 dev_dbg(pm2->dev, "BTEMP is too Low/High\n");
413 }
414
415 return ret;
416}
417
418static int pm2_int_reg4(void *pm2_data, int val)
419{
420 struct pm2xxx_charger *pm2 = pm2_data;
421 int ret = 0;
422
423 if (val & PM2XXX_INT5_ITVSYSTEMOVV) {
424 pm2->failure_case = VSYSTEM_OVV;
425 ret = pm2xxx_charger_ovv_mngt(pm2, val &
426 PM2XXX_INT5_ITVSYSTEMOVV);
427 dev_dbg(pm2->dev, "VSYSTEM overvoltage detected\n");
428 }
429
430 if (val & (PM2XXX_INT5_ITTHERMALWARNINGFALL |
431 PM2XXX_INT5_ITTHERMALWARNINGRISE |
432 PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
433 PM2XXX_INT5_ITTHERMALSHUTDOWNRISE)) {
434 dev_dbg(pm2->dev, "BTEMP die temperature is too Low/High\n");
435 ret = pm2xxx_charger_die_therm_mngt(pm2, val &
436 (PM2XXX_INT5_ITTHERMALWARNINGFALL |
437 PM2XXX_INT5_ITTHERMALWARNINGRISE |
438 PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
439 PM2XXX_INT5_ITTHERMALSHUTDOWNRISE));
440 }
441
442 return ret;
443}
444
445static int pm2_int_reg5(void *pm2_data, int val)
446{
447 struct pm2xxx_charger *pm2 = pm2_data;
448 int ret = 0;
449
450
451 if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
452 dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
453 }
454
455 if (val & (PM2XXX_INT6_ITVPWR2VALIDRISE |
456 PM2XXX_INT6_ITVPWR1VALIDRISE |
457 PM2XXX_INT6_ITVPWR2VALIDFALL |
458 PM2XXX_INT6_ITVPWR1VALIDFALL)) {
459 dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
460 }
461
462 return ret;
463}
464
465static irqreturn_t pm2xxx_irq_int(int irq, void *data)
466{
467 struct pm2xxx_charger *pm2 = data;
468 struct pm2xxx_interrupts *interrupt = pm2->pm2_int;
469 int i;
470
471 for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
472 pm2xxx_reg_read(pm2,
473 pm2xxx_interrupt_registers[i],
474 &(interrupt->reg[i]));
475
476 if (interrupt->reg[i] > 0)
477 interrupt->handler[i](pm2, interrupt->reg[i]);
478 }
479
480 return IRQ_HANDLED;
481}
482
483static int pm2xxx_charger_get_ac_cv(struct pm2xxx_charger *pm2)
484{
485 int ret = 0;
486 u8 val;
487
488 if (pm2->ac.charger_connected && pm2->ac.charger_online) {
489
490 ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &val);
491 if (ret < 0) {
492 dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
493 goto out;
494 }
495
496 if (val & PM2XXX_INT4_S_ITCVPHASE)
497 ret = PM2XXX_CONST_VOLT;
498 else
499 ret = PM2XXX_CONST_CURR;
500 }
501out:
502 return ret;
503}
504
505static int pm2xxx_current_to_regval(int curr)
506{
507 int i;
508
509 if (curr < pm2xxx_charger_current_map[0])
510 return 0;
511
512 for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_current_map); i++) {
513 if (curr < pm2xxx_charger_current_map[i])
514 return (i - 1);
515 }
516
517 i = ARRAY_SIZE(pm2xxx_charger_current_map) - 1;
518 if (curr == pm2xxx_charger_current_map[i])
519 return i;
520 else
521 return -EINVAL;
522}
523
524static int pm2xxx_voltage_to_regval(int curr)
525{
526 int i;
527
528 if (curr < pm2xxx_charger_voltage_map[0])
529 return 0;
530
531 for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_voltage_map); i++) {
532 if (curr < pm2xxx_charger_voltage_map[i])
533 return i - 1;
534 }
535
536 i = ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1;
537 if (curr == pm2xxx_charger_voltage_map[i])
538 return i;
539 else
540 return -EINVAL;
541}
542
543static int pm2xxx_charger_update_charger_current(struct ux500_charger *charger,
544 int ich_out)
545{
546 int ret;
547 int curr_index;
548 struct pm2xxx_charger *pm2;
549 u8 val;
550
551 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
552 pm2 = to_pm2xxx_charger_ac_device_info(charger);
553 else
554 return -ENXIO;
555
556 curr_index = pm2xxx_current_to_regval(ich_out);
557 if (curr_index < 0) {
558 dev_err(pm2->dev,
559 "Charger current too high, charging not started\n");
560 return -ENXIO;
561 }
562
563 ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
564 if (ret >= 0) {
565 val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
566 val |= curr_index;
567 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
568 if (ret < 0) {
569 dev_err(pm2->dev,
570 "%s write failed\n", __func__);
571 }
572 }
573 else
574 dev_err(pm2->dev, "%s read failed\n", __func__);
575
576 return ret;
577}
578
579static int pm2xxx_charger_ac_get_property(struct power_supply *psy,
580 enum power_supply_property psp,
581 union power_supply_propval *val)
582{
583 struct pm2xxx_charger *pm2;
584
585 pm2 = to_pm2xxx_charger_ac_device_info(psy_to_ux500_charger(psy));
586
587 switch (psp) {
588 case POWER_SUPPLY_PROP_HEALTH:
589 if (pm2->flags.mainextchnotok)
590 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
591 else if (pm2->ac.wd_expired)
592 val->intval = POWER_SUPPLY_HEALTH_DEAD;
593 else if (pm2->flags.main_thermal_prot)
594 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
595 else
596 val->intval = POWER_SUPPLY_HEALTH_GOOD;
597 break;
598 case POWER_SUPPLY_PROP_ONLINE:
599 val->intval = pm2->ac.charger_online;
600 break;
601 case POWER_SUPPLY_PROP_PRESENT:
602 val->intval = pm2->ac.charger_connected;
603 break;
604 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
605 pm2->ac.cv_active = pm2xxx_charger_get_ac_cv(pm2);
606 val->intval = pm2->ac.cv_active;
607 break;
608 default:
609 return -EINVAL;
610 }
611 return 0;
612}
613
614static int pm2xxx_charging_init(struct pm2xxx_charger *pm2)
615{
616 int ret = 0;
617
618 /* enable CC and CV watchdog */
619 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG3,
620 (PM2XXX_CH_WD_CV_PHASE_60MIN | PM2XXX_CH_WD_CC_PHASE_60MIN));
621 if( ret < 0)
622 return ret;
623
624 /* enable precharge watchdog */
625 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG4,
626 PM2XXX_CH_WD_PRECH_PHASE_60MIN);
627
628 /* Disable auto timeout */
629 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG5,
630 PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN);
631
632 /*
633 * EOC current level = 100mA
634 * Precharge current level = 100mA
635 * CC current level = 1000mA
636 */
637 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6,
638 (PM2XXX_DIR_CH_CC_CURRENT_1000MA |
639 PM2XXX_CH_PRECH_CURRENT_100MA |
640 PM2XXX_CH_EOC_CURRENT_100MA));
641
642 /*
643 * recharge threshold = 3.8V
644 * Precharge to CC threshold = 2.9V
645 */
646 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG7,
647 (PM2XXX_CH_PRECH_VOL_2_9 | PM2XXX_CH_VRESUME_VOL_3_8));
648
649 /* float voltage charger level = 4.2V */
650 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8,
651 PM2XXX_CH_VOLT_4_2);
652
653 /* Voltage drop between VBAT and VSYS in HW charging = 300mV */
654 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG9,
655 (PM2XXX_CH_150MV_DROP_300MV | PM2XXX_CHARCHING_INFO_DIS |
656 PM2XXX_CH_CC_REDUCED_CURRENT_IDENT |
657 PM2XXX_CH_CC_MODEDROP_DIS));
658
659 /* Input charger level of over voltage = 10V */
660 ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR2,
661 PM2XXX_VPWR2_OVV_10);
662 ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR1,
663 PM2XXX_VPWR1_OVV_10);
664
665 /* Input charger drop */
666 ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR2,
667 (PM2XXX_VPWR2_HW_OPT_DIS | PM2XXX_VPWR2_VALID_DIS |
668 PM2XXX_VPWR2_DROP_DIS));
669 ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR1,
670 (PM2XXX_VPWR1_HW_OPT_DIS | PM2XXX_VPWR1_VALID_DIS |
671 PM2XXX_VPWR1_DROP_DIS));
672
673 /* Disable battery low monitoring */
674 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_LOW_LEV_COMP_REG,
675 PM2XXX_VBAT_LOW_MONITORING_ENA);
676
677 /* Disable LED */
678 ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG,
679 PM2XXX_LED_SELECT_DIS);
680
681 return ret;
682}
683
684static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
685 int enable, int vset, int iset)
686{
687 int ret;
688 int volt_index;
689 int curr_index;
690 u8 val;
691
692 struct pm2xxx_charger *pm2 = to_pm2xxx_charger_ac_device_info(charger);
693
694 if (enable) {
695 if (!pm2->ac.charger_connected) {
696 dev_dbg(pm2->dev, "AC charger not connected\n");
697 return -ENXIO;
698 }
699
700 dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
701 if (!pm2->vddadc_en_ac) {
702 regulator_enable(pm2->regu);
703 pm2->vddadc_en_ac = true;
704 }
705
706 ret = pm2xxx_charging_init(pm2);
707 if (ret < 0) {
708 dev_err(pm2->dev, "%s charging init failed\n",
709 __func__);
710 goto error_occured;
711 }
712
713 volt_index = pm2xxx_voltage_to_regval(vset);
714 curr_index = pm2xxx_current_to_regval(iset);
715
716 if (volt_index < 0 || curr_index < 0) {
717 dev_err(pm2->dev,
718 "Charger voltage or current too high, "
719 "charging not started\n");
720 return -ENXIO;
721 }
722
723 ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG8, &val);
724 if (ret < 0) {
725 dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
726 goto error_occured;
727 }
728 val &= ~PM2XXX_CH_VOLT_MASK;
729 val |= volt_index;
730 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8, val);
731 if (ret < 0) {
732 dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
733 goto error_occured;
734 }
735
736 ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
737 if (ret < 0) {
738 dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
739 goto error_occured;
740 }
741 val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
742 val |= curr_index;
743 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
744 if (ret < 0) {
745 dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
746 goto error_occured;
747 }
748
749 if (!pm2->bat->enable_overshoot) {
750 ret = pm2xxx_reg_read(pm2, PM2XXX_LED_CTRL_REG, &val);
751 if (ret < 0) {
752 dev_err(pm2->dev, "%s pm2xxx read failed\n",
753 __func__);
754 goto error_occured;
755 }
756 val |= PM2XXX_ANTI_OVERSHOOT_EN;
757 ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG, val);
758 if (ret < 0) {
759 dev_err(pm2->dev, "%s pm2xxx write failed\n",
760 __func__);
761 goto error_occured;
762 }
763 }
764
765 ret = pm2xxx_charging_enable_mngt(pm2);
766 if (ret < 0) {
767 dev_err(pm2->dev, "Failed to enable"
768 "pm2xxx ac charger\n");
769 goto error_occured;
770 }
771
772 pm2->ac.charger_online = 1;
773 } else {
774 pm2->ac.charger_online = 0;
775 pm2->ac.wd_expired = false;
776
777 /* Disable regulator if enabled */
778 if (pm2->vddadc_en_ac) {
779 regulator_disable(pm2->regu);
780 pm2->vddadc_en_ac = false;
781 }
782
783 ret = pm2xxx_charging_disable_mngt(pm2);
784 if (ret < 0) {
785 dev_err(pm2->dev, "failed to disable"
786 "pm2xxx ac charger\n");
787 goto error_occured;
788 }
789
790 dev_dbg(pm2->dev, "PM2301: " "Disabled AC charging\n");
791 }
792 power_supply_changed(&pm2->ac_chg.psy);
793
794error_occured:
795 return ret;
796}
797
798static int pm2xxx_charger_watchdog_kick(struct ux500_charger *charger)
799{
800 int ret;
801 struct pm2xxx_charger *pm2;
802
803 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
804 pm2 = to_pm2xxx_charger_ac_device_info(charger);
805 else
806 return -ENXIO;
807
808 ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_WD_KICK, WD_TIMER);
809 if (ret)
810 dev_err(pm2->dev, "Failed to kick WD!\n");
811
812 return ret;
813}
814
815static void pm2xxx_charger_ac_work(struct work_struct *work)
816{
817 struct pm2xxx_charger *pm2 = container_of(work,
818 struct pm2xxx_charger, ac_work);
819
820
821 power_supply_changed(&pm2->ac_chg.psy);
822 sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
823};
824
/*
 * Placeholder work item for main-charger thermal protection checks;
 * intentionally empty (no handling implemented yet).
 *
 * FIX: removed the stray ';' that followed the function body.
 */
static void pm2xxx_charger_check_main_thermal_prot_work(
	struct work_struct *work)
{
}
829
/*
 * Interrupt dispatch table: handler[i] services source register i, in
 * the same order as pm2xxx_interrupt_registers[] walked by
 * pm2xxx_irq_int().
 */
static struct pm2xxx_interrupts pm2xxx_int = {
	.handler[0] = pm2_int_reg0,
	.handler[1] = pm2_int_reg1,
	.handler[2] = pm2_int_reg2,
	.handler[3] = pm2_int_reg3,
	.handler[4] = pm2_int_reg4,
	.handler[5] = pm2_int_reg5,
};
838
/* IRQ descriptor table: a single combined interrupt line is used. */
static struct pm2xxx_irq pm2xxx_charger_irq[] = {
	{"PM2XXX_IRQ_INT", pm2xxx_irq_int},
};
842
/*
 * Resume hook: no-op. NOTE(review): assumes the charger keeps its
 * register configuration across suspend — confirm on hardware.
 */
static int pm2xxx_wall_charger_resume(struct i2c_client *i2c_client)
{
	return 0;
}
847
/* Suspend hook: no-op; nothing is saved or powered down here. */
static int pm2xxx_wall_charger_suspend(struct i2c_client *i2c_client,
	pm_message_t state)
{
	return 0;
}
853
854static int __devinit pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
855 const struct i2c_device_id *id)
856{
857 struct pm2xxx_platform_data *pl_data = i2c_client->dev.platform_data;
858 struct pm2xxx_charger *pm2;
859 int ret = 0;
860 u8 val;
861
862 pm2 = kzalloc(sizeof(struct pm2xxx_charger), GFP_KERNEL);
863 if (!pm2) {
864 dev_err(pm2->dev, "pm2xxx_charger allocation failed\n");
865 return -ENOMEM;
866 }
867
868 /* get parent data */
869 pm2->dev = &i2c_client->dev;
870 pm2->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
871
872 pm2->pm2_int = &pm2xxx_int;
873
874 /* get charger spcific platform data */
875 if (!pl_data->wall_charger) {
876 dev_err(pm2->dev, "no charger platform data supplied\n");
877 ret = -EINVAL;
878 goto free_device_info;
879 }
880
881 pm2->pdata = pl_data->wall_charger;
882
883 /* get battery specific platform data */
884 if (!pl_data->battery) {
885 dev_err(pm2->dev, "no battery platform data supplied\n");
886 ret = -EINVAL;
887 goto free_device_info;
888 }
889
890 pm2->bat = pl_data->battery;
891
892 /*get lpn GPIO from platform data*/
893 if (!pm2->pdata->lpn_gpio) {
894 dev_err(pm2->dev, "no lpn gpio data supplied\n");
895 ret = -EINVAL;
896 goto free_device_info;
897 }
898 pm2->lpn_pin = pm2->pdata->lpn_gpio;
899
900 if (!i2c_check_functionality(i2c_client->adapter,
901 I2C_FUNC_SMBUS_BYTE_DATA |
902 I2C_FUNC_SMBUS_READ_WORD_DATA)) {
903 ret = -ENODEV;
904 dev_info(pm2->dev, "pm2301 i2c_check_functionality failed\n");
905 goto free_device_info;
906 }
907
908 pm2->config.pm2xxx_i2c = i2c_client;
909 pm2->config.pm2xxx_id = (struct i2c_device_id *) id;
910 i2c_set_clientdata(i2c_client, pm2);
911
912 /* AC supply */
913 /* power_supply base class */
914 pm2->ac_chg.psy.name = pm2->pdata->label;
915 pm2->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
916 pm2->ac_chg.psy.properties = pm2xxx_charger_ac_props;
917 pm2->ac_chg.psy.num_properties = ARRAY_SIZE(pm2xxx_charger_ac_props);
918 pm2->ac_chg.psy.get_property = pm2xxx_charger_ac_get_property;
919 pm2->ac_chg.psy.supplied_to = pm2->pdata->supplied_to;
920 pm2->ac_chg.psy.num_supplicants = pm2->pdata->num_supplicants;
921 /* pm2xxx_charger sub-class */
922 pm2->ac_chg.ops.enable = &pm2xxx_charger_ac_en;
923 pm2->ac_chg.ops.kick_wd = &pm2xxx_charger_watchdog_kick;
924 pm2->ac_chg.ops.update_curr = &pm2xxx_charger_update_charger_current;
925 pm2->ac_chg.max_out_volt = pm2xxx_charger_voltage_map[
926 ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1];
927 pm2->ac_chg.max_out_curr = pm2xxx_charger_current_map[
928 ARRAY_SIZE(pm2xxx_charger_current_map) - 1];
929 pm2->ac_chg.wdt_refresh = WD_KICK_INTERVAL;
930 pm2->ac_chg.enabled = true;
931 pm2->ac_chg.external = true;
932
933 /* Create a work queue for the charger */
934 pm2->charger_wq =
935 create_singlethread_workqueue("pm2xxx_charger_wq");
936 if (pm2->charger_wq == NULL) {
937 dev_err(pm2->dev, "failed to create work queue\n");
938 goto free_device_info;
939 }
940
941 /* Init work for charger detection */
942 INIT_WORK(&pm2->ac_work, pm2xxx_charger_ac_work);
943
944 /* Init work for checking HW status */
945 INIT_WORK(&pm2->check_main_thermal_prot_work,
946 pm2xxx_charger_check_main_thermal_prot_work);
947
948 /*
949 * VDD ADC supply needs to be enabled from this driver when there
950 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
951 * interrupts during charging
952 */
953 pm2->regu = regulator_get(pm2->dev, "vddadc");
954 if (IS_ERR(pm2->regu)) {
955 ret = PTR_ERR(pm2->regu);
956 dev_err(pm2->dev, "failed to get vddadc regulator\n");
957 goto free_charger_wq;
958 }
959
960 /* Register AC charger class */
961 ret = power_supply_register(pm2->dev, &pm2->ac_chg.psy);
962 if (ret) {
963 dev_err(pm2->dev, "failed to register AC charger\n");
964 goto free_regulator;
965 }
966
967 /* Register interrupts */
968 ret = request_threaded_irq(pm2->pdata->irq_number, NULL,
969 pm2xxx_charger_irq[0].isr,
970 pm2->pdata->irq_type,
971 pm2xxx_charger_irq[0].name, pm2);
972
973 if (ret != 0) {
974 dev_err(pm2->dev, "failed to request %s IRQ %d: %d\n",
975 pm2xxx_charger_irq[0].name, pm2->pdata->irq_number, ret);
976 goto unregister_pm2xxx_charger;
977 }
978
979 /*Initialize lock*/
980 mutex_init(&pm2->lock);
981
982 /*
983 * Charger detection mechanism requires pulling up the LPN pin
984 * while i2c communication if Charger is not connected
985 * LPN pin of PM2301 is GPIO60 of AB9540
986 */
987 ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
988 if (ret < 0) {
989 dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
990 goto unregister_pm2xxx_charger;
991 }
992 ret = gpio_direction_output(pm2->lpn_pin, 0);
993 if (ret < 0) {
994 dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
995 goto free_gpio;
996 }
997
998 ret = pm2xxx_charger_detection(pm2, &val);
999
1000 if ((ret == 0) && val) {
1001 pm2->ac.charger_connected = 1;
1002 pm2->ac_conn = true;
1003 power_supply_changed(&pm2->ac_chg.psy);
1004 sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
1005 }
1006
1007 return 0;
1008
1009free_gpio:
1010 gpio_free(pm2->lpn_pin);
1011unregister_pm2xxx_charger:
1012 /* unregister power supply */
1013 power_supply_unregister(&pm2->ac_chg.psy);
1014free_regulator:
1015 /* disable the regulator */
1016 regulator_put(pm2->regu);
1017free_charger_wq:
1018 destroy_workqueue(pm2->charger_wq);
1019free_device_info:
1020 kfree(pm2);
1021 return ret;
1022}
1023
/*
 * I2C remove: tear down in reverse order of probe — stop charging,
 * release the IRQ, destroy the workqueue, drop the vddadc regulator,
 * unregister the power supply, free the LPN GPIO and the state struct.
 */
static int __devexit pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
{
	struct pm2xxx_charger *pm2 = i2c_get_clientdata(i2c_client);

	/* Disable AC charging */
	pm2xxx_charger_ac_en(&pm2->ac_chg, false, 0, 0);

	/* Disable interrupts */
	free_irq(pm2->pdata->irq_number, pm2);

	/* Delete the work queue */
	destroy_workqueue(pm2->charger_wq);

	/* Drain any work still queued on the global workqueue */
	flush_scheduled_work();

	/* disable the regulator */
	regulator_put(pm2->regu);

	power_supply_unregister(&pm2->ac_chg.psy);

	/* Free GPIO60 (LPN pin) */
	gpio_free(pm2->lpn_pin);

	kfree(pm2);

	return 0;
}
1051
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id pm2xxx_id[] = {
	{ "pm2301", 0 },
	{ }
};
1056
1057MODULE_DEVICE_TABLE(i2c, pm2xxx_id);
1058
/* I2C driver glue: probe/remove plus legacy suspend/resume stubs. */
static struct i2c_driver pm2xxx_charger_driver = {
	.probe = pm2xxx_wall_charger_probe,
	.remove = __devexit_p(pm2xxx_wall_charger_remove),
	.suspend = pm2xxx_wall_charger_suspend,
	.resume = pm2xxx_wall_charger_resume,
	.driver = {
		.name = "pm2xxx-wall_charger",
		.owner = THIS_MODULE,
	},
	.id_table = pm2xxx_id,
};
1070
/* Module init: register the I2C driver (via subsys_initcall_sync). */
static int __init pm2xxx_charger_init(void)
{
	return i2c_add_driver(&pm2xxx_charger_driver);
}
1075
/* Module exit: unregister the I2C driver. */
static void __exit pm2xxx_charger_exit(void)
{
	i2c_del_driver(&pm2xxx_charger_driver);
}
1080
1081subsys_initcall_sync(pm2xxx_charger_init);
1082module_exit(pm2xxx_charger_exit);
1083
1084MODULE_LICENSE("GPL v2");
1085MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
1086MODULE_ALIAS("platform:pm2xxx-charger");
1087MODULE_DESCRIPTION("PM2xxx charger management driver");
1088
diff --git a/drivers/power/pm2301_charger.h b/drivers/power/pm2301_charger.h
new file mode 100644
index 000000000000..e6319cdbc94f
--- /dev/null
+++ b/drivers/power/pm2301_charger.h
@@ -0,0 +1,513 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * PM2301 power supply interface
5 *
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef PM2301_CHARGER_H
10#define PM2301_CHARGER_H
11
12#define MAIN_WDOG_ENA 0x01
13#define MAIN_WDOG_KICK 0x02
14#define MAIN_WDOG_DIS 0x00
15#define CHARG_WD_KICK 0x01
16#define MAIN_CH_ENA 0x01
17#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
18#define MAIN_CH_DET 0x01
19#define MAIN_CH_CV_ON 0x04
20#define OTP_ENABLE_WD 0x01
21
22#define MAIN_CH_INPUT_CURR_SHIFT 4
23
24#define LED_INDICATOR_PWM_ENA 0x01
25#define LED_INDICATOR_PWM_DIS 0x00
26#define LED_IND_CUR_5MA 0x04
27#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
28
29/* HW failure constants */
30#define MAIN_CH_TH_PROT 0x02
31#define MAIN_CH_NOK 0x01
32
33/* Watchdog timeout constant */
34#define WD_TIMER 0x30 /* 4min */
35#define WD_KICK_INTERVAL (30 * HZ)
36
37#define PM2XXX_NUM_INT_REG 0x6
38
39/* Constant voltage/current */
40#define PM2XXX_CONST_CURR 0x0
41#define PM2XXX_CONST_VOLT 0x1
42
43/* Lowest charger voltage is 3.39V -> 0x4E */
44#define LOW_VOLT_REG 0x4E
45
46#define PM2XXX_BATT_CTRL_REG1 0x00
47#define PM2XXX_BATT_CTRL_REG2 0x01
48#define PM2XXX_BATT_CTRL_REG3 0x02
49#define PM2XXX_BATT_CTRL_REG4 0x03
50#define PM2XXX_BATT_CTRL_REG5 0x04
51#define PM2XXX_BATT_CTRL_REG6 0x05
52#define PM2XXX_BATT_CTRL_REG7 0x06
53#define PM2XXX_BATT_CTRL_REG8 0x07
54#define PM2XXX_NTC_CTRL_REG1 0x08
55#define PM2XXX_NTC_CTRL_REG2 0x09
56#define PM2XXX_BATT_CTRL_REG9 0x0A
57#define PM2XXX_BATT_STAT_REG1 0x0B
58#define PM2XXX_INP_VOLT_VPWR2 0x11
59#define PM2XXX_INP_DROP_VPWR2 0x13
60#define PM2XXX_INP_VOLT_VPWR1 0x15
61#define PM2XXX_INP_DROP_VPWR1 0x17
62#define PM2XXX_INP_MODE_VPWR 0x18
63#define PM2XXX_BATT_WD_KICK 0x70
64#define PM2XXX_DEV_VER_STAT 0x0C
65#define PM2XXX_THERM_WARN_CTRL_REG 0x20
66#define PM2XXX_BATT_DISC_REG 0x21
67#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
68#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
69#define PM2XXX_I2C_PAD_CTRL_REG 0x24
70#define PM2XXX_SW_CTRL_REG 0x26
71#define PM2XXX_LED_CTRL_REG 0x28
72
73#define PM2XXX_REG_INT1 0x40
74#define PM2XXX_MASK_REG_INT1 0x50
75#define PM2XXX_SRCE_REG_INT1 0x60
76#define PM2XXX_REG_INT2 0x41
77#define PM2XXX_MASK_REG_INT2 0x51
78#define PM2XXX_SRCE_REG_INT2 0x61
79#define PM2XXX_REG_INT3 0x42
80#define PM2XXX_MASK_REG_INT3 0x52
81#define PM2XXX_SRCE_REG_INT3 0x62
82#define PM2XXX_REG_INT4 0x43
83#define PM2XXX_MASK_REG_INT4 0x53
84#define PM2XXX_SRCE_REG_INT4 0x63
85#define PM2XXX_REG_INT5 0x44
86#define PM2XXX_MASK_REG_INT5 0x54
87#define PM2XXX_SRCE_REG_INT5 0x64
88#define PM2XXX_REG_INT6 0x45
89#define PM2XXX_MASK_REG_INT6 0x55
90#define PM2XXX_SRCE_REG_INT6 0x65
91
92#define VPWR_OVV 0x0
93#define VSYSTEM_OVV 0x1
94
95/* control Reg 1 */
96#define PM2XXX_CH_RESUME_EN 0x1
97#define PM2XXX_CH_RESUME_DIS 0x0
98
99/* control Reg 2 */
100#define PM2XXX_CH_AUTO_RESUME_EN 0X2
101#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
102#define PM2XXX_CHARGER_ENA 0x4
103#define PM2XXX_CHARGER_DIS 0x0
104
105/* control Reg 3 */
106#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
107#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
108#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
109#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
110#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
111#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
112#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
113#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
114
115#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
116#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
117#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
118#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
119#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
120#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
121#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
122#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
123
124/* control Reg 4 */
125#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
126#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
127#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
128#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
129#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
130#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
131#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
132#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
133
134/* control Reg 5 */
135#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
136#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
137
138/* control Reg 6 */
139#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
140#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
141#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
142#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
143#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
144#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
145#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
146#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
147#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
148#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
149#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
150#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
151#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
152#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
153#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
154#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
155
156#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
157#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
158#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
159#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
160#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
161
162#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
163#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
164#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
165#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
166#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
167
168/* control Reg 7 */
169#define PM2XXX_CH_PRECH_VOL_2_5 0x0
170#define PM2XXX_CH_PRECH_VOL_2_7 0x1
171#define PM2XXX_CH_PRECH_VOL_2_9 0x2
172#define PM2XXX_CH_PRECH_VOL_3_1 0x3
173
174#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
175#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
176#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
177#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
178
179/* control Reg 8 */
180#define PM2XXX_CH_VOLT_MASK 0x3F
181#define PM2XXX_CH_VOLT_3_5 0x0
182#define PM2XXX_CH_VOLT_3_5225 0x1
183#define PM2XXX_CH_VOLT_3_6 0x4
184#define PM2XXX_CH_VOLT_3_7 0x8
185#define PM2XXX_CH_VOLT_4_0 0x14
186#define PM2XXX_CH_VOLT_4_175 0x1B
187#define PM2XXX_CH_VOLT_4_2 0x1C
188#define PM2XXX_CH_VOLT_4_275 0x1F
189#define PM2XXX_CH_VOLT_4_3 0x20
190
191/*NTC control register 1*/
192#define PM2XXX_BTEMP_HIGH_TH_45 0x0
193#define PM2XXX_BTEMP_HIGH_TH_50 0x1
194#define PM2XXX_BTEMP_HIGH_TH_55 0x2
195#define PM2XXX_BTEMP_HIGH_TH_60 0x3
196#define PM2XXX_BTEMP_HIGH_TH_65 0x4
197
198#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
199#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
200#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
201#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
202
203/*NTC control register 2*/
204#define PM2XXX_NTC_BETA_COEFF_3477 0x0
205#define PM2XXX_NTC_BETA_COEFF_3964 0x1
206
207#define PM2XXX_NTC_RES_10K (0x0<<2)
208#define PM2XXX_NTC_RES_47K (0x1<<2)
209#define PM2XXX_NTC_RES_100K (0x2<<2)
210#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
211
212/* control Reg 9 */
213#define PM2XXX_CH_CC_MODEDROP_EN 1
214#define PM2XXX_CH_CC_MODEDROP_DIS 0
215
216#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
217#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
218#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
219#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
220
221#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
222#define PM2XXX_CHARCHING_INFO_EN (1<<3)
223
224#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
225#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
226
227
228/* charger status register */
229#define PM2XXX_CHG_STATUS_OFF 0x0
230#define PM2XXX_CHG_STATUS_ON 0x1
231#define PM2XXX_CHG_STATUS_FULL 0x2
232#define PM2XXX_CHG_STATUS_ERR 0x3
233#define PM2XXX_CHG_STATUS_WAIT 0x4
234#define PM2XXX_CHG_STATUS_NOBAT 0x5
235
236/* Input charger voltage VPWR2 */
237#define PM2XXX_VPWR2_OVV_6_0 0x0
238#define PM2XXX_VPWR2_OVV_6_3 0x1
239#define PM2XXX_VPWR2_OVV_10 0x2
240#define PM2XXX_VPWR2_OVV_NONE 0x3
241
242/* Input charger drop VPWR2 */
243#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
244#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
245
246#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
247#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
248
249#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
250#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
251
252/* Input charger voltage VPWR1 */
253#define PM2XXX_VPWR1_OVV_6_0 0x0
254#define PM2XXX_VPWR1_OVV_6_3 0x1
255#define PM2XXX_VPWR1_OVV_10 0x2
256#define PM2XXX_VPWR1_OVV_NONE 0x3
257
258/* Input charger drop VPWR1 */
259#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
260#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
261
262#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
263#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
264
265#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
266#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
267
268/* Battery low level comparator control register */
269#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
270#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
271
272/* Battery low level value control register */
273#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
274#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
275#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
276#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
277#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
278#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
279#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
280#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
281#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
282#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
283#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
284#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
285#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
286#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
287#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
288#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
289#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
290#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
291#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
292#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
293
294/* SW CTRL */
295#define PM2XXX_SWCTRL_HW 0x0
296#define PM2XXX_SWCTRL_SW 0x1
297
298
299/* LED Driver Control */
300#define PM2XXX_LED_CURRENT_MASK 0x0C
301#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
302#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
303#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
304#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
305
306#define PM2XXX_LED_SELECT_MASK 0x02
307#define PM2XXX_LED_SELECT_EN (0X0<<1)
308#define PM2XXX_LED_SELECT_DIS (0X1<<1)
309
310#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
311#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
312#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
313
314enum pm2xxx_reg_int1 {
315 PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
316 PM2XXX_INT1_ITVBATLOWR = 0x04,
317 PM2XXX_INT1_ITVBATLOWF = 0x08,
318};
319
320enum pm2xxx_mask_reg_int1 {
321 PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
322 PM2XXX_INT1_M_ITVBATLOWR = 0x04,
323 PM2XXX_INT1_M_ITVBATLOWF = 0x08,
324};
325
326enum pm2xxx_source_reg_int1 {
327 PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
328 PM2XXX_INT1_S_ITVBATLOWR = 0x04,
329 PM2XXX_INT1_S_ITVBATLOWF = 0x08,
330};
331
332enum pm2xxx_reg_int2 {
333 PM2XXX_INT2_ITVPWR2PLUG = 0x01,
334 PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
335 PM2XXX_INT2_ITVPWR1PLUG = 0x04,
336 PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
337};
338
339enum pm2xxx_mask_reg_int2 {
340 PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
341 PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
342 PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
343 PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
344};
345
/*
 * Source register INT2. Unlike the REG/MASK INT2 variants (one bit per
 * event), these are two-bit masks covering both the plug and unplug
 * bits of each VPWR input (0x03 = bits 0|1, 0x0c = bits 2|3).
 */
enum pm2xxx_source_reg_int2 {
	PM2XXX_INT2_S_ITVPWR2PLUG	= 0x03,
	PM2XXX_INT2_S_ITVPWR1PLUG	= 0x0c,
};
350
351enum pm2xxx_reg_int3 {
352 PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
353 PM2XXX_INT3_ITCHCCWD = 0x02,
354 PM2XXX_INT3_ITCHCVWD = 0x04,
355 PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
356};
357
358enum pm2xxx_mask_reg_int3 {
359 PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
360 PM2XXX_INT3_M_ITCHCCWD = 0x02,
361 PM2XXX_INT3_M_ITCHCVWD = 0x04,
362 PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
363};
364
365enum pm2xxx_source_reg_int3 {
366 PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
367 PM2XXX_INT3_S_ITCHCCWD = 0x02,
368 PM2XXX_INT3_S_ITCHCVWD = 0x04,
369 PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
370};
371
372enum pm2xxx_reg_int4 {
373 PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
374 PM2XXX_INT4_ITBATTEMPHOT = 0x02,
375 PM2XXX_INT4_ITVPWR2OVV = 0x04,
376 PM2XXX_INT4_ITVPWR1OVV = 0x08,
377 PM2XXX_INT4_ITCHARGINGON = 0x10,
378 PM2XXX_INT4_ITVRESUME = 0x20,
379 PM2XXX_INT4_ITBATTFULL = 0x40,
380 PM2XXX_INT4_ITCVPHASE = 0x80,
381};
382
383enum pm2xxx_mask_reg_int4 {
384 PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
385 PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
386 PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
387 PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
388 PM2XXX_INT4_M_ITCHARGINGON = 0x10,
389 PM2XXX_INT4_M_ITVRESUME = 0x20,
390 PM2XXX_INT4_M_ITBATTFULL = 0x40,
391 PM2XXX_INT4_M_ITCVPHASE = 0x80,
392};
393
394enum pm2xxx_source_reg_int4 {
395 PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
396 PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
397 PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
398 PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
399 PM2XXX_INT4_S_ITCHARGINGON = 0x10,
400 PM2XXX_INT4_S_ITVRESUME = 0x20,
401 PM2XXX_INT4_S_ITBATTFULL = 0x40,
402 PM2XXX_INT4_S_ITCVPHASE = 0x80,
403};
404
405enum pm2xxx_reg_int5 {
406 PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
407 PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
408 PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
409 PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
410 PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
411};
412
413enum pm2xxx_mask_reg_int5 {
414 PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
415 PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
416 PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
417 PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
418 PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
419};
420
421enum pm2xxx_source_reg_int5 {
422 PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
423 PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
424 PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
425 PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
426 PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
427};
428
429enum pm2xxx_reg_int6 {
430 PM2XXX_INT6_ITVPWR2DROP = 0x01,
431 PM2XXX_INT6_ITVPWR1DROP = 0x02,
432 PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
433 PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
434 PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
435 PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
436};
437
438enum pm2xxx_mask_reg_int6 {
439 PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
440 PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
441 PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
442 PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
443 PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
444 PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
445};
446
447enum pm2xxx_source_reg_int6 {
448 PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
449 PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
450 PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
451 PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
452 PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
453 PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
454};
455
/* Runtime state of the AC charger path. */
struct pm2xxx_charger_info {
	int charger_connected;	/* charger physically detected */
	int charger_online;	/* charging currently enabled */
	int cv_active;		/* constant-voltage phase active (see get_ac_cv) */
	bool wd_expired;	/* charger watchdog expired -> HEALTH_DEAD */
};
462
/* Latched failure/event flags, reported via POWER_SUPPLY_PROP_HEALTH. */
struct pm2xxx_charger_event_flags {
	bool mainextchnotok;	/* main external charger fault -> UNSPEC_FAILURE */
	bool main_thermal_prot;	/* thermal protection tripped -> OVERHEAT */
	bool ovv;		/* presumably input over-voltage seen — confirm setter */
	bool chgwdexp;		/* presumably charger WD expiry latch — confirm setter */
};
469
/* Last-read value of each interrupt source register plus its handler. */
struct pm2xxx_interrupts {
	u8 reg[PM2XXX_NUM_INT_REG];
	int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
};
474
/* I2C identity of the bound PM2xxx device. */
struct pm2xxx_config {
	struct i2c_client *pm2xxx_i2c;
	struct i2c_device_id *pm2xxx_id;
};
479
/* Name plus handler for one interrupt line. */
struct pm2xxx_irq {
	char *name;
	irqreturn_t (*isr)(int irq, void *data);
};
484
/* Per-device driver state, allocated in probe and freed in remove. */
struct pm2xxx_charger {
	struct device *dev;		/* the i2c client's device */
	u8 chip_id;
	bool vddadc_en_ac;		/* vddadc regulator held while charging */
	struct pm2xxx_config config;
	bool ac_conn;			/* charger seen at probe time */
	unsigned int gpio_irq;
	int vbat;			/* presumably battery voltage cache — confirm use */
	int old_vbat;
	int failure_case;
	int failure_input_ovv;
	unsigned int lpn_pin;		/* LPN GPIO (GPIO60 of AB9540) */
	struct pm2xxx_interrupts *pm2_int;
	struct ab8500_gpadc *gpadc;
	struct regulator *regu;		/* "vddadc" regulator */
	struct pm2xxx_bm_data *bat;	/* battery platform data */
	struct mutex lock;
	struct ab8500 *parent;
	struct pm2xxx_charger_info ac;	/* AC charger runtime state */
	struct pm2xxx_charger_platform_data *pdata;
	struct workqueue_struct *charger_wq;
	struct delayed_work check_vbat_work;
	struct work_struct ac_work;	/* connect/disconnect notify */
	struct work_struct check_main_thermal_prot_work;
	struct ux500_charger ac_chg;	/* embedded power_supply sub-class */
	struct pm2xxx_charger_event_flags flags;
};
512
513#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 40fa3b7cae54..29178f78d73c 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -55,7 +55,8 @@ static ssize_t power_supply_show_property(struct device *dev,
55 }; 55 };
56 static char *health_text[] = { 56 static char *health_text[] = {
57 "Unknown", "Good", "Overheat", "Dead", "Over voltage", 57 "Unknown", "Good", "Overheat", "Dead", "Over voltage",
58 "Unspecified failure", "Cold", 58 "Unspecified failure", "Cold", "Watchdog timer expire",
59 "Safety timer expire"
59 }; 60 };
60 static char *technology_text[] = { 61 static char *technology_text[] = {
61 "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", 62 "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 6461b489fb09..1ae65b822864 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -13,3 +13,20 @@ config POWER_RESET_GPIO
13 This driver supports turning off your board via a GPIO line. 13 This driver supports turning off your board via a GPIO line.
14 If your board needs a GPIO high/low to power down, say Y and 14 If your board needs a GPIO high/low to power down, say Y and
15 create a binding in your devicetree. 15 create a binding in your devicetree.
16
17config POWER_RESET_QNAP
18 bool "QNAP power-off driver"
19 depends on OF_GPIO && POWER_RESET && PLAT_ORION
20 help
21 This driver supports turning off QNAP NAS devices by sending
22 commands to the microcontroller which controls the main power.
23
24 Say Y if you have a QNAP NAS.
25
26config POWER_RESET_RESTART
27 bool "Restart power-off driver"
28 depends on ARM
29 help
30 Some boards don't actually have the ability to power off.
31 Instead they restart, and u-boot holds the SoC until the
32 user presses a key. u-boot then boots into Linux.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 751488a4a0c5..0f317f50c56f 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1 +1,3 @@
1obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o 1obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
2obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
3obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o \ No newline at end of file
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
new file mode 100644
index 000000000000..37f56f7ee926
--- /dev/null
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -0,0 +1,116 @@
1/*
2 * QNAP Turbo NAS Board power off
3 *
4 * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch>
5 *
6 * Based on the code from:
7 *
8 * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com>
9 * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/serial_reg.h>
21#include <linux/kallsyms.h>
22#include <linux/of.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25
26#define UART1_REG(x) (base + ((UART_##x) << 2))
27
28static void __iomem *base;
29static unsigned long tclk;
30
31static void qnap_power_off(void)
32{
33 /* 19200 baud divisor */
34 const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200));
35
36 pr_err("%s: triggering power-off...\n", __func__);
37
38 /* hijack UART1 and reset into sane state (19200,8n1) */
39 writel(0x83, UART1_REG(LCR));
40 writel(divisor & 0xff, UART1_REG(DLL));
41 writel((divisor >> 8) & 0xff, UART1_REG(DLM));
42 writel(0x03, UART1_REG(LCR));
43 writel(0x00, UART1_REG(IER));
44 writel(0x00, UART1_REG(FCR));
45 writel(0x00, UART1_REG(MCR));
46
47 /* send the power-off command 'A' to PIC */
48 writel('A', UART1_REG(TX));
49}
50
51static int qnap_power_off_probe(struct platform_device *pdev)
52{
53 struct resource *res;
54 struct clk *clk;
55 char symname[KSYM_NAME_LEN];
56
57 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
58 if (!res) {
59 dev_err(&pdev->dev, "Missing resource");
60 return -EINVAL;
61 }
62
63 base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
64 if (!base) {
65 dev_err(&pdev->dev, "Unable to map resource");
66 return -EINVAL;
67 }
68
69 /* We need to know tclk in order to calculate the UART divisor */
70 clk = devm_clk_get(&pdev->dev, NULL);
71 if (IS_ERR(clk)) {
72 dev_err(&pdev->dev, "Clk missing");
73 return PTR_ERR(clk);
74 }
75
76 tclk = clk_get_rate(clk);
77
78 /* Check that nothing else has already setup a handler */
79 if (pm_power_off) {
80 lookup_symbol_name((ulong)pm_power_off, symname);
81 dev_err(&pdev->dev,
82 "pm_power_off already claimed %p %s",
83 pm_power_off, symname);
84 return -EBUSY;
85 }
86 pm_power_off = qnap_power_off;
87
88 return 0;
89}
90
91static int qnap_power_off_remove(struct platform_device *pdev)
92{
93 pm_power_off = NULL;
94 return 0;
95}
96
97static const struct of_device_id qnap_power_off_of_match_table[] = {
98 { .compatible = "qnap,power-off", },
99 {}
100};
101MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table);
102
103static struct platform_driver qnap_power_off_driver = {
104 .probe = qnap_power_off_probe,
105 .remove = qnap_power_off_remove,
106 .driver = {
107 .owner = THIS_MODULE,
108 .name = "qnap_power_off",
109 .of_match_table = of_match_ptr(qnap_power_off_of_match_table),
110 },
111};
112module_platform_driver(qnap_power_off_driver);
113
114MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
115MODULE_DESCRIPTION("QNAP Power off driver");
116MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
new file mode 100644
index 000000000000..059cd1501e2a
--- /dev/null
+++ b/drivers/power/reset/restart-poweroff.c
@@ -0,0 +1,65 @@
1/*
2 * Power off by restarting and let u-boot keep hold of the machine
3 * until the user presses a button for example.
4 *
5 * Andrew Lunn <andrew@lunn.ch>
6 *
7 * Copyright (C) 2012 Andrew Lunn
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/of_platform.h>
17#include <linux/module.h>
18#include <asm/system_misc.h>
19
20static void restart_poweroff_do_poweroff(void)
21{
22 arm_pm_restart('h', NULL);
23}
24
25static int restart_poweroff_probe(struct platform_device *pdev)
26{
27 /* If a pm_power_off function has already been added, leave it alone */
28 if (pm_power_off != NULL) {
29 dev_err(&pdev->dev,
30 "pm_power_off function already registered");
31 return -EBUSY;
32 }
33
34 pm_power_off = &restart_poweroff_do_poweroff;
35 return 0;
36}
37
38static int restart_poweroff_remove(struct platform_device *pdev)
39{
40 if (pm_power_off == &restart_poweroff_do_poweroff)
41 pm_power_off = NULL;
42
43 return 0;
44}
45
46static const struct of_device_id of_restart_poweroff_match[] = {
47 { .compatible = "restart-poweroff", },
48 {},
49};
50
51static struct platform_driver restart_poweroff_driver = {
52 .probe = restart_poweroff_probe,
53 .remove = restart_poweroff_remove,
54 .driver = {
55 .name = "poweroff-restart",
56 .owner = THIS_MODULE,
57 .of_match_table = of_restart_poweroff_match,
58 },
59};
60module_platform_driver(restart_poweroff_driver);
61
62MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
63MODULE_DESCRIPTION("restart poweroff driver");
64MODULE_LICENSE("GPLv2");
65MODULE_ALIAS("platform:poweroff-restart");
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2b557119adad..c79ab843333e 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -30,8 +30,6 @@ struct pm8607_regulator_info {
30 unsigned int *vol_table; 30 unsigned int *vol_table;
31 unsigned int *vol_suspend; 31 unsigned int *vol_suspend;
32 32
33 int update_reg;
34 int update_bit;
35 int slope_double; 33 int slope_double;
36}; 34};
37 35
@@ -222,29 +220,6 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
222 return ret; 220 return ret;
223} 221}
224 222
225static int pm8607_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
226{
227 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
228 uint8_t val;
229 int ret;
230
231 val = (uint8_t)(selector << (ffs(rdev->desc->vsel_mask) - 1));
232
233 ret = pm860x_set_bits(info->i2c, rdev->desc->vsel_reg,
234 rdev->desc->vsel_mask, val);
235 if (ret)
236 return ret;
237 switch (info->desc.id) {
238 case PM8607_ID_BUCK1:
239 case PM8607_ID_BUCK3:
240 ret = pm860x_set_bits(info->i2c, info->update_reg,
241 1 << info->update_bit,
242 1 << info->update_bit);
243 break;
244 }
245 return ret;
246}
247
248static int pm8606_preg_enable(struct regulator_dev *rdev) 223static int pm8606_preg_enable(struct regulator_dev *rdev)
249{ 224{
250 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 225 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
@@ -276,7 +251,7 @@ static int pm8606_preg_is_enabled(struct regulator_dev *rdev)
276 251
277static struct regulator_ops pm8607_regulator_ops = { 252static struct regulator_ops pm8607_regulator_ops = {
278 .list_voltage = pm8607_list_voltage, 253 .list_voltage = pm8607_list_voltage,
279 .set_voltage_sel = pm8607_set_voltage_sel, 254 .set_voltage_sel = regulator_set_voltage_sel_regmap,
280 .get_voltage_sel = regulator_get_voltage_sel_regmap, 255 .get_voltage_sel = regulator_get_voltage_sel_regmap,
281 .enable = regulator_enable_regmap, 256 .enable = regulator_enable_regmap,
282 .disable = regulator_disable_regmap, 257 .disable = regulator_disable_regmap,
@@ -313,11 +288,11 @@ static struct regulator_ops pm8606_preg_ops = {
313 .n_voltages = ARRAY_SIZE(vreg##_table), \ 288 .n_voltages = ARRAY_SIZE(vreg##_table), \
314 .vsel_reg = PM8607_##vreg, \ 289 .vsel_reg = PM8607_##vreg, \
315 .vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \ 290 .vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \
291 .apply_reg = PM8607_##ureg, \
292 .apply_bit = (ubit), \
316 .enable_reg = PM8607_##ereg, \ 293 .enable_reg = PM8607_##ereg, \
317 .enable_mask = 1 << (ebit), \ 294 .enable_mask = 1 << (ebit), \
318 }, \ 295 }, \
319 .update_reg = PM8607_##ureg, \
320 .update_bit = (ubit), \
321 .slope_double = (0), \ 296 .slope_double = (0), \
322 .vol_table = (unsigned int *)&vreg##_table, \ 297 .vol_table = (unsigned int *)&vreg##_table, \
323 .vol_suspend = (unsigned int *)&vreg##_suspend_table, \ 298 .vol_suspend = (unsigned int *)&vreg##_suspend_table, \
@@ -343,9 +318,9 @@ static struct regulator_ops pm8606_preg_ops = {
343} 318}
344 319
345static struct pm8607_regulator_info pm8607_regulator_info[] = { 320static struct pm8607_regulator_info pm8607_regulator_info[] = {
346 PM8607_DVC(BUCK1, GO, 0, SUPPLIES_EN11, 0), 321 PM8607_DVC(BUCK1, GO, BIT(0), SUPPLIES_EN11, 0),
347 PM8607_DVC(BUCK2, GO, 1, SUPPLIES_EN11, 1), 322 PM8607_DVC(BUCK2, GO, BIT(1), SUPPLIES_EN11, 1),
348 PM8607_DVC(BUCK3, GO, 2, SUPPLIES_EN11, 2), 323 PM8607_DVC(BUCK3, GO, BIT(2), SUPPLIES_EN11, 2),
349 324
350 PM8607_LDO(1, LDO1, 0, SUPPLIES_EN11, 3), 325 PM8607_LDO(1, LDO1, 0, SUPPLIES_EN11, 3),
351 PM8607_LDO(2, LDO2, 0, SUPPLIES_EN11, 4), 326 PM8607_LDO(2, LDO2, 0, SUPPLIES_EN11, 4),
@@ -372,7 +347,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
372 struct regulator_config *config) 347 struct regulator_config *config)
373{ 348{
374 struct device_node *nproot, *np; 349 struct device_node *nproot, *np;
375 nproot = pdev->dev.parent->of_node; 350 nproot = of_node_get(pdev->dev.parent->of_node);
376 if (!nproot) 351 if (!nproot)
377 return -ENODEV; 352 return -ENODEV;
378 nproot = of_find_node_by_name(nproot, "regulators"); 353 nproot = of_find_node_by_name(nproot, "regulators");
@@ -388,6 +363,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
388 break; 363 break;
389 } 364 }
390 } 365 }
366 of_node_put(nproot);
391 return 0; 367 return 0;
392} 368}
393#else 369#else
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 551a22b07538..a5d97eaee99e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -91,6 +91,7 @@ config REGULATOR_AAT2870
91config REGULATOR_ARIZONA 91config REGULATOR_ARIZONA
92 tristate "Wolfson Arizona class devices" 92 tristate "Wolfson Arizona class devices"
93 depends on MFD_ARIZONA 93 depends on MFD_ARIZONA
94 depends on SND_SOC
94 help 95 help
95 Support for the regulators found on Wolfson Arizona class 96 Support for the regulators found on Wolfson Arizona class
96 devices. 97 devices.
@@ -277,6 +278,15 @@ config REGULATOR_LP872X
277 help 278 help
278 This driver supports LP8720/LP8725 PMIC 279 This driver supports LP8720/LP8725 PMIC
279 280
281config REGULATOR_LP8755
282 tristate "TI LP8755 High Performance PMU driver"
283 depends on I2C
284 select REGMAP_I2C
285 help
286 This driver supports LP8755 High Performance PMU driver. This
287 chip contains six step-down DC/DC converters which can support
288 9 mode multiphase configuration.
289
280config REGULATOR_LP8788 290config REGULATOR_LP8788
281 bool "TI LP8788 Power Regulators" 291 bool "TI LP8788 Power Regulators"
282 depends on MFD_LP8788 292 depends on MFD_LP8788
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b802b0c7fb02..6e8250382def 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
30obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o 30obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
31obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o 31obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
32obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o 32obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
33obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
33obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o 34obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
34obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o 35obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
35obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o 36obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 8f39cac661d2..0d4a8ccbb536 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -31,12 +31,18 @@
31#include <linux/regulator/driver.h> 31#include <linux/regulator/driver.h>
32#include <linux/regulator/of_regulator.h> 32#include <linux/regulator/of_regulator.h>
33 33
34#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
35#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
36
34struct anatop_regulator { 37struct anatop_regulator {
35 const char *name; 38 const char *name;
36 u32 control_reg; 39 u32 control_reg;
37 struct regmap *anatop; 40 struct regmap *anatop;
38 int vol_bit_shift; 41 int vol_bit_shift;
39 int vol_bit_width; 42 int vol_bit_width;
43 u32 delay_reg;
44 int delay_bit_shift;
45 int delay_bit_width;
40 int min_bit_val; 46 int min_bit_val;
41 int min_voltage; 47 int min_voltage;
42 int max_voltage; 48 int max_voltage;
@@ -55,6 +61,32 @@ static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
55 return regulator_set_voltage_sel_regmap(reg, selector); 61 return regulator_set_voltage_sel_regmap(reg, selector);
56} 62}
57 63
64static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
65 unsigned int old_sel,
66 unsigned int new_sel)
67{
68 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
69 u32 val;
70 int ret = 0;
71
72 /* check whether need to care about LDO ramp up speed */
73 if (anatop_reg->delay_bit_width && new_sel > old_sel) {
74 /*
75 * the delay for LDO ramp up time is
76 * based on the register setting, we need
77 * to calculate how many steps LDO need to
78 * ramp up, and how much delay needed. (us)
79 */
80 regmap_read(anatop_reg->anatop, anatop_reg->delay_reg, &val);
81 val = (val >> anatop_reg->delay_bit_shift) &
82 ((1 << anatop_reg->delay_bit_width) - 1);
83 ret = (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES <<
84 val) / LDO_RAMP_UP_FREQ_IN_MHZ + 1;
85 }
86
87 return ret;
88}
89
58static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg) 90static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
59{ 91{
60 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); 92 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
@@ -67,6 +99,7 @@ static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
67 99
68static struct regulator_ops anatop_rops = { 100static struct regulator_ops anatop_rops = {
69 .set_voltage_sel = anatop_regmap_set_voltage_sel, 101 .set_voltage_sel = anatop_regmap_set_voltage_sel,
102 .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
70 .get_voltage_sel = anatop_regmap_get_voltage_sel, 103 .get_voltage_sel = anatop_regmap_get_voltage_sel,
71 .list_voltage = regulator_list_voltage_linear, 104 .list_voltage = regulator_list_voltage_linear,
72 .map_voltage = regulator_map_voltage_linear, 105 .map_voltage = regulator_map_voltage_linear,
@@ -143,6 +176,14 @@ static int anatop_regulator_probe(struct platform_device *pdev)
143 goto anatop_probe_end; 176 goto anatop_probe_end;
144 } 177 }
145 178
179 /* read LDO ramp up setting, only for core reg */
180 of_property_read_u32(np, "anatop-delay-reg-offset",
181 &sreg->delay_reg);
182 of_property_read_u32(np, "anatop-delay-bit-width",
183 &sreg->delay_bit_width);
184 of_property_read_u32(np, "anatop-delay-bit-shift",
185 &sreg->delay_bit_shift);
186
146 rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1 187 rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
147 + sreg->min_bit_val; 188 + sreg->min_bit_val;
148 rdesc->min_uV = sreg->min_voltage; 189 rdesc->min_uV = sreg->min_voltage;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index a6d040cbf8ac..e87536bf0bed 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -21,6 +21,8 @@
21#include <linux/regulator/machine.h> 21#include <linux/regulator/machine.h>
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/workqueue.h>
25#include <sound/soc.h>
24 26
25#include <linux/mfd/arizona/core.h> 27#include <linux/mfd/arizona/core.h>
26#include <linux/mfd/arizona/pdata.h> 28#include <linux/mfd/arizona/pdata.h>
@@ -34,6 +36,8 @@ struct arizona_micsupp {
34 36
35 struct regulator_consumer_supply supply; 37 struct regulator_consumer_supply supply;
36 struct regulator_init_data init_data; 38 struct regulator_init_data init_data;
39
40 struct work_struct check_cp_work;
37}; 41};
38 42
39static int arizona_micsupp_list_voltage(struct regulator_dev *rdev, 43static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
@@ -72,9 +76,73 @@ static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
72 return selector; 76 return selector;
73} 77}
74 78
79static void arizona_micsupp_check_cp(struct work_struct *work)
80{
81 struct arizona_micsupp *micsupp =
82 container_of(work, struct arizona_micsupp, check_cp_work);
83 struct snd_soc_dapm_context *dapm = micsupp->arizona->dapm;
84 struct arizona *arizona = micsupp->arizona;
85 struct regmap *regmap = arizona->regmap;
86 unsigned int reg;
87 int ret;
88
89 ret = regmap_read(regmap, ARIZONA_MIC_CHARGE_PUMP_1, &reg);
90 if (ret != 0) {
91 dev_err(arizona->dev, "Failed to read CP state: %d\n", ret);
92 return;
93 }
94
95 if (dapm) {
96 if ((reg & (ARIZONA_CPMIC_ENA | ARIZONA_CPMIC_BYPASS)) ==
97 ARIZONA_CPMIC_ENA)
98 snd_soc_dapm_force_enable_pin(dapm, "MICSUPP");
99 else
100 snd_soc_dapm_disable_pin(dapm, "MICSUPP");
101
102 snd_soc_dapm_sync(dapm);
103 }
104}
105
106static int arizona_micsupp_enable(struct regulator_dev *rdev)
107{
108 struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
109 int ret;
110
111 ret = regulator_enable_regmap(rdev);
112
113 if (ret == 0)
114 schedule_work(&micsupp->check_cp_work);
115
116 return ret;
117}
118
119static int arizona_micsupp_disable(struct regulator_dev *rdev)
120{
121 struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
122 int ret;
123
124 ret = regulator_disable_regmap(rdev);
125 if (ret == 0)
126 schedule_work(&micsupp->check_cp_work);
127
128 return ret;
129}
130
131static int arizona_micsupp_set_bypass(struct regulator_dev *rdev, bool ena)
132{
133 struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
134 int ret;
135
136 ret = regulator_set_bypass_regmap(rdev, ena);
137 if (ret == 0)
138 schedule_work(&micsupp->check_cp_work);
139
140 return ret;
141}
142
75static struct regulator_ops arizona_micsupp_ops = { 143static struct regulator_ops arizona_micsupp_ops = {
76 .enable = regulator_enable_regmap, 144 .enable = arizona_micsupp_enable,
77 .disable = regulator_disable_regmap, 145 .disable = arizona_micsupp_disable,
78 .is_enabled = regulator_is_enabled_regmap, 146 .is_enabled = regulator_is_enabled_regmap,
79 147
80 .list_voltage = arizona_micsupp_list_voltage, 148 .list_voltage = arizona_micsupp_list_voltage,
@@ -84,7 +152,7 @@ static struct regulator_ops arizona_micsupp_ops = {
84 .set_voltage_sel = regulator_set_voltage_sel_regmap, 152 .set_voltage_sel = regulator_set_voltage_sel_regmap,
85 153
86 .get_bypass = regulator_get_bypass_regmap, 154 .get_bypass = regulator_get_bypass_regmap,
87 .set_bypass = regulator_set_bypass_regmap, 155 .set_bypass = arizona_micsupp_set_bypass,
88}; 156};
89 157
90static const struct regulator_desc arizona_micsupp = { 158static const struct regulator_desc arizona_micsupp = {
@@ -109,7 +177,8 @@ static const struct regulator_desc arizona_micsupp = {
109static const struct regulator_init_data arizona_micsupp_default = { 177static const struct regulator_init_data arizona_micsupp_default = {
110 .constraints = { 178 .constraints = {
111 .valid_ops_mask = REGULATOR_CHANGE_STATUS | 179 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
112 REGULATOR_CHANGE_VOLTAGE, 180 REGULATOR_CHANGE_VOLTAGE |
181 REGULATOR_CHANGE_BYPASS,
113 .min_uV = 1700000, 182 .min_uV = 1700000,
114 .max_uV = 3300000, 183 .max_uV = 3300000,
115 }, 184 },
@@ -131,6 +200,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
131 } 200 }
132 201
133 micsupp->arizona = arizona; 202 micsupp->arizona = arizona;
203 INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
134 204
135 /* 205 /*
136 * Since the chip usually supplies itself we provide some 206 * Since the chip usually supplies itself we provide some
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 2f1341db38a0..f0ba8c4eefa9 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -303,7 +303,7 @@ static int as3711_regulator_probe(struct platform_device *pdev)
303 reg_data = pdata ? pdata->init_data[id] : NULL; 303 reg_data = pdata ? pdata->init_data[id] : NULL;
304 304
305 /* No need to register if there is no regulator data */ 305 /* No need to register if there is no regulator data */
306 if (!ri->desc.name) 306 if (!reg_data)
307 continue; 307 continue;
308 308
309 reg = &regs[id]; 309 reg = &regs[id];
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 278584302f2d..da9782bd27d0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -200,8 +200,8 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
200 } 200 }
201 201
202 if (*min_uV > *max_uV) { 202 if (*min_uV > *max_uV) {
203 dev_err(regulator->dev, "Restricting voltage, %u-%uuV\n", 203 rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
204 regulator->min_uV, regulator->max_uV); 204 *min_uV, *max_uV);
205 return -EINVAL; 205 return -EINVAL;
206 } 206 }
207 207
@@ -2080,10 +2080,20 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
2080 */ 2080 */
2081int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel) 2081int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
2082{ 2082{
2083 int ret;
2084
2083 sel <<= ffs(rdev->desc->vsel_mask) - 1; 2085 sel <<= ffs(rdev->desc->vsel_mask) - 1;
2084 2086
2085 return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, 2087 ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
2086 rdev->desc->vsel_mask, sel); 2088 rdev->desc->vsel_mask, sel);
2089 if (ret)
2090 return ret;
2091
2092 if (rdev->desc->apply_bit)
2093 ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
2094 rdev->desc->apply_bit,
2095 rdev->desc->apply_bit);
2096 return ret;
2087} 2097}
2088EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap); 2098EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
2089 2099
@@ -2229,8 +2239,11 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
2229 best_val = rdev->desc->ops->list_voltage(rdev, ret); 2239 best_val = rdev->desc->ops->list_voltage(rdev, ret);
2230 if (min_uV <= best_val && max_uV >= best_val) { 2240 if (min_uV <= best_val && max_uV >= best_val) {
2231 selector = ret; 2241 selector = ret;
2232 ret = rdev->desc->ops->set_voltage_sel(rdev, 2242 if (old_selector == selector)
2233 ret); 2243 ret = 0;
2244 else
2245 ret = rdev->desc->ops->set_voltage_sel(
2246 rdev, ret);
2234 } else { 2247 } else {
2235 ret = -EINVAL; 2248 ret = -EINVAL;
2236 } 2249 }
@@ -2241,7 +2254,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
2241 2254
2242 /* Call set_voltage_time_sel if successfully obtained old_selector */ 2255 /* Call set_voltage_time_sel if successfully obtained old_selector */
2243 if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 && 2256 if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
2244 rdev->desc->ops->set_voltage_time_sel) { 2257 old_selector != selector && rdev->desc->ops->set_voltage_time_sel) {
2245 2258
2246 delay = rdev->desc->ops->set_voltage_time_sel(rdev, 2259 delay = rdev->desc->ops->set_voltage_time_sel(rdev,
2247 old_selector, selector); 2260 old_selector, selector);
@@ -2294,6 +2307,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
2294{ 2307{
2295 struct regulator_dev *rdev = regulator->rdev; 2308 struct regulator_dev *rdev = regulator->rdev;
2296 int ret = 0; 2309 int ret = 0;
2310 int old_min_uV, old_max_uV;
2297 2311
2298 mutex_lock(&rdev->mutex); 2312 mutex_lock(&rdev->mutex);
2299 2313
@@ -2315,18 +2329,29 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
2315 ret = regulator_check_voltage(rdev, &min_uV, &max_uV); 2329 ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
2316 if (ret < 0) 2330 if (ret < 0)
2317 goto out; 2331 goto out;
2332
2333 /* restore original values in case of error */
2334 old_min_uV = regulator->min_uV;
2335 old_max_uV = regulator->max_uV;
2318 regulator->min_uV = min_uV; 2336 regulator->min_uV = min_uV;
2319 regulator->max_uV = max_uV; 2337 regulator->max_uV = max_uV;
2320 2338
2321 ret = regulator_check_consumers(rdev, &min_uV, &max_uV); 2339 ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
2322 if (ret < 0) 2340 if (ret < 0)
2323 goto out; 2341 goto out2;
2324 2342
2325 ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); 2343 ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
2326 2344 if (ret < 0)
2345 goto out2;
2346
2327out: 2347out:
2328 mutex_unlock(&rdev->mutex); 2348 mutex_unlock(&rdev->mutex);
2329 return ret; 2349 return ret;
2350out2:
2351 regulator->min_uV = old_min_uV;
2352 regulator->max_uV = old_max_uV;
2353 mutex_unlock(&rdev->mutex);
2354 return ret;
2330} 2355}
2331EXPORT_SYMBOL_GPL(regulator_set_voltage); 2356EXPORT_SYMBOL_GPL(regulator_set_voltage);
2332 2357
@@ -3208,7 +3233,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
3208 if (status < 0) 3233 if (status < 0)
3209 return status; 3234 return status;
3210 } 3235 }
3211 if (ops->is_enabled) { 3236 if (rdev->ena_gpio || ops->is_enabled) {
3212 status = device_create_file(dev, &dev_attr_state); 3237 status = device_create_file(dev, &dev_attr_state);
3213 if (status < 0) 3238 if (status < 0)
3214 return status; 3239 return status;
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index d0963090442d..96b569abb46c 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -70,7 +70,6 @@ struct da9052_regulator_info {
70 int step_uV; 70 int step_uV;
71 int min_uV; 71 int min_uV;
72 int max_uV; 72 int max_uV;
73 unsigned char activate_bit;
74}; 73};
75 74
76struct da9052_regulator { 75struct da9052_regulator {
@@ -210,36 +209,6 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
210 return sel; 209 return sel;
211} 210}
212 211
213static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
214 unsigned int selector)
215{
216 struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
217 struct da9052_regulator_info *info = regulator->info;
218 int id = rdev_get_id(rdev);
219 int ret;
220
221 ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
222 rdev->desc->vsel_mask, selector);
223 if (ret < 0)
224 return ret;
225
226 /* Some LDOs and DCDCs are DVC controlled which requires enabling of
227 * the activate bit to implment the changes on the output.
228 */
229 switch (id) {
230 case DA9052_ID_BUCK1:
231 case DA9052_ID_BUCK2:
232 case DA9052_ID_BUCK3:
233 case DA9052_ID_LDO2:
234 case DA9052_ID_LDO3:
235 ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
236 info->activate_bit, info->activate_bit);
237 break;
238 }
239
240 return ret;
241}
242
243static struct regulator_ops da9052_dcdc_ops = { 212static struct regulator_ops da9052_dcdc_ops = {
244 .get_current_limit = da9052_dcdc_get_current_limit, 213 .get_current_limit = da9052_dcdc_get_current_limit,
245 .set_current_limit = da9052_dcdc_set_current_limit, 214 .set_current_limit = da9052_dcdc_set_current_limit,
@@ -247,7 +216,7 @@ static struct regulator_ops da9052_dcdc_ops = {
247 .list_voltage = da9052_list_voltage, 216 .list_voltage = da9052_list_voltage,
248 .map_voltage = da9052_map_voltage, 217 .map_voltage = da9052_map_voltage,
249 .get_voltage_sel = regulator_get_voltage_sel_regmap, 218 .get_voltage_sel = regulator_get_voltage_sel_regmap,
250 .set_voltage_sel = da9052_regulator_set_voltage_sel, 219 .set_voltage_sel = regulator_set_voltage_sel_regmap,
251 .is_enabled = regulator_is_enabled_regmap, 220 .is_enabled = regulator_is_enabled_regmap,
252 .enable = regulator_enable_regmap, 221 .enable = regulator_enable_regmap,
253 .disable = regulator_disable_regmap, 222 .disable = regulator_disable_regmap,
@@ -257,7 +226,7 @@ static struct regulator_ops da9052_ldo_ops = {
257 .list_voltage = da9052_list_voltage, 226 .list_voltage = da9052_list_voltage,
258 .map_voltage = da9052_map_voltage, 227 .map_voltage = da9052_map_voltage,
259 .get_voltage_sel = regulator_get_voltage_sel_regmap, 228 .get_voltage_sel = regulator_get_voltage_sel_regmap,
260 .set_voltage_sel = da9052_regulator_set_voltage_sel, 229 .set_voltage_sel = regulator_set_voltage_sel_regmap,
261 .is_enabled = regulator_is_enabled_regmap, 230 .is_enabled = regulator_is_enabled_regmap,
262 .enable = regulator_enable_regmap, 231 .enable = regulator_enable_regmap,
263 .disable = regulator_disable_regmap, 232 .disable = regulator_disable_regmap,
@@ -274,13 +243,14 @@ static struct regulator_ops da9052_ldo_ops = {
274 .owner = THIS_MODULE,\ 243 .owner = THIS_MODULE,\
275 .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \ 244 .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
276 .vsel_mask = (1 << (sbits)) - 1,\ 245 .vsel_mask = (1 << (sbits)) - 1,\
246 .apply_reg = DA9052_SUPPLY_REG, \
247 .apply_bit = (abits), \
277 .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \ 248 .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
278 .enable_mask = 1 << (ebits),\ 249 .enable_mask = 1 << (ebits),\
279 },\ 250 },\
280 .min_uV = (min) * 1000,\ 251 .min_uV = (min) * 1000,\
281 .max_uV = (max) * 1000,\ 252 .max_uV = (max) * 1000,\
282 .step_uV = (step) * 1000,\ 253 .step_uV = (step) * 1000,\
283 .activate_bit = (abits),\
284} 254}
285 255
286#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \ 256#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
@@ -294,13 +264,14 @@ static struct regulator_ops da9052_ldo_ops = {
294 .owner = THIS_MODULE,\ 264 .owner = THIS_MODULE,\
295 .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \ 265 .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
296 .vsel_mask = (1 << (sbits)) - 1,\ 266 .vsel_mask = (1 << (sbits)) - 1,\
267 .apply_reg = DA9052_SUPPLY_REG, \
268 .apply_bit = (abits), \
297 .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \ 269 .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
298 .enable_mask = 1 << (ebits),\ 270 .enable_mask = 1 << (ebits),\
299 },\ 271 },\
300 .min_uV = (min) * 1000,\ 272 .min_uV = (min) * 1000,\
301 .max_uV = (max) * 1000,\ 273 .max_uV = (max) * 1000,\
302 .step_uV = (step) * 1000,\ 274 .step_uV = (step) * 1000,\
303 .activate_bit = (abits),\
304} 275}
305 276
306static struct da9052_regulator_info da9052_regulator_info[] = { 277static struct da9052_regulator_info da9052_regulator_info[] = {
@@ -395,9 +366,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
395 config.init_data = pdata->regulators[pdev->id]; 366 config.init_data = pdata->regulators[pdev->id];
396 } else { 367 } else {
397#ifdef CONFIG_OF 368#ifdef CONFIG_OF
398 struct device_node *nproot = da9052->dev->of_node; 369 struct device_node *nproot, *np;
399 struct device_node *np;
400 370
371 nproot = of_node_get(da9052->dev->of_node);
401 if (!nproot) 372 if (!nproot)
402 return -ENODEV; 373 return -ENODEV;
403 374
@@ -414,6 +385,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
414 break; 385 break;
415 } 386 }
416 } 387 }
388 of_node_put(nproot);
417#endif 389#endif
418 } 390 }
419 391
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 1a05ac66878f..30221099d09c 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -58,7 +58,6 @@ struct da9055_volt_reg {
58 int reg_b; 58 int reg_b;
59 int sl_shift; 59 int sl_shift;
60 int v_mask; 60 int v_mask;
61 int v_shift;
62}; 61};
63 62
64struct da9055_mode_reg { 63struct da9055_mode_reg {
@@ -388,7 +387,6 @@ static struct regulator_ops da9055_ldo_ops = {
388 .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \ 387 .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
389 .sl_shift = 7,\ 388 .sl_shift = 7,\
390 .v_mask = (1 << (vbits)) - 1,\ 389 .v_mask = (1 << (vbits)) - 1,\
391 .v_shift = (vbits),\
392 },\ 390 },\
393} 391}
394 392
@@ -417,7 +415,6 @@ static struct regulator_ops da9055_ldo_ops = {
417 .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \ 415 .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
418 .sl_shift = 7,\ 416 .sl_shift = 7,\
419 .v_mask = (1 << (vbits)) - 1,\ 417 .v_mask = (1 << (vbits)) - 1,\
420 .v_shift = (vbits),\
421 },\ 418 },\
422 .mode = {\ 419 .mode = {\
423 .reg = DA9055_REG_BCORE_MODE,\ 420 .reg = DA9055_REG_BCORE_MODE,\
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index 261f3d2299bc..89bd2faaef8c 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -14,6 +14,7 @@
14#include <linux/debugfs.h> 14#include <linux/debugfs.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/module.h>
17 18
18#include "dbx500-prcmu.h" 19#include "dbx500-prcmu.h"
19 20
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index bae681ccd3ea..9d39eb4aafa3 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -132,7 +132,7 @@ static struct regulator_ops gpio_regulator_voltage_ops = {
132 .list_voltage = gpio_regulator_list_voltage, 132 .list_voltage = gpio_regulator_list_voltage,
133}; 133};
134 134
135struct gpio_regulator_config * 135static struct gpio_regulator_config *
136of_get_gpio_regulator_config(struct device *dev, struct device_node *np) 136of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
137{ 137{
138 struct gpio_regulator_config *config; 138 struct gpio_regulator_config *config;
@@ -163,10 +163,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
163 config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); 163 config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
164 164
165 /* Fetch GPIOs. */ 165 /* Fetch GPIOs. */
166 for (i = 0; ; i++) 166 config->nr_gpios = of_gpio_count(np);
167 if (of_get_named_gpio(np, "gpios", i) < 0)
168 break;
169 config->nr_gpios = i;
170 167
171 config->gpios = devm_kzalloc(dev, 168 config->gpios = devm_kzalloc(dev,
172 sizeof(struct gpio) * config->nr_gpios, 169 sizeof(struct gpio) * config->nr_gpios,
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 5f68ff11a298..9cb2c0f34515 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -73,8 +73,6 @@ static const unsigned int buck_voltage_map[] = {
73}; 73};
74 74
75#define BUCK_TARGET_VOL_MASK 0x3f 75#define BUCK_TARGET_VOL_MASK 0x3f
76#define BUCK_TARGET_VOL_MIN_IDX 0x01
77#define BUCK_TARGET_VOL_MAX_IDX 0x19
78 76
79#define LP3971_BUCK_RAMP_REG(x) (buck_base_addr[x]+2) 77#define LP3971_BUCK_RAMP_REG(x) (buck_base_addr[x]+2)
80 78
@@ -140,7 +138,7 @@ static int lp3971_ldo_disable(struct regulator_dev *dev)
140 return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0); 138 return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0);
141} 139}
142 140
143static int lp3971_ldo_get_voltage(struct regulator_dev *dev) 141static int lp3971_ldo_get_voltage_sel(struct regulator_dev *dev)
144{ 142{
145 struct lp3971 *lp3971 = rdev_get_drvdata(dev); 143 struct lp3971 *lp3971 = rdev_get_drvdata(dev);
146 int ldo = rdev_get_id(dev) - LP3971_LDO1; 144 int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -149,7 +147,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
149 reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo)); 147 reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
150 val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK; 148 val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
151 149
152 return dev->desc->volt_table[val]; 150 return val;
153} 151}
154 152
155static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev, 153static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -168,7 +166,7 @@ static struct regulator_ops lp3971_ldo_ops = {
168 .is_enabled = lp3971_ldo_is_enabled, 166 .is_enabled = lp3971_ldo_is_enabled,
169 .enable = lp3971_ldo_enable, 167 .enable = lp3971_ldo_enable,
170 .disable = lp3971_ldo_disable, 168 .disable = lp3971_ldo_disable,
171 .get_voltage = lp3971_ldo_get_voltage, 169 .get_voltage_sel = lp3971_ldo_get_voltage_sel,
172 .set_voltage_sel = lp3971_ldo_set_voltage_sel, 170 .set_voltage_sel = lp3971_ldo_set_voltage_sel,
173}; 171};
174 172
@@ -201,24 +199,16 @@ static int lp3971_dcdc_disable(struct regulator_dev *dev)
201 return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0); 199 return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0);
202} 200}
203 201
204static int lp3971_dcdc_get_voltage(struct regulator_dev *dev) 202static int lp3971_dcdc_get_voltage_sel(struct regulator_dev *dev)
205{ 203{
206 struct lp3971 *lp3971 = rdev_get_drvdata(dev); 204 struct lp3971 *lp3971 = rdev_get_drvdata(dev);
207 int buck = rdev_get_id(dev) - LP3971_DCDC1; 205 int buck = rdev_get_id(dev) - LP3971_DCDC1;
208 u16 reg; 206 u16 reg;
209 int val;
210 207
211 reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck)); 208 reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck));
212 reg &= BUCK_TARGET_VOL_MASK; 209 reg &= BUCK_TARGET_VOL_MASK;
213 210
214 if (reg <= BUCK_TARGET_VOL_MAX_IDX) 211 return reg;
215 val = buck_voltage_map[reg];
216 else {
217 val = 0;
218 dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
219 }
220
221 return val;
222} 212}
223 213
224static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev, 214static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -249,7 +239,7 @@ static struct regulator_ops lp3971_dcdc_ops = {
249 .is_enabled = lp3971_dcdc_is_enabled, 239 .is_enabled = lp3971_dcdc_is_enabled,
250 .enable = lp3971_dcdc_enable, 240 .enable = lp3971_dcdc_enable,
251 .disable = lp3971_dcdc_disable, 241 .disable = lp3971_dcdc_disable,
252 .get_voltage = lp3971_dcdc_get_voltage, 242 .get_voltage_sel = lp3971_dcdc_get_voltage_sel,
253 .set_voltage_sel = lp3971_dcdc_set_voltage_sel, 243 .set_voltage_sel = lp3971_dcdc_set_voltage_sel,
254}; 244};
255 245
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 69c42c318b87..0baabcfb578a 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -165,8 +165,6 @@ static const int buck_base_addr[] = {
165#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x]) 165#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
166#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x]) 166#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
167#define LP3972_BUCK_VOL_MASK 0x1f 167#define LP3972_BUCK_VOL_MASK 0x1f
168#define LP3972_BUCK_VOL_MIN_IDX(x) ((x) ? 0x01 : 0x00)
169#define LP3972_BUCK_VOL_MAX_IDX(x) ((x) ? 0x19 : 0x1f)
170 168
171static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count, 169static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count,
172 u16 *dest) 170 u16 *dest)
@@ -257,7 +255,7 @@ static int lp3972_ldo_disable(struct regulator_dev *dev)
257 mask, 0); 255 mask, 0);
258} 256}
259 257
260static int lp3972_ldo_get_voltage(struct regulator_dev *dev) 258static int lp3972_ldo_get_voltage_sel(struct regulator_dev *dev)
261{ 259{
262 struct lp3972 *lp3972 = rdev_get_drvdata(dev); 260 struct lp3972 *lp3972 = rdev_get_drvdata(dev);
263 int ldo = rdev_get_id(dev) - LP3972_LDO1; 261 int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -267,7 +265,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
267 reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo)); 265 reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
268 val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask; 266 val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
269 267
270 return dev->desc->volt_table[val]; 268 return val;
271} 269}
272 270
273static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev, 271static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -314,7 +312,7 @@ static struct regulator_ops lp3972_ldo_ops = {
314 .is_enabled = lp3972_ldo_is_enabled, 312 .is_enabled = lp3972_ldo_is_enabled,
315 .enable = lp3972_ldo_enable, 313 .enable = lp3972_ldo_enable,
316 .disable = lp3972_ldo_disable, 314 .disable = lp3972_ldo_disable,
317 .get_voltage = lp3972_ldo_get_voltage, 315 .get_voltage_sel = lp3972_ldo_get_voltage_sel,
318 .set_voltage_sel = lp3972_ldo_set_voltage_sel, 316 .set_voltage_sel = lp3972_ldo_set_voltage_sel,
319}; 317};
320 318
@@ -353,24 +351,16 @@ static int lp3972_dcdc_disable(struct regulator_dev *dev)
353 return val; 351 return val;
354} 352}
355 353
356static int lp3972_dcdc_get_voltage(struct regulator_dev *dev) 354static int lp3972_dcdc_get_voltage_sel(struct regulator_dev *dev)
357{ 355{
358 struct lp3972 *lp3972 = rdev_get_drvdata(dev); 356 struct lp3972 *lp3972 = rdev_get_drvdata(dev);
359 int buck = rdev_get_id(dev) - LP3972_DCDC1; 357 int buck = rdev_get_id(dev) - LP3972_DCDC1;
360 u16 reg; 358 u16 reg;
361 int val;
362 359
363 reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck)); 360 reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
364 reg &= LP3972_BUCK_VOL_MASK; 361 reg &= LP3972_BUCK_VOL_MASK;
365 if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
366 val = dev->desc->volt_table[reg];
367 else {
368 val = 0;
369 dev_warn(&dev->dev, "chip reported incorrect voltage value."
370 " reg = %d\n", reg);
371 }
372 362
373 return val; 363 return reg;
374} 364}
375 365
376static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev, 366static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -402,7 +392,7 @@ static struct regulator_ops lp3972_dcdc_ops = {
402 .is_enabled = lp3972_dcdc_is_enabled, 392 .is_enabled = lp3972_dcdc_is_enabled,
403 .enable = lp3972_dcdc_enable, 393 .enable = lp3972_dcdc_enable,
404 .disable = lp3972_dcdc_disable, 394 .disable = lp3972_dcdc_disable,
405 .get_voltage = lp3972_dcdc_get_voltage, 395 .get_voltage_sel = lp3972_dcdc_get_voltage_sel,
406 .set_voltage_sel = lp3972_dcdc_set_voltage_sel, 396 .set_voltage_sel = lp3972_dcdc_set_voltage_sel,
407}; 397};
408 398
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 9289ead715ca..8e3c7ae0047f 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -181,20 +181,6 @@ static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
181 return regmap_update_bits(lp->regmap, addr, mask, data); 181 return regmap_update_bits(lp->regmap, addr, mask, data);
182} 182}
183 183
184static int _rdev_to_offset(struct regulator_dev *rdev)
185{
186 enum lp872x_regulator_id id = rdev_get_id(rdev);
187
188 switch (id) {
189 case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
190 return id;
191 case LP8725_ID_LDO1 ... LP8725_ID_BUCK2:
192 return id - LP8725_ID_BASE;
193 default:
194 return -EINVAL;
195 }
196}
197
198static int lp872x_get_timestep_usec(struct lp872x *lp) 184static int lp872x_get_timestep_usec(struct lp872x *lp)
199{ 185{
200 enum lp872x_id chip = lp->chipid; 186 enum lp872x_id chip = lp->chipid;
@@ -234,28 +220,20 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
234static int lp872x_regulator_enable_time(struct regulator_dev *rdev) 220static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
235{ 221{
236 struct lp872x *lp = rdev_get_drvdata(rdev); 222 struct lp872x *lp = rdev_get_drvdata(rdev);
237 enum lp872x_regulator_id regulator = rdev_get_id(rdev); 223 enum lp872x_regulator_id rid = rdev_get_id(rdev);
238 int time_step_us = lp872x_get_timestep_usec(lp); 224 int time_step_us = lp872x_get_timestep_usec(lp);
239 int ret, offset; 225 int ret;
240 u8 addr, val; 226 u8 addr, val;
241 227
242 if (time_step_us < 0) 228 if (time_step_us < 0)
243 return -EINVAL; 229 return -EINVAL;
244 230
245 switch (regulator) { 231 switch (rid) {
246 case LP8720_ID_LDO1 ... LP8720_ID_LDO5: 232 case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
247 case LP8725_ID_LDO1 ... LP8725_ID_LILO2: 233 addr = LP872X_LDO1_VOUT + rid;
248 offset = _rdev_to_offset(rdev);
249 if (offset < 0)
250 return -EINVAL;
251
252 addr = LP872X_LDO1_VOUT + offset;
253 break;
254 case LP8720_ID_BUCK:
255 addr = LP8720_BUCK_VOUT1;
256 break; 234 break;
257 case LP8725_ID_BUCK1: 235 case LP8725_ID_LDO1 ... LP8725_ID_BUCK1:
258 addr = LP8725_BUCK1_VOUT1; 236 addr = LP872X_LDO1_VOUT + rid - LP8725_ID_BASE;
259 break; 237 break;
260 case LP8725_ID_BUCK2: 238 case LP8725_ID_BUCK2:
261 addr = LP8725_BUCK2_VOUT1; 239 addr = LP8725_BUCK2_VOUT1;
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
new file mode 100644
index 000000000000..f0f6ea05065b
--- /dev/null
+++ b/drivers/regulator/lp8755.c
@@ -0,0 +1,566 @@
1/*
2 * LP8755 High Performance Power Management Unit : System Interface Driver
3 * (based on rev. 0.26)
4 * Copyright 2012 Texas Instruments
5 *
6 * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/i2c.h>
17#include <linux/err.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/gpio.h>
21#include <linux/regmap.h>
22#include <linux/delay.h>
23#include <linux/uaccess.h>
24#include <linux/regulator/driver.h>
25#include <linux/regulator/machine.h>
26#include <linux/platform_data/lp8755.h>
27
/* Per-buck control register addresses (note: NOT in buck-number order) */
#define LP8755_REG_BUCK0 0x00
#define LP8755_REG_BUCK1 0x03
#define LP8755_REG_BUCK2 0x04
#define LP8755_REG_BUCK3 0x01
#define LP8755_REG_BUCK4 0x05
#define LP8755_REG_BUCK5 0x02
#define LP8755_REG_MAX 0xFF	/* highest addressable register (regmap bound) */

#define LP8755_BUCK_EN_M BIT(7)		/* enable bit in each buck control register */
#define LP8755_BUCK_LINEAR_OUT_MAX 0x76	/* highest voltage selector (n_voltages - 1) */
#define LP8755_BUCK_VOUT_M 0x7F		/* voltage-select field mask */
39
/*
 * One multi-phase configuration: how many bucks are usable and which
 * buck ids they are (see the mphase_buck[] table below).
 */
struct lp8755_mphase {
	int nreg;			/* number of valid entries in buck_num[] */
	int buck_num[LP8755_BUCK_MAX];	/* ids of the bucks active in this config */
};
44
/* Per-device driver state, allocated once in probe. */
struct lp8755_chip {
	struct device *dev;		/* underlying i2c client device */
	struct regmap *regmap;		/* register access via regmap-i2c */
	struct lp8755_platform_data *pdata;

	int irq;			/* client irq; 0 means "no interrupt used" */
	unsigned int irqmask;		/* cached interrupt mask read from reg 0x0F */

	int mphase;			/* index into mphase_buck[] (from pdata or reg 0x3D) */
	struct regulator_dev *rdev[LP8755_BUCK_MAX];	/* NULL for unregistered bucks */
};
56
57/**
58 *lp8755_read : read a single register value from lp8755.
59 *@pchip : device to read from
60 *@reg : register to read from
61 *@val : pointer to store read value
62 */
63static int lp8755_read(struct lp8755_chip *pchip, unsigned int reg,
64 unsigned int *val)
65{
66 return regmap_read(pchip->regmap, reg, val);
67}
68
69/**
70 *lp8755_write : write a single register value to lp8755.
71 *@pchip : device to write to
72 *@reg : register to write to
73 *@val : value to be written
74 */
75static int lp8755_write(struct lp8755_chip *pchip, unsigned int reg,
76 unsigned int val)
77{
78 return regmap_write(pchip->regmap, reg, val);
79}
80
81/**
82 *lp8755_update_bits : set the values of bit fields in lp8755 register.
83 *@pchip : device to read from
84 *@reg : register to update
85 *@mask : bitmask to be changed
86 *@val : value for bitmask
87 */
88static int lp8755_update_bits(struct lp8755_chip *pchip, unsigned int reg,
89 unsigned int mask, unsigned int val)
90{
91 return regmap_update_bits(pchip->regmap, reg, mask, val);
92}
93
94static int lp8755_buck_enable_time(struct regulator_dev *rdev)
95{
96 int ret;
97 unsigned int regval;
98 enum lp8755_bucks id = rdev_get_id(rdev);
99 struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
100
101 ret = lp8755_read(pchip, 0x12 + id, &regval);
102 if (ret < 0) {
103 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
104 return ret;
105 }
106 return (regval & 0xff) * 100;
107}
108
109static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
110{
111 int ret;
112 unsigned int regbval = 0x0;
113 enum lp8755_bucks id = rdev_get_id(rdev);
114 struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
115
116 switch (mode) {
117 case REGULATOR_MODE_FAST:
118 /* forced pwm mode */
119 regbval = (0x01 << id);
120 break;
121 case REGULATOR_MODE_NORMAL:
122 /* enable automatic pwm/pfm mode */
123 ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x00);
124 if (ret < 0)
125 goto err_i2c;
126 break;
127 case REGULATOR_MODE_IDLE:
128 /* enable automatic pwm/pfm/lppfm mode */
129 ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x20);
130 if (ret < 0)
131 goto err_i2c;
132
133 ret = lp8755_update_bits(pchip, 0x10, 0x01, 0x01);
134 if (ret < 0)
135 goto err_i2c;
136 break;
137 default:
138 dev_err(pchip->dev, "Not supported buck mode %s\n", __func__);
139 /* forced pwm mode */
140 regbval = (0x01 << id);
141 }
142
143 ret = lp8755_update_bits(pchip, 0x06, 0x01 << id, regbval);
144 if (ret < 0)
145 goto err_i2c;
146 return ret;
147err_i2c:
148 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
149 return ret;
150}
151
152static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
153{
154 int ret;
155 unsigned int regval;
156 enum lp8755_bucks id = rdev_get_id(rdev);
157 struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
158
159 ret = lp8755_read(pchip, 0x06, &regval);
160 if (ret < 0)
161 goto err_i2c;
162
163 /* mode fast means forced pwm mode */
164 if (regval & (0x01 << id))
165 return REGULATOR_MODE_FAST;
166
167 ret = lp8755_read(pchip, 0x08 + id, &regval);
168 if (ret < 0)
169 goto err_i2c;
170
171 /* mode idle means automatic pwm/pfm/lppfm mode */
172 if (regval & 0x20)
173 return REGULATOR_MODE_IDLE;
174
175 /* mode normal means automatic pwm/pfm mode */
176 return REGULATOR_MODE_NORMAL;
177
178err_i2c:
179 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
180 return 0;
181}
182
183static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
184{
185 int ret;
186 unsigned int regval = 0x00;
187 enum lp8755_bucks id = rdev_get_id(rdev);
188 struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
189
190 /* uV/us */
191 switch (ramp) {
192 case 0 ... 230:
193 regval = 0x07;
194 break;
195 case 231 ... 470:
196 regval = 0x06;
197 break;
198 case 471 ... 940:
199 regval = 0x05;
200 break;
201 case 941 ... 1900:
202 regval = 0x04;
203 break;
204 case 1901 ... 3800:
205 regval = 0x03;
206 break;
207 case 3801 ... 7500:
208 regval = 0x02;
209 break;
210 case 7501 ... 15000:
211 regval = 0x01;
212 break;
213 case 15001 ... 30000:
214 regval = 0x00;
215 break;
216 default:
217 dev_err(pchip->dev,
218 "Not supported ramp value %d %s\n", ramp, __func__);
219 return -EINVAL;
220 }
221
222 ret = lp8755_update_bits(pchip, 0x07 + id, 0x07, regval);
223 if (ret < 0)
224 goto err_i2c;
225 return ret;
226err_i2c:
227 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
228 return ret;
229}
230
/*
 * Operations shared by all six bucks: voltage selection and enable go
 * through the generic regmap helpers (vsel/enable fields come from the
 * lp8755_buck_desc() macro), while enable-time, mode and ramp handling
 * are chip specific.
 */
static struct regulator_ops lp8755_buck_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp8755_buck_enable_time,
	.set_mode = lp8755_buck_set_mode,
	.get_mode = lp8755_buck_get_mode,
	.set_ramp_delay = lp8755_buck_set_ramp,
};
243
/* Supply-rail name for buck N, e.g. "lp8755_buck0". */
#define lp8755_rail(_id) "lp8755_buck"#_id

/*
 * Default init_data used when no platform data is supplied: permit
 * voltage changes within the chip's 0.5 V .. 1.675 V output range.
 */
#define lp8755_buck_init(_id)\
{\
	.constraints = {\
		.name = lp8755_rail(_id),\
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,\
		.min_uV = 500000,\
		.max_uV = 1675000,\
	},\
}
254
/* Fallback init_data table, installed by lp8755_init_data() when the
 * caller provides no platform data. */
static struct regulator_init_data lp8755_reg_default[LP8755_BUCK_MAX] = {
	[LP8755_BUCK0] = lp8755_buck_init(0),
	[LP8755_BUCK1] = lp8755_buck_init(1),
	[LP8755_BUCK2] = lp8755_buck_init(2),
	[LP8755_BUCK3] = lp8755_buck_init(3),
	[LP8755_BUCK4] = lp8755_buck_init(4),
	[LP8755_BUCK5] = lp8755_buck_init(5),
};
263
/*
 * Multi-phase configurations, indexed by the value lp8755_init_data()
 * reads from the low nibble of register 0x3D (or by pdata->mphase).
 * Each entry lists which buck outputs are independently usable in
 * that phase configuration.
 */
static const struct lp8755_mphase mphase_buck[MPHASE_CONF_MAX] = {
	{ 3, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK5 } },
	{ 6, { LP8755_BUCK0, LP8755_BUCK1, LP8755_BUCK2, LP8755_BUCK3,
	       LP8755_BUCK4, LP8755_BUCK5 } },
	{ 5, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK4,
	       LP8755_BUCK5} },
	{ 4, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK4, LP8755_BUCK5} },
	{ 3, { LP8755_BUCK0, LP8755_BUCK4, LP8755_BUCK5} },
	{ 2, { LP8755_BUCK0, LP8755_BUCK5} },
	{ 1, { LP8755_BUCK0} },
	{ 2, { LP8755_BUCK0, LP8755_BUCK3} },
	{ 4, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK5} },
};
277
278static int lp8755_init_data(struct lp8755_chip *pchip)
279{
280 unsigned int regval;
281 int ret, icnt, buck_num;
282 struct lp8755_platform_data *pdata = pchip->pdata;
283
284 /* read back muti-phase configuration */
285 ret = lp8755_read(pchip, 0x3D, &regval);
286 if (ret < 0)
287 goto out_i2c_error;
288 pchip->mphase = regval & 0x0F;
289
290 /* set default data based on multi-phase config */
291 for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
292 buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
293 pdata->buck_data[buck_num] = &lp8755_reg_default[buck_num];
294 }
295 return ret;
296
297out_i2c_error:
298 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
299 return ret;
300}
301
/*
 * Descriptor for buck N: linear range 0.5 V + 10 mV * selector, with
 * both the enable bit and the voltage-select field living in the same
 * per-buck control register (LP8755_REG_BUCKn).
 */
#define lp8755_buck_desc(_id)\
{\
	.name = lp8755_rail(_id),\
	.id = LP8755_BUCK##_id,\
	.ops = &lp8755_buck_ops,\
	.n_voltages = LP8755_BUCK_LINEAR_OUT_MAX+1,\
	.uV_step = 10000,\
	.min_uV = 500000,\
	.type = REGULATOR_VOLTAGE,\
	.owner = THIS_MODULE,\
	.enable_reg = LP8755_REG_BUCK##_id,\
	.enable_mask = LP8755_BUCK_EN_M,\
	.vsel_reg = LP8755_REG_BUCK##_id,\
	.vsel_mask = LP8755_BUCK_VOUT_M,\
}
317
/* One descriptor per buck, indexed by buck id. */
static struct regulator_desc lp8755_regulators[] = {
	lp8755_buck_desc(0),
	lp8755_buck_desc(1),
	lp8755_buck_desc(2),
	lp8755_buck_desc(3),
	lp8755_buck_desc(4),
	lp8755_buck_desc(5),
};
326
327static int lp8755_regulator_init(struct lp8755_chip *pchip)
328{
329 int ret, icnt, buck_num;
330 struct lp8755_platform_data *pdata = pchip->pdata;
331 struct regulator_config rconfig = { };
332
333 rconfig.regmap = pchip->regmap;
334 rconfig.dev = pchip->dev;
335 rconfig.driver_data = pchip;
336
337 for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
338 buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
339 rconfig.init_data = pdata->buck_data[buck_num];
340 rconfig.of_node = pchip->dev->of_node;
341 pchip->rdev[buck_num] =
342 regulator_register(&lp8755_regulators[buck_num], &rconfig);
343 if (IS_ERR(pchip->rdev[buck_num])) {
344 ret = PTR_ERR(pchip->rdev[buck_num]);
345 pchip->rdev[buck_num] = NULL;
346 dev_err(pchip->dev, "regulator init failed: buck %d\n",
347 buck_num);
348 goto err_buck;
349 }
350 }
351
352 return 0;
353
354err_buck:
355 for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
356 regulator_unregister(pchip->rdev[icnt]);
357 return ret;
358}
359
360static irqreturn_t lp8755_irq_handler(int irq, void *data)
361{
362 int ret, icnt;
363 unsigned int flag0, flag1;
364 struct lp8755_chip *pchip = data;
365
366 /* read flag0 register */
367 ret = lp8755_read(pchip, 0x0D, &flag0);
368 if (ret < 0)
369 goto err_i2c;
370 /* clear flag register to pull up int. pin */
371 ret = lp8755_write(pchip, 0x0D, 0x00);
372 if (ret < 0)
373 goto err_i2c;
374
375 /* sent power fault detection event to specific regulator */
376 for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
377 if ((flag0 & (0x4 << icnt))
378 && (pchip->irqmask & (0x04 << icnt))
379 && (pchip->rdev[icnt] != NULL))
380 regulator_notifier_call_chain(pchip->rdev[icnt],
381 LP8755_EVENT_PWR_FAULT,
382 NULL);
383
384 /* read flag1 register */
385 ret = lp8755_read(pchip, 0x0E, &flag1);
386 if (ret < 0)
387 goto err_i2c;
388 /* clear flag register to pull up int. pin */
389 ret = lp8755_write(pchip, 0x0E, 0x00);
390 if (ret < 0)
391 goto err_i2c;
392
393 /* send OCP event to all regualtor devices */
394 if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
395 for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
396 if (pchip->rdev[icnt] != NULL)
397 regulator_notifier_call_chain(pchip->rdev[icnt],
398 LP8755_EVENT_OCP,
399 NULL);
400
401 /* send OVP event to all regualtor devices */
402 if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
403 for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
404 if (pchip->rdev[icnt] != NULL)
405 regulator_notifier_call_chain(pchip->rdev[icnt],
406 LP8755_EVENT_OVP,
407 NULL);
408 return IRQ_HANDLED;
409
410err_i2c:
411 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
412 return IRQ_NONE;
413}
414
415static int lp8755_int_config(struct lp8755_chip *pchip)
416{
417 int ret;
418 unsigned int regval;
419
420 if (pchip->irq == 0) {
421 dev_warn(pchip->dev, "not use interrupt : %s\n", __func__);
422 return 0;
423 }
424
425 ret = lp8755_read(pchip, 0x0F, &regval);
426 if (ret < 0)
427 goto err_i2c;
428 pchip->irqmask = regval;
429 ret = request_threaded_irq(pchip->irq, NULL, lp8755_irq_handler,
430 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
431 "lp8755-irq", pchip);
432 if (ret)
433 return ret;
434
435 return ret;
436
437err_i2c:
438 dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
439 return ret;
440}
441
/* 8-bit register addresses, 8-bit values, full 0x00..0xFF range. */
static const struct regmap_config lp8755_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = LP8755_REG_MAX,
};
447
/*
 * lp8755_probe - i2c probe: set up regmap, platform data, regulators
 * and (optionally) the fault interrupt.
 *
 * If no platform data is supplied, defaults are derived from the chip
 * via lp8755_init_data().  On failure after regulator registration the
 * error paths unregister the regulators and write 0 to registers
 * 0x00..LP8755_BUCK_MAX-1 to disable all outputs.
 */
static int lp8755_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int ret, icnt;
	struct lp8755_chip *pchip;
	struct lp8755_platform_data *pdata = client->dev.platform_data;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "i2c functionality check fail.\n");
		return -EOPNOTSUPP;
	}

	/* devm allocation: freed automatically on probe failure/removal */
	pchip = devm_kzalloc(&client->dev,
			     sizeof(struct lp8755_chip), GFP_KERNEL);
	if (!pchip)
		return -ENOMEM;

	pchip->dev = &client->dev;
	pchip->regmap = devm_regmap_init_i2c(client, &lp8755_regmap);
	if (IS_ERR(pchip->regmap)) {
		ret = PTR_ERR(pchip->regmap);
		dev_err(&client->dev, "fail to allocate regmap %d\n", ret);
		return ret;
	}
	i2c_set_clientdata(client, pchip);

	if (pdata != NULL) {
		pchip->pdata = pdata;
		pchip->mphase = pdata->mphase;
	} else {
		/* no platform data: build defaults from the chip itself */
		pchip->pdata = devm_kzalloc(pchip->dev,
					    sizeof(struct lp8755_platform_data),
					    GFP_KERNEL);
		if (!pchip->pdata)
			return -ENOMEM;
		ret = lp8755_init_data(pchip);
		if (ret < 0) {
			dev_err(&client->dev, "fail to initialize chip\n");
			return ret;
		}
	}

	ret = lp8755_regulator_init(pchip);
	if (ret < 0) {
		dev_err(&client->dev, "fail to initialize regulators\n");
		goto err_regulator;
	}

	pchip->irq = client->irq;
	ret = lp8755_int_config(pchip);
	if (ret < 0) {
		dev_err(&client->dev, "fail to irq config\n");
		goto err_irq;
	}

	return ret;

err_irq:
	/* NOTE(review): indexes rdev[] by loop position, not by
	 * mphase_buck buck id as lp8755_remove() does -- relies on
	 * regulator_unregister(NULL) being a no-op for unused slots */
	for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
		regulator_unregister(pchip->rdev[icnt]);

err_regulator:
	/* output disable */
	for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
		lp8755_write(pchip, icnt, 0x00);

	return ret;
}
516
517static int lp8755_remove(struct i2c_client *client)
518{
519 int icnt;
520 struct lp8755_chip *pchip = i2c_get_clientdata(client);
521
522 for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
523 regulator_unregister(pchip->rdev[icnt]);
524
525 for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
526 lp8755_write(pchip, icnt, 0x00);
527
528 if (pchip->irq != 0)
529 free_irq(pchip->irq, pchip);
530
531 return 0;
532}
533
/* i2c device id table; exported for module autoloading. */
static const struct i2c_device_id lp8755_id[] = {
	{LP8755_NAME, 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, lp8755_id);
540
/* i2c driver glue binding probe/remove to the LP8755 device id. */
static struct i2c_driver lp8755_i2c_driver = {
	.driver = {
		   .name = LP8755_NAME,
		   },
	.probe = lp8755_probe,
	.remove = lp8755_remove,
	.id_table = lp8755_id,
};
549
/* Register the driver early (subsys_initcall) so dependent consumers
 * can find their supplies during boot. */
static int __init lp8755_init(void)
{
	return i2c_add_driver(&lp8755_i2c_driver);
}

subsys_initcall(lp8755_init);
556
/* Module unload: remove the i2c driver registration. */
static void __exit lp8755_exit(void)
{
	i2c_del_driver(&lp8755_i2c_driver);
}

module_exit(lp8755_exit);
563
564MODULE_DESCRIPTION("Texas Instruments lp8755 driver");
565MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
566MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index aef3f2b0c5ea..97891a7ea7b2 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -103,16 +103,6 @@ static const int lp8788_buck_vtbl[] = {
103 1950000, 2000000, 103 1950000, 2000000,
104}; 104};
105 105
106static const u8 buck1_vout_addr[] = {
107 LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1,
108 LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3,
109};
110
111static const u8 buck2_vout_addr[] = {
112 LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1,
113 LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3,
114};
115
116static void lp8788_buck1_set_dvs(struct lp8788_buck *buck) 106static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
117{ 107{
118 struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs; 108 struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs;
@@ -235,7 +225,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
235 lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val); 225 lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
236 idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S; 226 idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
237 } 227 }
238 addr = buck1_vout_addr[idx]; 228 addr = LP8788_BUCK1_VOUT0 + idx;
239 break; 229 break;
240 case BUCK2: 230 case BUCK2:
241 if (mode == EXTPIN) { 231 if (mode == EXTPIN) {
@@ -258,7 +248,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
258 lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val); 248 lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
259 idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S; 249 idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S;
260 } 250 }
261 addr = buck2_vout_addr[idx]; 251 addr = LP8788_BUCK2_VOUT0 + idx;
262 break; 252 break;
263 default: 253 default:
264 goto err; 254 goto err;
@@ -429,7 +419,8 @@ static struct regulator_desc lp8788_buck_desc[] = {
429 }, 419 },
430}; 420};
431 421
432static int lp8788_dvs_gpio_request(struct lp8788_buck *buck, 422static int lp8788_dvs_gpio_request(struct platform_device *pdev,
423 struct lp8788_buck *buck,
433 enum lp8788_buck_id id) 424 enum lp8788_buck_id id)
434{ 425{
435 struct lp8788_platform_data *pdata = buck->lp->pdata; 426 struct lp8788_platform_data *pdata = buck->lp->pdata;
@@ -440,7 +431,7 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
440 switch (id) { 431 switch (id) {
441 case BUCK1: 432 case BUCK1:
442 gpio = pdata->buck1_dvs->gpio; 433 gpio = pdata->buck1_dvs->gpio;
443 ret = devm_gpio_request_one(buck->lp->dev, gpio, DVS_LOW, 434 ret = devm_gpio_request_one(&pdev->dev, gpio, DVS_LOW,
444 b1_name); 435 b1_name);
445 if (ret) 436 if (ret)
446 return ret; 437 return ret;
@@ -448,9 +439,9 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
448 buck->dvs = pdata->buck1_dvs; 439 buck->dvs = pdata->buck1_dvs;
449 break; 440 break;
450 case BUCK2: 441 case BUCK2:
451 for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) { 442 for (i = 0; i < LP8788_NUM_BUCK2_DVS; i++) {
452 gpio = pdata->buck2_dvs->gpio[i]; 443 gpio = pdata->buck2_dvs->gpio[i];
453 ret = devm_gpio_request_one(buck->lp->dev, gpio, 444 ret = devm_gpio_request_one(&pdev->dev, gpio,
454 DVS_LOW, b2_name[i]); 445 DVS_LOW, b2_name[i]);
455 if (ret) 446 if (ret)
456 return ret; 447 return ret;
@@ -464,7 +455,8 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
464 return 0; 455 return 0;
465} 456}
466 457
467static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id) 458static int lp8788_init_dvs(struct platform_device *pdev,
459 struct lp8788_buck *buck, enum lp8788_buck_id id)
468{ 460{
469 struct lp8788_platform_data *pdata = buck->lp->pdata; 461 struct lp8788_platform_data *pdata = buck->lp->pdata;
470 u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M }; 462 u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M };
@@ -472,7 +464,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
472 u8 default_dvs_mode[] = { LP8788_BUCK1_DVS_I2C, LP8788_BUCK2_DVS_I2C }; 464 u8 default_dvs_mode[] = { LP8788_BUCK1_DVS_I2C, LP8788_BUCK2_DVS_I2C };
473 465
474 /* no dvs for buck3, 4 */ 466 /* no dvs for buck3, 4 */
475 if (id == BUCK3 || id == BUCK4) 467 if (id > BUCK2)
476 return 0; 468 return 0;
477 469
478 /* no dvs platform data, then dvs will be selected by I2C registers */ 470 /* no dvs platform data, then dvs will be selected by I2C registers */
@@ -483,7 +475,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
483 (id == BUCK2 && !pdata->buck2_dvs)) 475 (id == BUCK2 && !pdata->buck2_dvs))
484 goto set_default_dvs_mode; 476 goto set_default_dvs_mode;
485 477
486 if (lp8788_dvs_gpio_request(buck, id)) 478 if (lp8788_dvs_gpio_request(pdev, buck, id))
487 goto set_default_dvs_mode; 479 goto set_default_dvs_mode;
488 480
489 return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id], 481 return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id],
@@ -503,17 +495,20 @@ static int lp8788_buck_probe(struct platform_device *pdev)
503 struct regulator_dev *rdev; 495 struct regulator_dev *rdev;
504 int ret; 496 int ret;
505 497
506 buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL); 498 if (id >= LP8788_NUM_BUCKS)
499 return -EINVAL;
500
501 buck = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
507 if (!buck) 502 if (!buck)
508 return -ENOMEM; 503 return -ENOMEM;
509 504
510 buck->lp = lp; 505 buck->lp = lp;
511 506
512 ret = lp8788_init_dvs(buck, id); 507 ret = lp8788_init_dvs(pdev, buck, id);
513 if (ret) 508 if (ret)
514 return ret; 509 return ret;
515 510
516 cfg.dev = lp->dev; 511 cfg.dev = pdev->dev.parent;
517 cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL; 512 cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL;
518 cfg.driver_data = buck; 513 cfg.driver_data = buck;
519 cfg.regmap = lp->regmap; 514 cfg.regmap = lp->regmap;
@@ -521,7 +516,7 @@ static int lp8788_buck_probe(struct platform_device *pdev)
521 rdev = regulator_register(&lp8788_buck_desc[id], &cfg); 516 rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
522 if (IS_ERR(rdev)) { 517 if (IS_ERR(rdev)) {
523 ret = PTR_ERR(rdev); 518 ret = PTR_ERR(rdev);
524 dev_err(lp->dev, "BUCK%d regulator register err = %d\n", 519 dev_err(&pdev->dev, "BUCK%d regulator register err = %d\n",
525 id + 1, ret); 520 id + 1, ret);
526 return ret; 521 return ret;
527 } 522 }
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 3792741708ce..cd5a14ad9263 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -88,11 +88,6 @@
88#define ENABLE GPIOF_OUT_INIT_HIGH 88#define ENABLE GPIOF_OUT_INIT_HIGH
89#define DISABLE GPIOF_OUT_INIT_LOW 89#define DISABLE GPIOF_OUT_INIT_LOW
90 90
91enum lp8788_enable_mode {
92 REGISTER,
93 EXTPIN,
94};
95
96enum lp8788_ldo_id { 91enum lp8788_ldo_id {
97 DLDO1, 92 DLDO1,
98 DLDO2, 93 DLDO2,
@@ -189,114 +184,38 @@ static enum lp8788_ldo_id lp8788_aldo_id[] = {
189 ALDO10, 184 ALDO10,
190}; 185};
191 186
192/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7
193 : can be enabled either by external pin or by i2c register */
194static enum lp8788_enable_mode
195lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id)
196{
197 int ret;
198 u8 val, mask;
199
200 ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val);
201 if (ret)
202 return ret;
203
204 switch (id) {
205 case DLDO7:
206 mask = LP8788_EN_SEL_DLDO7_M;
207 break;
208 case DLDO9:
209 case DLDO11:
210 mask = LP8788_EN_SEL_DLDO911_M;
211 break;
212 case ALDO1:
213 mask = LP8788_EN_SEL_ALDO1_M;
214 break;
215 case ALDO2 ... ALDO4:
216 mask = LP8788_EN_SEL_ALDO234_M;
217 break;
218 case ALDO5:
219 mask = LP8788_EN_SEL_ALDO5_M;
220 break;
221 case ALDO7:
222 mask = LP8788_EN_SEL_ALDO7_M;
223 break;
224 default:
225 return REGISTER;
226 }
227
228 return val & mask ? EXTPIN : REGISTER;
229}
230
231static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate)
232{
233 struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
234
235 if (!pin)
236 return -EINVAL;
237
238 if (gpio_is_valid(pin->gpio))
239 gpio_set_value(pin->gpio, pinstate);
240
241 return 0;
242}
243
244static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo)
245{
246 struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
247
248 if (!pin)
249 return -EINVAL;
250
251 return gpio_get_value(pin->gpio) ? 1 : 0;
252}
253
254static int lp8788_ldo_enable(struct regulator_dev *rdev) 187static int lp8788_ldo_enable(struct regulator_dev *rdev)
255{ 188{
256 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); 189 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
257 enum lp8788_ldo_id id = rdev_get_id(rdev);
258 enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
259 190
260 switch (mode) { 191 if (ldo->en_pin) {
261 case EXTPIN: 192 gpio_set_value(ldo->en_pin->gpio, ENABLE);
262 return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE); 193 return 0;
263 case REGISTER: 194 } else {
264 return regulator_enable_regmap(rdev); 195 return regulator_enable_regmap(rdev);
265 default:
266 return -EINVAL;
267 } 196 }
268} 197}
269 198
270static int lp8788_ldo_disable(struct regulator_dev *rdev) 199static int lp8788_ldo_disable(struct regulator_dev *rdev)
271{ 200{
272 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); 201 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
273 enum lp8788_ldo_id id = rdev_get_id(rdev);
274 enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
275 202
276 switch (mode) { 203 if (ldo->en_pin) {
277 case EXTPIN: 204 gpio_set_value(ldo->en_pin->gpio, DISABLE);
278 return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE); 205 return 0;
279 case REGISTER: 206 } else {
280 return regulator_disable_regmap(rdev); 207 return regulator_disable_regmap(rdev);
281 default:
282 return -EINVAL;
283 } 208 }
284} 209}
285 210
286static int lp8788_ldo_is_enabled(struct regulator_dev *rdev) 211static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
287{ 212{
288 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); 213 struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
289 enum lp8788_ldo_id id = rdev_get_id(rdev);
290 enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
291 214
292 switch (mode) { 215 if (ldo->en_pin)
293 case EXTPIN: 216 return gpio_get_value(ldo->en_pin->gpio) ? 1 : 0;
294 return lp8788_ldo_is_enabled_by_extern_pin(ldo); 217 else
295 case REGISTER:
296 return regulator_is_enabled_regmap(rdev); 218 return regulator_is_enabled_regmap(rdev);
297 default:
298 return -EINVAL;
299 }
300} 219}
301 220
302static int lp8788_ldo_enable_time(struct regulator_dev *rdev) 221static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
@@ -616,10 +535,11 @@ static struct regulator_desc lp8788_aldo_desc[] = {
616 }, 535 },
617}; 536};
618 537
619static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo, 538static int lp8788_gpio_request_ldo_en(struct platform_device *pdev,
539 struct lp8788_ldo *ldo,
620 enum lp8788_ext_ldo_en_id id) 540 enum lp8788_ext_ldo_en_id id)
621{ 541{
622 struct device *dev = ldo->lp->dev; 542 struct device *dev = &pdev->dev;
623 struct lp8788_ldo_enable_pin *pin = ldo->en_pin; 543 struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
624 int ret, gpio, pinstate; 544 int ret, gpio, pinstate;
625 char *name[] = { 545 char *name[] = {
@@ -647,7 +567,8 @@ static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
647 return ret; 567 return ret;
648} 568}
649 569
650static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo, 570static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
571 struct lp8788_ldo *ldo,
651 enum lp8788_ldo_id id) 572 enum lp8788_ldo_id id)
652{ 573{
653 int ret; 574 int ret;
@@ -693,9 +614,11 @@ static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
693 614
694 ldo->en_pin = pdata->ldo_pin[enable_id]; 615 ldo->en_pin = pdata->ldo_pin[enable_id];
695 616
696 ret = lp8788_gpio_request_ldo_en(ldo, enable_id); 617 ret = lp8788_gpio_request_ldo_en(pdev, ldo, enable_id);
697 if (ret) 618 if (ret) {
619 ldo->en_pin = NULL;
698 goto set_default_ldo_enable_mode; 620 goto set_default_ldo_enable_mode;
621 }
699 622
700 return ret; 623 return ret;
701 624
@@ -712,16 +635,16 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
712 struct regulator_dev *rdev; 635 struct regulator_dev *rdev;
713 int ret; 636 int ret;
714 637
715 ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL); 638 ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
716 if (!ldo) 639 if (!ldo)
717 return -ENOMEM; 640 return -ENOMEM;
718 641
719 ldo->lp = lp; 642 ldo->lp = lp;
720 ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]); 643 ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_dldo_id[id]);
721 if (ret) 644 if (ret)
722 return ret; 645 return ret;
723 646
724 cfg.dev = lp->dev; 647 cfg.dev = pdev->dev.parent;
725 cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL; 648 cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
726 cfg.driver_data = ldo; 649 cfg.driver_data = ldo;
727 cfg.regmap = lp->regmap; 650 cfg.regmap = lp->regmap;
@@ -729,7 +652,7 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
729 rdev = regulator_register(&lp8788_dldo_desc[id], &cfg); 652 rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
730 if (IS_ERR(rdev)) { 653 if (IS_ERR(rdev)) {
731 ret = PTR_ERR(rdev); 654 ret = PTR_ERR(rdev);
732 dev_err(lp->dev, "DLDO%d regulator register err = %d\n", 655 dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
733 id + 1, ret); 656 id + 1, ret);
734 return ret; 657 return ret;
735 } 658 }
@@ -768,16 +691,16 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
768 struct regulator_dev *rdev; 691 struct regulator_dev *rdev;
769 int ret; 692 int ret;
770 693
771 ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL); 694 ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
772 if (!ldo) 695 if (!ldo)
773 return -ENOMEM; 696 return -ENOMEM;
774 697
775 ldo->lp = lp; 698 ldo->lp = lp;
776 ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]); 699 ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_aldo_id[id]);
777 if (ret) 700 if (ret)
778 return ret; 701 return ret;
779 702
780 cfg.dev = lp->dev; 703 cfg.dev = pdev->dev.parent;
781 cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL; 704 cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
782 cfg.driver_data = ldo; 705 cfg.driver_data = ldo;
783 cfg.regmap = lp->regmap; 706 cfg.regmap = lp->regmap;
@@ -785,7 +708,7 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
785 rdev = regulator_register(&lp8788_aldo_desc[id], &cfg); 708 rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
786 if (IS_ERR(rdev)) { 709 if (IS_ERR(rdev)) {
787 ret = PTR_ERR(rdev); 710 ret = PTR_ERR(rdev);
788 dev_err(lp->dev, "ALDO%d regulator register err = %d\n", 711 dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
789 id + 1, ret); 712 id + 1, ret);
790 return ret; 713 return ret;
791 } 714 }
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index b85040caaea3..e4586ee8858d 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -75,13 +75,14 @@ static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
75{ 75{
76 unsigned int val; 76 unsigned int val;
77 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 77 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
78 int id = rdev_get_id(rdev);
78 79
79 if (rdev->desc->id == MAX77686_BUCK1) 80 if (id == MAX77686_BUCK1)
80 val = 0x1; 81 val = 0x1;
81 else 82 else
82 val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT; 83 val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
83 84
84 max77686->opmode[rdev->desc->id] = val; 85 max77686->opmode[id] = val;
85 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 86 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
86 rdev->desc->enable_mask, 87 rdev->desc->enable_mask,
87 val); 88 val);
@@ -93,9 +94,10 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
93{ 94{
94 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 95 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
95 unsigned int val; 96 unsigned int val;
97 int id = rdev_get_id(rdev);
96 98
97 /* BUCK[5-9] doesn't support this feature */ 99 /* BUCK[5-9] doesn't support this feature */
98 if (rdev->desc->id >= MAX77686_BUCK5) 100 if (id >= MAX77686_BUCK5)
99 return 0; 101 return 0;
100 102
101 switch (mode) { 103 switch (mode) {
@@ -111,7 +113,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
111 return -EINVAL; 113 return -EINVAL;
112 } 114 }
113 115
114 max77686->opmode[rdev->desc->id] = val; 116 max77686->opmode[id] = val;
115 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 117 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
116 rdev->desc->enable_mask, 118 rdev->desc->enable_mask,
117 val); 119 val);
@@ -140,7 +142,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
140 return -EINVAL; 142 return -EINVAL;
141 } 143 }
142 144
143 max77686->opmode[rdev->desc->id] = val; 145 max77686->opmode[rdev_get_id(rdev)] = val;
144 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 146 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
145 rdev->desc->enable_mask, 147 rdev->desc->enable_mask,
146 val); 148 val);
@@ -152,7 +154,7 @@ static int max77686_enable(struct regulator_dev *rdev)
152 154
153 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 155 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
154 rdev->desc->enable_mask, 156 rdev->desc->enable_mask,
155 max77686->opmode[rdev->desc->id]); 157 max77686->opmode[rdev_get_id(rdev)]);
156} 158}
157 159
158static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) 160static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
@@ -379,9 +381,10 @@ static struct regulator_desc regulators[] = {
379}; 381};
380 382
381#ifdef CONFIG_OF 383#ifdef CONFIG_OF
382static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 384static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
383 struct max77686_platform_data *pdata) 385 struct max77686_platform_data *pdata)
384{ 386{
387 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
385 struct device_node *pmic_np, *regulators_np; 388 struct device_node *pmic_np, *regulators_np;
386 struct max77686_regulator_data *rdata; 389 struct max77686_regulator_data *rdata;
387 struct of_regulator_match rmatch; 390 struct of_regulator_match rmatch;
@@ -390,15 +393,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
390 pmic_np = iodev->dev->of_node; 393 pmic_np = iodev->dev->of_node;
391 regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); 394 regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
392 if (!regulators_np) { 395 if (!regulators_np) {
393 dev_err(iodev->dev, "could not find regulators sub-node\n"); 396 dev_err(&pdev->dev, "could not find regulators sub-node\n");
394 return -EINVAL; 397 return -EINVAL;
395 } 398 }
396 399
397 pdata->num_regulators = ARRAY_SIZE(regulators); 400 pdata->num_regulators = ARRAY_SIZE(regulators);
398 rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 401 rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
399 pdata->num_regulators, GFP_KERNEL); 402 pdata->num_regulators, GFP_KERNEL);
400 if (!rdata) { 403 if (!rdata) {
401 dev_err(iodev->dev, 404 dev_err(&pdev->dev,
402 "could not allocate memory for regulator data\n"); 405 "could not allocate memory for regulator data\n");
403 return -ENOMEM; 406 return -ENOMEM;
404 } 407 }
@@ -407,7 +410,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
407 rmatch.name = regulators[i].name; 410 rmatch.name = regulators[i].name;
408 rmatch.init_data = NULL; 411 rmatch.init_data = NULL;
409 rmatch.of_node = NULL; 412 rmatch.of_node = NULL;
410 of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); 413 of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
411 rdata[i].initdata = rmatch.init_data; 414 rdata[i].initdata = rmatch.init_data;
412 rdata[i].of_node = rmatch.of_node; 415 rdata[i].of_node = rmatch.of_node;
413 } 416 }
@@ -417,7 +420,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
417 return 0; 420 return 0;
418} 421}
419#else 422#else
420static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 423static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
421 struct max77686_platform_data *pdata) 424 struct max77686_platform_data *pdata)
422{ 425{
423 return 0; 426 return 0;
@@ -440,7 +443,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
440 } 443 }
441 444
442 if (iodev->dev->of_node) { 445 if (iodev->dev->of_node) {
443 ret = max77686_pmic_dt_parse_pdata(iodev, pdata); 446 ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
444 if (ret) 447 if (ret)
445 return ret; 448 return ret;
446 } 449 }
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index d1a77512d83e..4568c15fa78d 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -224,11 +224,11 @@ static struct of_regulator_match max8907_matches[] = {
224 224
225static int max8907_regulator_parse_dt(struct platform_device *pdev) 225static int max8907_regulator_parse_dt(struct platform_device *pdev)
226{ 226{
227 struct device_node *np = pdev->dev.parent->of_node; 227 struct device_node *np, *regulators;
228 struct device_node *regulators;
229 int ret; 228 int ret;
230 229
231 if (!pdev->dev.parent->of_node) 230 np = of_node_get(pdev->dev.parent->of_node);
231 if (!np)
232 return 0; 232 return 0;
233 233
234 regulators = of_find_node_by_name(np, "regulators"); 234 regulators = of_find_node_by_name(np, "regulators");
@@ -237,9 +237,9 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
237 return -EINVAL; 237 return -EINVAL;
238 } 238 }
239 239
240 ret = of_regulator_match(pdev->dev.parent, regulators, 240 ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,
241 max8907_matches,
242 ARRAY_SIZE(max8907_matches)); 241 ARRAY_SIZE(max8907_matches));
242 of_node_put(regulators);
243 if (ret < 0) { 243 if (ret < 0) {
244 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", 244 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
245 ret); 245 ret);
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 446a85445553..0d5f64a805a0 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -252,7 +252,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
252{ 252{
253 struct device_node *nproot, *np; 253 struct device_node *nproot, *np;
254 int rcount; 254 int rcount;
255 nproot = pdev->dev.parent->of_node; 255 nproot = of_node_get(pdev->dev.parent->of_node);
256 if (!nproot) 256 if (!nproot)
257 return -ENODEV; 257 return -ENODEV;
258 np = of_find_node_by_name(nproot, "regulators"); 258 np = of_find_node_by_name(nproot, "regulators");
@@ -263,6 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
263 263
264 rcount = of_regulator_match(&pdev->dev, np, 264 rcount = of_regulator_match(&pdev->dev, np,
265 &max8925_regulator_matches[ridx], 1); 265 &max8925_regulator_matches[ridx], 1);
266 of_node_put(np);
266 if (rcount < 0) 267 if (rcount < 0)
267 return -ENODEV; 268 return -ENODEV;
268 config->init_data = max8925_regulator_matches[ridx].init_data; 269 config->init_data = max8925_regulator_matches[ridx].init_data;
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 02be7fcae32f..0ac7a87519b4 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -54,6 +54,13 @@ struct max8997_data {
54 u8 saved_states[MAX8997_REG_MAX]; 54 u8 saved_states[MAX8997_REG_MAX];
55}; 55};
56 56
57static const unsigned int safeoutvolt[] = {
58 4850000,
59 4900000,
60 4950000,
61 3300000,
62};
63
57static inline void max8997_set_gpio(struct max8997_data *max8997) 64static inline void max8997_set_gpio(struct max8997_data *max8997)
58{ 65{
59 int set3 = (max8997->buck125_gpioindex) & 0x1; 66 int set3 = (max8997->buck125_gpioindex) & 0x1;
@@ -130,29 +137,6 @@ static const struct voltage_map_desc *reg_voltage_map[] = {
130 [MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc, 137 [MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
131}; 138};
132 139
133static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
134 unsigned int selector)
135{
136 int rid = rdev_get_id(rdev);
137
138 if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
139 switch (selector) {
140 case 0:
141 return 4850000;
142 case 1:
143 return 4900000;
144 case 2:
145 return 4950000;
146 case 3:
147 return 3300000;
148 default:
149 return -EINVAL;
150 }
151 }
152
153 return -EINVAL;
154}
155
156static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev, 140static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
157 unsigned int selector) 141 unsigned int selector)
158{ 142{
@@ -522,7 +506,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
522 return ret; 506 return ret;
523} 507}
524 508
525static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev, 509static int max8997_set_voltage_buck_time_sel(struct regulator_dev *rdev,
526 unsigned int old_selector, 510 unsigned int old_selector,
527 unsigned int new_selector) 511 unsigned int new_selector)
528{ 512{
@@ -720,49 +704,23 @@ out:
720 return 0; 704 return 0;
721} 705}
722 706
723static const int safeoutvolt[] = {
724 3300000,
725 4850000,
726 4900000,
727 4950000,
728};
729
730/* For SAFEOUT1 and SAFEOUT2 */ 707/* For SAFEOUT1 and SAFEOUT2 */
731static int max8997_set_voltage_safeout(struct regulator_dev *rdev, 708static int max8997_set_voltage_safeout_sel(struct regulator_dev *rdev,
732 int min_uV, int max_uV, unsigned *selector) 709 unsigned selector)
733{ 710{
734 struct max8997_data *max8997 = rdev_get_drvdata(rdev); 711 struct max8997_data *max8997 = rdev_get_drvdata(rdev);
735 struct i2c_client *i2c = max8997->iodev->i2c; 712 struct i2c_client *i2c = max8997->iodev->i2c;
736 int rid = rdev_get_id(rdev); 713 int rid = rdev_get_id(rdev);
737 int reg, shift = 0, mask, ret; 714 int reg, shift = 0, mask, ret;
738 int i = 0;
739 u8 val;
740 715
741 if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2) 716 if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
742 return -EINVAL; 717 return -EINVAL;
743 718
744 for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
745 if (min_uV <= safeoutvolt[i] &&
746 max_uV >= safeoutvolt[i])
747 break;
748 }
749
750 if (i >= ARRAY_SIZE(safeoutvolt))
751 return -EINVAL;
752
753 if (i == 0)
754 val = 0x3;
755 else
756 val = i - 1;
757
758 ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask); 719 ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
759 if (ret) 720 if (ret)
760 return ret; 721 return ret;
761 722
762 ret = max8997_update_reg(i2c, reg, val << shift, mask << shift); 723 return max8997_update_reg(i2c, reg, selector << shift, mask << shift);
763 *selector = val;
764
765 return ret;
766} 724}
767 725
768static int max8997_reg_disable_suspend(struct regulator_dev *rdev) 726static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
@@ -799,7 +757,6 @@ static struct regulator_ops max8997_ldo_ops = {
799 .disable = max8997_reg_disable, 757 .disable = max8997_reg_disable,
800 .get_voltage_sel = max8997_get_voltage_sel, 758 .get_voltage_sel = max8997_get_voltage_sel,
801 .set_voltage = max8997_set_voltage_ldobuck, 759 .set_voltage = max8997_set_voltage_ldobuck,
802 .set_voltage_time_sel = max8997_set_voltage_ldobuck_time_sel,
803 .set_suspend_disable = max8997_reg_disable_suspend, 760 .set_suspend_disable = max8997_reg_disable_suspend,
804}; 761};
805 762
@@ -810,7 +767,7 @@ static struct regulator_ops max8997_buck_ops = {
810 .disable = max8997_reg_disable, 767 .disable = max8997_reg_disable,
811 .get_voltage_sel = max8997_get_voltage_sel, 768 .get_voltage_sel = max8997_get_voltage_sel,
812 .set_voltage = max8997_set_voltage_buck, 769 .set_voltage = max8997_set_voltage_buck,
813 .set_voltage_time_sel = max8997_set_voltage_ldobuck_time_sel, 770 .set_voltage_time_sel = max8997_set_voltage_buck_time_sel,
814 .set_suspend_disable = max8997_reg_disable_suspend, 771 .set_suspend_disable = max8997_reg_disable_suspend,
815}; 772};
816 773
@@ -823,12 +780,12 @@ static struct regulator_ops max8997_fixedvolt_ops = {
823}; 780};
824 781
825static struct regulator_ops max8997_safeout_ops = { 782static struct regulator_ops max8997_safeout_ops = {
826 .list_voltage = max8997_list_voltage_safeout, 783 .list_voltage = regulator_list_voltage_table,
827 .is_enabled = max8997_reg_is_enabled, 784 .is_enabled = max8997_reg_is_enabled,
828 .enable = max8997_reg_enable, 785 .enable = max8997_reg_enable,
829 .disable = max8997_reg_disable, 786 .disable = max8997_reg_disable,
830 .get_voltage_sel = max8997_get_voltage_sel, 787 .get_voltage_sel = max8997_get_voltage_sel,
831 .set_voltage = max8997_set_voltage_safeout, 788 .set_voltage_sel = max8997_set_voltage_safeout_sel,
832 .set_suspend_disable = max8997_reg_disable_suspend, 789 .set_suspend_disable = max8997_reg_disable_suspend,
833}; 790};
834 791
@@ -934,7 +891,7 @@ static struct regulator_desc regulators[] = {
934}; 891};
935 892
936#ifdef CONFIG_OF 893#ifdef CONFIG_OF
937static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, 894static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
938 struct max8997_platform_data *pdata, 895 struct max8997_platform_data *pdata,
939 struct device_node *pmic_np) 896 struct device_node *pmic_np)
940{ 897{
@@ -944,7 +901,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
944 gpio = of_get_named_gpio(pmic_np, 901 gpio = of_get_named_gpio(pmic_np,
945 "max8997,pmic-buck125-dvs-gpios", i); 902 "max8997,pmic-buck125-dvs-gpios", i);
946 if (!gpio_is_valid(gpio)) { 903 if (!gpio_is_valid(gpio)) {
947 dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); 904 dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
948 return -EINVAL; 905 return -EINVAL;
949 } 906 }
950 pdata->buck125_gpios[i] = gpio; 907 pdata->buck125_gpios[i] = gpio;
@@ -952,35 +909,34 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
952 return 0; 909 return 0;
953} 910}
954 911
955static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 912static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
956 struct max8997_platform_data *pdata) 913 struct max8997_platform_data *pdata)
957{ 914{
915 struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
958 struct device_node *pmic_np, *regulators_np, *reg_np; 916 struct device_node *pmic_np, *regulators_np, *reg_np;
959 struct max8997_regulator_data *rdata; 917 struct max8997_regulator_data *rdata;
960 unsigned int i, dvs_voltage_nr = 1, ret; 918 unsigned int i, dvs_voltage_nr = 1, ret;
961 919
962 pmic_np = iodev->dev->of_node; 920 pmic_np = of_node_get(iodev->dev->of_node);
963 if (!pmic_np) { 921 if (!pmic_np) {
964 dev_err(iodev->dev, "could not find pmic sub-node\n"); 922 dev_err(&pdev->dev, "could not find pmic sub-node\n");
965 return -ENODEV; 923 return -ENODEV;
966 } 924 }
967 925
968 regulators_np = of_find_node_by_name(pmic_np, "regulators"); 926 regulators_np = of_find_node_by_name(pmic_np, "regulators");
969 if (!regulators_np) { 927 if (!regulators_np) {
970 dev_err(iodev->dev, "could not find regulators sub-node\n"); 928 dev_err(&pdev->dev, "could not find regulators sub-node\n");
971 return -EINVAL; 929 return -EINVAL;
972 } 930 }
973 931
974 /* count the number of regulators to be supported in pmic */ 932 /* count the number of regulators to be supported in pmic */
975 pdata->num_regulators = 0; 933 pdata->num_regulators = of_get_child_count(regulators_np);
976 for_each_child_of_node(regulators_np, reg_np)
977 pdata->num_regulators++;
978 934
979 rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 935 rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
980 pdata->num_regulators, GFP_KERNEL); 936 pdata->num_regulators, GFP_KERNEL);
981 if (!rdata) { 937 if (!rdata) {
982 dev_err(iodev->dev, "could not allocate memory for " 938 of_node_put(regulators_np);
983 "regulator data\n"); 939 dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
984 return -ENOMEM; 940 return -ENOMEM;
985 } 941 }
986 942
@@ -991,17 +947,18 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
991 break; 947 break;
992 948
993 if (i == ARRAY_SIZE(regulators)) { 949 if (i == ARRAY_SIZE(regulators)) {
994 dev_warn(iodev->dev, "don't know how to configure " 950 dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
995 "regulator %s\n", reg_np->name); 951 reg_np->name);
996 continue; 952 continue;
997 } 953 }
998 954
999 rdata->id = i; 955 rdata->id = i;
1000 rdata->initdata = of_get_regulator_init_data( 956 rdata->initdata = of_get_regulator_init_data(&pdev->dev,
1001 iodev->dev, reg_np); 957 reg_np);
1002 rdata->reg_node = reg_np; 958 rdata->reg_node = reg_np;
1003 rdata++; 959 rdata++;
1004 } 960 }
961 of_node_put(regulators_np);
1005 962
1006 if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL)) 963 if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
1007 pdata->buck1_gpiodvs = true; 964 pdata->buck1_gpiodvs = true;
@@ -1014,7 +971,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
1014 971
1015 if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || 972 if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
1016 pdata->buck5_gpiodvs) { 973 pdata->buck5_gpiodvs) {
1017 ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); 974 ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
1018 if (ret) 975 if (ret)
1019 return -EINVAL; 976 return -EINVAL;
1020 977
@@ -1025,8 +982,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
1025 } else { 982 } else {
1026 if (pdata->buck125_default_idx >= 8) { 983 if (pdata->buck125_default_idx >= 8) {
1027 pdata->buck125_default_idx = 0; 984 pdata->buck125_default_idx = 0;
1028 dev_info(iodev->dev, "invalid value for " 985 dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
1029 "default dvs index, using 0 instead\n");
1030 } 986 }
1031 } 987 }
1032 988
@@ -1040,28 +996,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
1040 if (of_property_read_u32_array(pmic_np, 996 if (of_property_read_u32_array(pmic_np,
1041 "max8997,pmic-buck1-dvs-voltage", 997 "max8997,pmic-buck1-dvs-voltage",
1042 pdata->buck1_voltage, dvs_voltage_nr)) { 998 pdata->buck1_voltage, dvs_voltage_nr)) {
1043 dev_err(iodev->dev, "buck1 voltages not specified\n"); 999 dev_err(&pdev->dev, "buck1 voltages not specified\n");
1044 return -EINVAL; 1000 return -EINVAL;
1045 } 1001 }
1046 1002
1047 if (of_property_read_u32_array(pmic_np, 1003 if (of_property_read_u32_array(pmic_np,
1048 "max8997,pmic-buck2-dvs-voltage", 1004 "max8997,pmic-buck2-dvs-voltage",
1049 pdata->buck2_voltage, dvs_voltage_nr)) { 1005 pdata->buck2_voltage, dvs_voltage_nr)) {
1050 dev_err(iodev->dev, "buck2 voltages not specified\n"); 1006 dev_err(&pdev->dev, "buck2 voltages not specified\n");
1051 return -EINVAL; 1007 return -EINVAL;
1052 } 1008 }
1053 1009
1054 if (of_property_read_u32_array(pmic_np, 1010 if (of_property_read_u32_array(pmic_np,
1055 "max8997,pmic-buck5-dvs-voltage", 1011 "max8997,pmic-buck5-dvs-voltage",
1056 pdata->buck5_voltage, dvs_voltage_nr)) { 1012 pdata->buck5_voltage, dvs_voltage_nr)) {
1057 dev_err(iodev->dev, "buck5 voltages not specified\n"); 1013 dev_err(&pdev->dev, "buck5 voltages not specified\n");
1058 return -EINVAL; 1014 return -EINVAL;
1059 } 1015 }
1060 1016
1061 return 0; 1017 return 0;
1062} 1018}
1063#else 1019#else
1064static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 1020static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
1065 struct max8997_platform_data *pdata) 1021 struct max8997_platform_data *pdata)
1066{ 1022{
1067 return 0; 1023 return 0;
@@ -1085,7 +1041,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
1085 } 1041 }
1086 1042
1087 if (iodev->dev->of_node) { 1043 if (iodev->dev->of_node) {
1088 ret = max8997_pmic_dt_parse_pdata(iodev, pdata); 1044 ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
1089 if (ret) 1045 if (ret)
1090 return ret; 1046 return ret;
1091 } 1047 }
@@ -1234,13 +1190,15 @@ static int max8997_pmic_probe(struct platform_device *pdev)
1234 int id = pdata->regulators[i].id; 1190 int id = pdata->regulators[i].id;
1235 1191
1236 desc = reg_voltage_map[id]; 1192 desc = reg_voltage_map[id];
1237 if (desc) 1193 if (desc) {
1238 regulators[id].n_voltages = 1194 regulators[id].n_voltages =
1239 (desc->max - desc->min) / desc->step + 1; 1195 (desc->max - desc->min) / desc->step + 1;
1240 else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) 1196 } else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) {
1241 regulators[id].n_voltages = 4; 1197 regulators[id].volt_table = safeoutvolt;
1242 else if (id == MAX8997_CHARGER_CV) 1198 regulators[id].n_voltages = ARRAY_SIZE(safeoutvolt);
1199 } else if (id == MAX8997_CHARGER_CV) {
1243 regulators[id].n_voltages = 16; 1200 regulators[id].n_voltages = 16;
1201 }
1244 1202
1245 config.dev = max8997->dev; 1203 config.dev = max8997->dev;
1246 config.init_data = pdata->regulators[i].initdata; 1204 config.init_data = pdata->regulators[i].initdata;
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 1f0df4046b86..b588f07c7cad 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {
65 .min = 2800000, .step = 100000, .max = 3100000, 65 .min = 2800000, .step = 100000, .max = 3100000,
66}; 66};
67static const struct voltage_map_desc ldo10_voltage_map_desc = { 67static const struct voltage_map_desc ldo10_voltage_map_desc = {
68 .min = 95000, .step = 50000, .max = 1300000, 68 .min = 950000, .step = 50000, .max = 1300000,
69}; 69};
70static const struct voltage_map_desc ldo1213_voltage_map_desc = { 70static const struct voltage_map_desc ldo1213_voltage_map_desc = {
71 .min = 800000, .step = 100000, .max = 3300000, 71 .min = 800000, .step = 100000, .max = 3300000,
@@ -311,25 +311,13 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
311 dev_get_platdata(max8998->iodev->dev); 311 dev_get_platdata(max8998->iodev->dev);
312 struct i2c_client *i2c = max8998->iodev->i2c; 312 struct i2c_client *i2c = max8998->iodev->i2c;
313 int buck = rdev_get_id(rdev); 313 int buck = rdev_get_id(rdev);
314 int reg, shift = 0, mask, ret; 314 int reg, shift = 0, mask, ret, j;
315 int j, previous_sel;
316 static u8 buck1_last_val; 315 static u8 buck1_last_val;
317 316
318 ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask); 317 ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
319 if (ret) 318 if (ret)
320 return ret; 319 return ret;
321 320
322 previous_sel = max8998_get_voltage_sel(rdev);
323
324 /* Check if voltage needs to be changed */
325 /* if previous_voltage equal new voltage, return */
326 if (previous_sel == selector) {
327 dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
328 regulator_list_voltage_linear(rdev, previous_sel),
329 regulator_list_voltage_linear(rdev, selector));
330 return ret;
331 }
332
333 switch (buck) { 321 switch (buck) {
334 case MAX8998_BUCK1: 322 case MAX8998_BUCK1:
335 dev_dbg(max8998->dev, 323 dev_dbg(max8998->dev,
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 0d84b1f33199..9891aec47b57 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -164,6 +164,14 @@ static const unsigned int mc13892_sw1[] = {
164 1350000, 1375000 164 1350000, 1375000
165}; 165};
166 166
167/*
168 * Note: this table is used to derive SWxVSEL by index into
169 * the array. Offset the values by the index of 1100000uV
170 * to get the actual register value for that voltage selector
171 * if the HI bit is to be set as well.
172 */
173#define MC13892_SWxHI_SEL_OFFSET 20
174
167static const unsigned int mc13892_sw[] = { 175static const unsigned int mc13892_sw[] = {
168 600000, 625000, 650000, 675000, 700000, 725000, 176 600000, 625000, 650000, 675000, 700000, 725000,
169 750000, 775000, 800000, 825000, 850000, 875000, 177 750000, 775000, 800000, 825000, 850000, 875000,
@@ -239,7 +247,6 @@ static const unsigned int mc13892_pwgtdrv[] = {
239}; 247};
240 248
241static struct regulator_ops mc13892_gpo_regulator_ops; 249static struct regulator_ops mc13892_gpo_regulator_ops;
242/* sw regulators need special care due to the "hi bit" */
243static struct regulator_ops mc13892_sw_regulator_ops; 250static struct regulator_ops mc13892_sw_regulator_ops;
244 251
245 252
@@ -396,7 +403,7 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
396{ 403{
397 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); 404 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
398 int ret, id = rdev_get_id(rdev); 405 int ret, id = rdev_get_id(rdev);
399 unsigned int val; 406 unsigned int val, selector;
400 407
401 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); 408 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
402 409
@@ -407,12 +414,28 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
407 if (ret) 414 if (ret)
408 return ret; 415 return ret;
409 416
410 val = (val & mc13892_regulators[id].vsel_mask) 417 /*
411 >> mc13892_regulators[id].vsel_shift; 418 * Figure out if the HI bit is set inside the switcher mode register
419 * since this means the selector value we return is at a different
420 * offset into the selector table.
421 *
422 * According to the MC13892 documentation note 59 (Table 47) the SW1
423 * buck switcher does not support output range programming therefore
424 * the HI bit must always remain 0. So do not do anything strange if
425 * our register is MC13892_SWITCHERS0.
426 */
427
428 selector = val & mc13892_regulators[id].vsel_mask;
429
430 if ((mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) &&
431 (val & MC13892_SWITCHERS0_SWxHI)) {
432 selector += MC13892_SWxHI_SEL_OFFSET;
433 }
412 434
413 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 435 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: 0x%08x selector: %d\n",
436 __func__, id, val, selector);
414 437
415 return val; 438 return selector;
416} 439}
417 440
418static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, 441static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
@@ -425,18 +448,35 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
425 448
426 volt = rdev->desc->volt_table[selector]; 449 volt = rdev->desc->volt_table[selector];
427 mask = mc13892_regulators[id].vsel_mask; 450 mask = mc13892_regulators[id].vsel_mask;
428 reg_value = selector << mc13892_regulators[id].vsel_shift; 451 reg_value = selector;
429 452
430 if (volt > 1375000) { 453 /*
431 mask |= MC13892_SWITCHERS0_SWxHI; 454 * Don't mess with the HI bit or support HI voltage offsets for SW1.
432 reg_value |= MC13892_SWITCHERS0_SWxHI; 455 *
433 } else if (volt < 1100000) { 456 * Since the get_voltage_sel callback has given a fudged value for
434 mask |= MC13892_SWITCHERS0_SWxHI; 457 * the selector offset, we need to back out that offset if HI is
435 reg_value &= ~MC13892_SWITCHERS0_SWxHI; 458 * to be set so we write the correct value to the register.
459 *
460 * The HI bit addition and selector offset handling COULD be more
461 * complicated by shifting and masking off the voltage selector part
462 * of the register then logical OR it back in, but since the selector
463 * is at bits 4:0 there is very little point. This makes the whole
464 * thing more readable and we do far less work.
465 */
466
467 if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) {
468 if (volt > 1375000) {
469 reg_value -= MC13892_SWxHI_SEL_OFFSET;
470 reg_value |= MC13892_SWITCHERS0_SWxHI;
471 mask |= MC13892_SWITCHERS0_SWxHI;
472 } else if (volt < 1100000) {
473 reg_value &= ~MC13892_SWITCHERS0_SWxHI;
474 mask |= MC13892_SWITCHERS0_SWxHI;
475 }
436 } 476 }
437 477
438 mc13xxx_lock(priv->mc13xxx); 478 mc13xxx_lock(priv->mc13xxx);
439 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, mask, 479 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask,
440 reg_value); 480 reg_value);
441 mc13xxx_unlock(priv->mc13xxx); 481 mc13xxx_unlock(priv->mc13xxx);
442 482
@@ -495,15 +535,18 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
495 struct mc13xxx_regulator_init_data *mc13xxx_data; 535 struct mc13xxx_regulator_init_data *mc13xxx_data;
496 struct regulator_config config = { }; 536 struct regulator_config config = { };
497 int i, ret; 537 int i, ret;
498 int num_regulators = 0; 538 int num_regulators = 0, num_parsed;
499 u32 val; 539 u32 val;
500 540
501 num_regulators = mc13xxx_get_num_regulators_dt(pdev); 541 num_regulators = mc13xxx_get_num_regulators_dt(pdev);
542
502 if (num_regulators <= 0 && pdata) 543 if (num_regulators <= 0 && pdata)
503 num_regulators = pdata->num_regulators; 544 num_regulators = pdata->num_regulators;
504 if (num_regulators <= 0) 545 if (num_regulators <= 0)
505 return -EINVAL; 546 return -EINVAL;
506 547
548 num_parsed = num_regulators;
549
507 priv = devm_kzalloc(&pdev->dev, sizeof(*priv) + 550 priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
508 num_regulators * sizeof(priv->regulators[0]), 551 num_regulators * sizeof(priv->regulators[0]),
509 GFP_KERNEL); 552 GFP_KERNEL);
@@ -520,7 +563,7 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
520 if (ret) 563 if (ret)
521 goto err_unlock; 564 goto err_unlock;
522 565
523 /* enable switch auto mode */ 566 /* enable switch auto mode (on 2.0A silicon only) */
524 if ((val & 0x0000FFFF) == 0x45d0) { 567 if ((val & 0x0000FFFF) == 0x45d0) {
525 ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4, 568 ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
526 MC13892_SWITCHERS4_SW1MODE_M | 569 MC13892_SWITCHERS4_SW1MODE_M |
@@ -546,7 +589,39 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
546 = mc13892_vcam_get_mode; 589 = mc13892_vcam_get_mode;
547 590
548 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators, 591 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
549 ARRAY_SIZE(mc13892_regulators)); 592 ARRAY_SIZE(mc13892_regulators),
593 &num_parsed);
594
595 /*
596 * Perform a little sanity check on the regulator tree - if we found
597 * a number of regulators from mc13xxx_get_num_regulators_dt and
598 * then parsed a smaller number in mc13xxx_parse_regulators_dt then
599 * there is a regulator defined in the regulators node which has
600 * not matched any usable regulator in the driver. In this case,
601 * there is one missing and what will happen is the first regulator
602 * will get registered again.
603 *
604 * Fix this by basically making our number of registerable regulators
605 * equal to the number of regulators we parsed. We are allocating
606 * too much memory for priv, but this is unavoidable at this point.
607 *
608 * As an example of how this can happen, try making a typo in your
609 * regulators node (vviohi {} instead of viohi {}) so that the name
610 * does not match..
611 *
612 * The check will basically pass for platform data (non-DT) because
613 * mc13xxx_parse_regulators_dt for !CONFIG_OF will not touch num_parsed.
614 *
615 */
616 if (num_parsed != num_regulators) {
617 dev_warn(&pdev->dev,
618 "parsed %d != regulators %d - check your device tree!\n",
619 num_parsed, num_regulators);
620
621 num_regulators = num_parsed;
622 priv->num_regulators = num_regulators;
623 }
624
550 for (i = 0; i < num_regulators; i++) { 625 for (i = 0; i < num_regulators; i++) {
551 struct regulator_init_data *init_data; 626 struct regulator_init_data *init_data;
552 struct regulator_desc *desc; 627 struct regulator_desc *desc;
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 4ed89c654110..23cf9f9c383b 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -164,29 +164,30 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
164#ifdef CONFIG_OF 164#ifdef CONFIG_OF
165int mc13xxx_get_num_regulators_dt(struct platform_device *pdev) 165int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
166{ 166{
167 struct device_node *parent, *child; 167 struct device_node *parent;
168 int num = 0; 168 int num;
169 169
170 of_node_get(pdev->dev.parent->of_node); 170 of_node_get(pdev->dev.parent->of_node);
171 parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); 171 parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
172 if (!parent) 172 if (!parent)
173 return -ENODEV; 173 return -ENODEV;
174 174
175 for_each_child_of_node(parent, child) 175 num = of_get_child_count(parent);
176 num++; 176 of_node_put(parent);
177
178 return num; 177 return num;
179} 178}
180EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt); 179EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
181 180
182struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( 181struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
183 struct platform_device *pdev, struct mc13xxx_regulator *regulators, 182 struct platform_device *pdev, struct mc13xxx_regulator *regulators,
184 int num_regulators) 183 int num_regulators, int *num_parsed)
185{ 184{
186 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); 185 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
187 struct mc13xxx_regulator_init_data *data, *p; 186 struct mc13xxx_regulator_init_data *data, *p;
188 struct device_node *parent, *child; 187 struct device_node *parent, *child;
189 int i; 188 int i, parsed = 0;
189
190 *num_parsed = 0;
190 191
191 of_node_get(pdev->dev.parent->of_node); 192 of_node_get(pdev->dev.parent->of_node);
192 parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); 193 parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
@@ -195,24 +196,32 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
195 196
196 data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators, 197 data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
197 GFP_KERNEL); 198 GFP_KERNEL);
198 if (!data) 199 if (!data) {
200 of_node_put(parent);
199 return NULL; 201 return NULL;
202 }
203
200 p = data; 204 p = data;
201 205
202 for_each_child_of_node(parent, child) { 206 for_each_child_of_node(parent, child) {
203 for (i = 0; i < num_regulators; i++) { 207 for (i = 0; i < num_regulators; i++) {
204 if (!of_node_cmp(child->name, 208 if (!of_node_cmp(child->name,
205 regulators[i].desc.name)) { 209 regulators[i].desc.name)) {
210
206 p->id = i; 211 p->id = i;
207 p->init_data = of_get_regulator_init_data( 212 p->init_data = of_get_regulator_init_data(
208 &pdev->dev, child); 213 &pdev->dev, child);
209 p->node = child; 214 p->node = child;
210 p++; 215 p++;
216
217 parsed++;
211 break; 218 break;
212 } 219 }
213 } 220 }
214 } 221 }
222 of_node_put(parent);
215 223
224 *num_parsed = parsed;
216 return data; 225 return data;
217} 226}
218EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt); 227EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 06c8903f182a..007f83387fd6 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -39,7 +39,7 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
39extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev); 39extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
40extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( 40extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
41 struct platform_device *pdev, struct mc13xxx_regulator *regulators, 41 struct platform_device *pdev, struct mc13xxx_regulator *regulators,
42 int num_regulators); 42 int num_regulators, int *num_parsed);
43#else 43#else
44static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev) 44static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
45{ 45{
@@ -48,7 +48,7 @@ static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
48 48
49static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( 49static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
50 struct platform_device *pdev, struct mc13xxx_regulator *regulators, 50 struct platform_device *pdev, struct mc13xxx_regulator *regulators,
51 int num_regulators) 51 int num_regulators, int *num_parsed)
52{ 52{
53 return NULL; 53 return NULL;
54} 54}
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6f684916fd79..66ca769287ab 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
120 if (!dev || !node) 120 if (!dev || !node)
121 return -EINVAL; 121 return -EINVAL;
122 122
123 for (i = 0; i < num_matches; i++) {
124 struct of_regulator_match *match = &matches[i];
125 match->init_data = NULL;
126 match->of_node = NULL;
127 }
128
123 for_each_child_of_node(node, child) { 129 for_each_child_of_node(node, child) {
124 name = of_get_property(child, 130 name = of_get_property(child,
125 "regulator-compatible", NULL); 131 "regulator-compatible", NULL);
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c9e912f583bc..cde13bb5a8fb 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -527,6 +527,7 @@ static void palmas_dt_to_pdata(struct device *dev,
527 u32 prop; 527 u32 prop;
528 int idx, ret; 528 int idx, ret;
529 529
530 node = of_node_get(node);
530 regulators = of_find_node_by_name(node, "regulators"); 531 regulators = of_find_node_by_name(node, "regulators");
531 if (!regulators) { 532 if (!regulators) {
532 dev_info(dev, "regulator node not found\n"); 533 dev_info(dev, "regulator node not found\n");
@@ -535,6 +536,7 @@ static void palmas_dt_to_pdata(struct device *dev,
535 536
536 ret = of_regulator_match(dev, regulators, palmas_matches, 537 ret = of_regulator_match(dev, regulators, palmas_matches,
537 PALMAS_NUM_REGS); 538 PALMAS_NUM_REGS);
539 of_node_put(regulators);
538 if (ret < 0) { 540 if (ret < 0) {
539 dev_err(dev, "Error parsing regulator init data: %d\n", ret); 541 dev_err(dev, "Error parsing regulator init data: %d\n", ret);
540 return; 542 return;
@@ -566,11 +568,6 @@ static void palmas_dt_to_pdata(struct device *dev,
566 pdata->reg_init[idx]->mode_sleep = prop; 568 pdata->reg_init[idx]->mode_sleep = prop;
567 569
568 ret = of_property_read_u32(palmas_matches[idx].of_node, 570 ret = of_property_read_u32(palmas_matches[idx].of_node,
569 "ti,warm_reset", &prop);
570 if (!ret)
571 pdata->reg_init[idx]->warm_reset = prop;
572
573 ret = of_property_read_u32(palmas_matches[idx].of_node,
574 "ti,tstep", &prop); 571 "ti,tstep", &prop);
575 if (!ret) 572 if (!ret)
576 pdata->reg_init[idx]->tstep = prop; 573 pdata->reg_init[idx]->tstep = prop;
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index bd062a2ffbe2..cd9ea2ea1826 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
174 .min_uV = S2MPS11_BUCK_MIN2, \ 174 .min_uV = S2MPS11_BUCK_MIN2, \
175 .uV_step = S2MPS11_BUCK_STEP2, \ 175 .uV_step = S2MPS11_BUCK_STEP2, \
176 .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ 176 .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
177 .vsel_reg = S2MPS11_REG_B9CTRL2, \ 177 .vsel_reg = S2MPS11_REG_B10CTRL2, \
178 .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ 178 .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
179 .enable_reg = S2MPS11_REG_B9CTRL1, \ 179 .enable_reg = S2MPS11_REG_B10CTRL1, \
180 .enable_mask = S2MPS11_ENABLE_MASK \ 180 .enable_mask = S2MPS11_ENABLE_MASK \
181} 181}
182 182
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 33b65c9ad5d5..8a831947c351 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -14,6 +14,7 @@
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/of_gpio.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -21,6 +22,9 @@
21#include <linux/regulator/machine.h> 22#include <linux/regulator/machine.h>
22#include <linux/mfd/samsung/core.h> 23#include <linux/mfd/samsung/core.h>
23#include <linux/mfd/samsung/s5m8767.h> 24#include <linux/mfd/samsung/s5m8767.h>
25#include <linux/regulator/of_regulator.h>
26
27#define S5M8767_OPMODE_NORMAL_MODE 0x1
24 28
25struct s5m8767_info { 29struct s5m8767_info {
26 struct device *dev; 30 struct device *dev;
@@ -255,10 +259,8 @@ static int s5m8767_reg_disable(struct regulator_dev *rdev)
255 return sec_reg_update(s5m8767->iodev, reg, ~mask, mask); 259 return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
256} 260}
257 261
258static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg) 262static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
259{ 263{
260 struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
261 int reg_id = rdev_get_id(rdev);
262 int reg; 264 int reg;
263 265
264 switch (reg_id) { 266 switch (reg_id) {
@@ -296,43 +298,18 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
296 return -EINVAL; 298 return -EINVAL;
297 } 299 }
298 300
299 *_reg = reg; 301 return reg;
300
301 return 0;
302}
303
304static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
305{
306 struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
307 int reg, mask, ret;
308 int reg_id = rdev_get_id(rdev);
309 unsigned int val;
310
311 ret = s5m8767_get_voltage_register(rdev, &reg);
312 if (ret)
313 return ret;
314
315 mask = (reg_id < S5M8767_BUCK1) ? 0x3f : 0xff;
316
317 ret = sec_reg_read(s5m8767->iodev, reg, &val);
318 if (ret)
319 return ret;
320
321 val &= mask;
322
323 return val;
324} 302}
325 303
326static int s5m8767_convert_voltage_to_sel( 304static int s5m8767_convert_voltage_to_sel(const struct sec_voltage_desc *desc,
327 const struct sec_voltage_desc *desc, 305 int min_vol)
328 int min_vol, int max_vol)
329{ 306{
330 int selector = 0; 307 int selector = 0;
331 308
332 if (desc == NULL) 309 if (desc == NULL)
333 return -EINVAL; 310 return -EINVAL;
334 311
335 if (max_vol < desc->min || min_vol > desc->max) 312 if (min_vol > desc->max)
336 return -EINVAL; 313 return -EINVAL;
337 314
338 if (min_vol < desc->min) 315 if (min_vol < desc->min)
@@ -340,7 +317,7 @@ static int s5m8767_convert_voltage_to_sel(
340 317
341 selector = DIV_ROUND_UP(min_vol - desc->min, desc->step); 318 selector = DIV_ROUND_UP(min_vol - desc->min, desc->step);
342 319
343 if (desc->min + desc->step * selector > max_vol) 320 if (desc->min + desc->step * selector > desc->max)
344 return -EINVAL; 321 return -EINVAL;
345 322
346 return selector; 323 return selector;
@@ -373,15 +350,13 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
373{ 350{
374 struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev); 351 struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
375 int reg_id = rdev_get_id(rdev); 352 int reg_id = rdev_get_id(rdev);
376 int reg, mask, ret = 0, old_index, index = 0; 353 int old_index, index = 0;
377 u8 *buck234_vol = NULL; 354 u8 *buck234_vol = NULL;
378 355
379 switch (reg_id) { 356 switch (reg_id) {
380 case S5M8767_LDO1 ... S5M8767_LDO28: 357 case S5M8767_LDO1 ... S5M8767_LDO28:
381 mask = 0x3f;
382 break; 358 break;
383 case S5M8767_BUCK1 ... S5M8767_BUCK6: 359 case S5M8767_BUCK1 ... S5M8767_BUCK6:
384 mask = 0xff;
385 if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs) 360 if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs)
386 buck234_vol = &s5m8767->buck2_vol[0]; 361 buck234_vol = &s5m8767->buck2_vol[0];
387 else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs) 362 else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs)
@@ -392,7 +367,6 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
392 case S5M8767_BUCK7 ... S5M8767_BUCK8: 367 case S5M8767_BUCK7 ... S5M8767_BUCK8:
393 return -EINVAL; 368 return -EINVAL;
394 case S5M8767_BUCK9: 369 case S5M8767_BUCK9:
395 mask = 0xff;
396 break; 370 break;
397 default: 371 default:
398 return -EINVAL; 372 return -EINVAL;
@@ -412,11 +386,7 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
412 else 386 else
413 return s5m8767_set_low(s5m8767); 387 return s5m8767_set_low(s5m8767);
414 } else { 388 } else {
415 ret = s5m8767_get_voltage_register(rdev, &reg); 389 return regulator_set_voltage_sel_regmap(rdev, selector);
416 if (ret)
417 return ret;
418
419 return sec_reg_update(s5m8767->iodev, reg, selector, mask);
420 } 390 }
421} 391}
422 392
@@ -441,7 +411,7 @@ static struct regulator_ops s5m8767_ops = {
441 .is_enabled = s5m8767_reg_is_enabled, 411 .is_enabled = s5m8767_reg_is_enabled,
442 .enable = s5m8767_reg_enable, 412 .enable = s5m8767_reg_enable,
443 .disable = s5m8767_reg_disable, 413 .disable = s5m8767_reg_disable,
444 .get_voltage_sel = s5m8767_get_voltage_sel, 414 .get_voltage_sel = regulator_get_voltage_sel_regmap,
445 .set_voltage_sel = s5m8767_set_voltage_sel, 415 .set_voltage_sel = s5m8767_set_voltage_sel,
446 .set_voltage_time_sel = s5m8767_set_voltage_time_sel, 416 .set_voltage_time_sel = s5m8767_set_voltage_time_sel,
447}; 417};
@@ -508,10 +478,182 @@ static struct regulator_desc regulators[] = {
508 s5m8767_regulator_desc(BUCK9), 478 s5m8767_regulator_desc(BUCK9),
509}; 479};
510 480
481#ifdef CONFIG_OF
482static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
483 struct sec_platform_data *pdata,
484 struct device_node *pmic_np)
485{
486 int i, gpio;
487
488 for (i = 0; i < 3; i++) {
489 gpio = of_get_named_gpio(pmic_np,
490 "s5m8767,pmic-buck-dvs-gpios", i);
491 if (!gpio_is_valid(gpio)) {
492 dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
493 return -EINVAL;
494 }
495 pdata->buck_gpios[i] = gpio;
496 }
497 return 0;
498}
499
500static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
501 struct sec_platform_data *pdata,
502 struct device_node *pmic_np)
503{
504 int i, gpio;
505
506 for (i = 0; i < 3; i++) {
507 gpio = of_get_named_gpio(pmic_np,
508 "s5m8767,pmic-buck-ds-gpios", i);
509 if (!gpio_is_valid(gpio)) {
510 dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
511 return -EINVAL;
512 }
513 pdata->buck_ds[i] = gpio;
514 }
515 return 0;
516}
517
518static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
519 struct sec_platform_data *pdata)
520{
521 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
522 struct device_node *pmic_np, *regulators_np, *reg_np;
523 struct sec_regulator_data *rdata;
524 struct sec_opmode_data *rmode;
525 unsigned int i, dvs_voltage_nr = 1, ret;
526
527 pmic_np = iodev->dev->of_node;
528 if (!pmic_np) {
529 dev_err(iodev->dev, "could not find pmic sub-node\n");
530 return -ENODEV;
531 }
532
533 regulators_np = of_find_node_by_name(pmic_np, "regulators");
534 if (!regulators_np) {
535 dev_err(iodev->dev, "could not find regulators sub-node\n");
536 return -EINVAL;
537 }
538
539 /* count the number of regulators to be supported in pmic */
540 pdata->num_regulators = of_get_child_count(regulators_np);
541
542 rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
543 pdata->num_regulators, GFP_KERNEL);
544 if (!rdata) {
545 dev_err(iodev->dev,
546 "could not allocate memory for regulator data\n");
547 return -ENOMEM;
548 }
549
550 rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
551 pdata->num_regulators, GFP_KERNEL);
552 if (!rdata) {
553 dev_err(iodev->dev,
554 "could not allocate memory for regulator mode\n");
555 return -ENOMEM;
556 }
557
558 pdata->regulators = rdata;
559 pdata->opmode = rmode;
560 for_each_child_of_node(regulators_np, reg_np) {
561 for (i = 0; i < ARRAY_SIZE(regulators); i++)
562 if (!of_node_cmp(reg_np->name, regulators[i].name))
563 break;
564
565 if (i == ARRAY_SIZE(regulators)) {
566 dev_warn(iodev->dev,
567 "don't know how to configure regulator %s\n",
568 reg_np->name);
569 continue;
570 }
571
572 rdata->id = i;
573 rdata->initdata = of_get_regulator_init_data(
574 &pdev->dev, reg_np);
575 rdata->reg_node = reg_np;
576 rdata++;
577 rmode->id = i;
578 if (of_property_read_u32(reg_np, "op_mode",
579 &rmode->mode)) {
580 dev_warn(iodev->dev,
581 "no op_mode property property at %s\n",
582 reg_np->full_name);
583
584 rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
585 }
586 rmode++;
587 }
588
589 if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL))
590 pdata->buck2_gpiodvs = true;
591
592 if (of_get_property(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs", NULL))
593 pdata->buck3_gpiodvs = true;
594
595 if (of_get_property(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs", NULL))
596 pdata->buck4_gpiodvs = true;
597
598 if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
599 pdata->buck4_gpiodvs) {
600 ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
601 if (ret)
602 return -EINVAL;
603
604 if (of_property_read_u32(pmic_np,
605 "s5m8767,pmic-buck-default-dvs-idx",
606 &pdata->buck_default_idx)) {
607 pdata->buck_default_idx = 0;
608 } else {
609 if (pdata->buck_default_idx >= 8) {
610 pdata->buck_default_idx = 0;
611 dev_info(iodev->dev,
612 "invalid value for default dvs index, use 0\n");
613 }
614 }
615 dvs_voltage_nr = 8;
616 }
617
618 ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
619 if (ret)
620 return -EINVAL;
621
622 if (of_property_read_u32_array(pmic_np,
623 "s5m8767,pmic-buck2-dvs-voltage",
624 pdata->buck2_voltage, dvs_voltage_nr)) {
625 dev_err(iodev->dev, "buck2 voltages not specified\n");
626 return -EINVAL;
627 }
628
629 if (of_property_read_u32_array(pmic_np,
630 "s5m8767,pmic-buck3-dvs-voltage",
631 pdata->buck3_voltage, dvs_voltage_nr)) {
632 dev_err(iodev->dev, "buck3 voltages not specified\n");
633 return -EINVAL;
634 }
635
636 if (of_property_read_u32_array(pmic_np,
637 "s5m8767,pmic-buck4-dvs-voltage",
638 pdata->buck4_voltage, dvs_voltage_nr)) {
639 dev_err(iodev->dev, "buck4 voltages not specified\n");
640 return -EINVAL;
641 }
642
643 return 0;
644}
645#else
646static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
647 struct sec_platform_data *pdata)
648{
649 return 0;
650}
651#endif /* CONFIG_OF */
652
511static int s5m8767_pmic_probe(struct platform_device *pdev) 653static int s5m8767_pmic_probe(struct platform_device *pdev)
512{ 654{
513 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); 655 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
514 struct sec_platform_data *pdata = dev_get_platdata(iodev->dev); 656 struct sec_platform_data *pdata = iodev->pdata;
515 struct regulator_config config = { }; 657 struct regulator_config config = { };
516 struct regulator_dev **rdev; 658 struct regulator_dev **rdev;
517 struct s5m8767_info *s5m8767; 659 struct s5m8767_info *s5m8767;
@@ -522,6 +664,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
522 return -ENODEV; 664 return -ENODEV;
523 } 665 }
524 666
667 if (iodev->dev->of_node) {
668 ret = s5m8767_pmic_dt_parse_pdata(pdev, pdata);
669 if (ret)
670 return ret;
671 }
672
525 if (pdata->buck2_gpiodvs) { 673 if (pdata->buck2_gpiodvs) {
526 if (pdata->buck3_gpiodvs || pdata->buck4_gpiodvs) { 674 if (pdata->buck3_gpiodvs || pdata->buck4_gpiodvs) {
527 dev_err(&pdev->dev, "S5M8767 GPIO DVS NOT VALID\n"); 675 dev_err(&pdev->dev, "S5M8767 GPIO DVS NOT VALID\n");
@@ -577,23 +725,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
577 s5m8767->opmode = pdata->opmode; 725 s5m8767->opmode = pdata->opmode;
578 726
579 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, 727 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
580 pdata->buck2_init, 728 pdata->buck2_init);
581 pdata->buck2_init +
582 buck_voltage_val2.step);
583 729
584 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init); 730 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
585 731
586 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, 732 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
587 pdata->buck3_init, 733 pdata->buck3_init);
588 pdata->buck3_init +
589 buck_voltage_val2.step);
590 734
591 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init); 735 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
592 736
593 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, 737 buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
594 pdata->buck4_init, 738 pdata->buck4_init);
595 pdata->buck4_init +
596 buck_voltage_val2.step);
597 739
598 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init); 740 sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
599 741
@@ -602,27 +744,21 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
602 s5m8767->buck2_vol[i] = 744 s5m8767->buck2_vol[i] =
603 s5m8767_convert_voltage_to_sel( 745 s5m8767_convert_voltage_to_sel(
604 &buck_voltage_val2, 746 &buck_voltage_val2,
605 pdata->buck2_voltage[i], 747 pdata->buck2_voltage[i]);
606 pdata->buck2_voltage[i] +
607 buck_voltage_val2.step);
608 } 748 }
609 749
610 if (s5m8767->buck3_gpiodvs) { 750 if (s5m8767->buck3_gpiodvs) {
611 s5m8767->buck3_vol[i] = 751 s5m8767->buck3_vol[i] =
612 s5m8767_convert_voltage_to_sel( 752 s5m8767_convert_voltage_to_sel(
613 &buck_voltage_val2, 753 &buck_voltage_val2,
614 pdata->buck3_voltage[i], 754 pdata->buck3_voltage[i]);
615 pdata->buck3_voltage[i] +
616 buck_voltage_val2.step);
617 } 755 }
618 756
619 if (s5m8767->buck4_gpiodvs) { 757 if (s5m8767->buck4_gpiodvs) {
620 s5m8767->buck4_vol[i] = 758 s5m8767->buck4_vol[i] =
621 s5m8767_convert_voltage_to_sel( 759 s5m8767_convert_voltage_to_sel(
622 &buck_voltage_val2, 760 &buck_voltage_val2,
623 pdata->buck4_voltage[i], 761 pdata->buck4_voltage[i]);
624 pdata->buck4_voltage[i] +
625 buck_voltage_val2.step);
626 } 762 }
627 } 763 }
628 764
@@ -760,11 +896,19 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
760 (desc->max - desc->min) / desc->step + 1; 896 (desc->max - desc->min) / desc->step + 1;
761 regulators[id].min_uV = desc->min; 897 regulators[id].min_uV = desc->min;
762 regulators[id].uV_step = desc->step; 898 regulators[id].uV_step = desc->step;
899 regulators[id].vsel_reg =
900 s5m8767_get_vsel_reg(id, s5m8767);
901 if (id < S5M8767_BUCK1)
902 regulators[id].vsel_mask = 0x3f;
903 else
904 regulators[id].vsel_mask = 0xff;
763 } 905 }
764 906
765 config.dev = s5m8767->dev; 907 config.dev = s5m8767->dev;
766 config.init_data = pdata->regulators[i].initdata; 908 config.init_data = pdata->regulators[i].initdata;
767 config.driver_data = s5m8767; 909 config.driver_data = s5m8767;
910 config.regmap = iodev->regmap;
911 config.of_node = pdata->regulators[i].reg_node;
768 912
769 rdev[i] = regulator_register(&regulators[id], &config); 913 rdev[i] = regulator_register(&regulators[id], &config);
770 if (IS_ERR(rdev[i])) { 914 if (IS_ERR(rdev[i])) {
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index ab21133e6784..6e67be75ea1b 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -28,10 +28,13 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
31#include <linux/platform_device.h> 33#include <linux/platform_device.h>
32#include <linux/regmap.h> 34#include <linux/regmap.h>
33#include <linux/regulator/driver.h> 35#include <linux/regulator/driver.h>
34#include <linux/regulator/machine.h> 36#include <linux/regulator/machine.h>
37#include <linux/regulator/of_regulator.h>
35#include <linux/regulator/tps51632-regulator.h> 38#include <linux/regulator/tps51632-regulator.h>
36#include <linux/slab.h> 39#include <linux/slab.h>
37 40
@@ -85,49 +88,8 @@ struct tps51632_chip {
85 struct regulator_desc desc; 88 struct regulator_desc desc;
86 struct regulator_dev *rdev; 89 struct regulator_dev *rdev;
87 struct regmap *regmap; 90 struct regmap *regmap;
88 bool enable_pwm_dvfs;
89}; 91};
90 92
91static int tps51632_dcdc_get_voltage_sel(struct regulator_dev *rdev)
92{
93 struct tps51632_chip *tps = rdev_get_drvdata(rdev);
94 unsigned int data;
95 int ret;
96 unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
97 int vsel;
98
99 if (tps->enable_pwm_dvfs)
100 reg = TPS51632_VOLTAGE_BASE_REG;
101
102 ret = regmap_read(tps->regmap, reg, &data);
103 if (ret < 0) {
104 dev_err(tps->dev, "reg read failed, err %d\n", ret);
105 return ret;
106 }
107
108 vsel = data & TPS51632_VOUT_MASK;
109 return vsel;
110}
111
112static int tps51632_dcdc_set_voltage_sel(struct regulator_dev *rdev,
113 unsigned selector)
114{
115 struct tps51632_chip *tps = rdev_get_drvdata(rdev);
116 int ret;
117 unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
118
119 if (tps->enable_pwm_dvfs)
120 reg = TPS51632_VOLTAGE_BASE_REG;
121
122 if (selector > TPS51632_MAX_VSEL)
123 return -EINVAL;
124
125 ret = regmap_write(tps->regmap, reg, selector);
126 if (ret < 0)
127 dev_err(tps->dev, "reg write failed, err %d\n", ret);
128 return ret;
129}
130
131static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev, 93static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
132 int ramp_delay) 94 int ramp_delay)
133{ 95{
@@ -144,8 +106,8 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
144} 106}
145 107
146static struct regulator_ops tps51632_dcdc_ops = { 108static struct regulator_ops tps51632_dcdc_ops = {
147 .get_voltage_sel = tps51632_dcdc_get_voltage_sel, 109 .get_voltage_sel = regulator_get_voltage_sel_regmap,
148 .set_voltage_sel = tps51632_dcdc_set_voltage_sel, 110 .set_voltage_sel = regulator_set_voltage_sel_regmap,
149 .list_voltage = regulator_list_voltage_linear, 111 .list_voltage = regulator_list_voltage_linear,
150 .set_voltage_time_sel = regulator_set_voltage_time_sel, 112 .set_voltage_time_sel = regulator_set_voltage_time_sel,
151 .set_ramp_delay = tps51632_dcdc_set_ramp_delay, 113 .set_ramp_delay = tps51632_dcdc_set_ramp_delay,
@@ -162,7 +124,6 @@ static int tps51632_init_dcdc(struct tps51632_chip *tps,
162 goto skip_pwm_config; 124 goto skip_pwm_config;
163 125
164 control |= TPS51632_DVFS_PWMEN; 126 control |= TPS51632_DVFS_PWMEN;
165 tps->enable_pwm_dvfs = pdata->enable_pwm_dvfs;
166 vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV); 127 vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV);
167 ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel); 128 ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel);
168 if (ret < 0) { 129 if (ret < 0) {
@@ -205,22 +166,96 @@ skip_pwm_config:
205 return ret; 166 return ret;
206} 167}
207 168
208static bool rd_wr_reg(struct device *dev, unsigned int reg) 169static bool is_volatile_reg(struct device *dev, unsigned int reg)
170{
171 switch (reg) {
172 case TPS51632_OFFSET_REG:
173 case TPS51632_FAULT_REG:
174 case TPS51632_IMON_REG:
175 return true;
176 default:
177 return false;
178 }
179}
180
181static bool is_read_reg(struct device *dev, unsigned int reg)
209{ 182{
210 if ((reg >= 0x8) && (reg <= 0x10)) 183 switch (reg) {
184 case 0x08 ... 0x0F:
211 return false; 185 return false;
212 return true; 186 default:
187 return true;
188 }
189}
190
191static bool is_write_reg(struct device *dev, unsigned int reg)
192{
193 switch (reg) {
194 case TPS51632_VOLTAGE_SELECT_REG:
195 case TPS51632_VOLTAGE_BASE_REG:
196 case TPS51632_VMAX_REG:
197 case TPS51632_DVFS_CONTROL_REG:
198 case TPS51632_POWER_STATE_REG:
199 case TPS51632_SLEW_REGS:
200 return true;
201 default:
202 return false;
203 }
213} 204}
214 205
215static const struct regmap_config tps51632_regmap_config = { 206static const struct regmap_config tps51632_regmap_config = {
216 .reg_bits = 8, 207 .reg_bits = 8,
217 .val_bits = 8, 208 .val_bits = 8,
218 .writeable_reg = rd_wr_reg, 209 .writeable_reg = is_write_reg,
219 .readable_reg = rd_wr_reg, 210 .readable_reg = is_read_reg,
211 .volatile_reg = is_volatile_reg,
220 .max_register = TPS51632_MAX_REG - 1, 212 .max_register = TPS51632_MAX_REG - 1,
221 .cache_type = REGCACHE_RBTREE, 213 .cache_type = REGCACHE_RBTREE,
222}; 214};
223 215
216#if defined(CONFIG_OF)
217static const struct of_device_id tps51632_of_match[] = {
218 { .compatible = "ti,tps51632",},
219 {},
220};
221MODULE_DEVICE_TABLE(of, tps51632_of_match);
222
223static struct tps51632_regulator_platform_data *
224 of_get_tps51632_platform_data(struct device *dev)
225{
226 struct tps51632_regulator_platform_data *pdata;
227 struct device_node *np = dev->of_node;
228
229 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
230 if (!pdata) {
231 dev_err(dev, "Memory alloc failed for platform data\n");
232 return NULL;
233 }
234
235 pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
236 if (!pdata->reg_init_data) {
237 dev_err(dev, "Not able to get OF regulator init data\n");
238 return NULL;
239 }
240
241 pdata->enable_pwm_dvfs =
242 of_property_read_bool(np, "ti,enable-pwm-dvfs");
243 pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");
244
245 pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
246 TPS51632_MIN_VOLATGE;
247 pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
248 TPS51632_MAX_VOLATGE;
249 return pdata;
250}
251#else
252static struct tps51632_regulator_platform_data *
253 of_get_tps51632_platform_data(struct device *dev)
254{
255 return NULL;
256}
257#endif
258
224static int tps51632_probe(struct i2c_client *client, 259static int tps51632_probe(struct i2c_client *client,
225 const struct i2c_device_id *id) 260 const struct i2c_device_id *id)
226{ 261{
@@ -230,7 +265,19 @@ static int tps51632_probe(struct i2c_client *client,
230 int ret; 265 int ret;
231 struct regulator_config config = { }; 266 struct regulator_config config = { };
232 267
268 if (client->dev.of_node) {
269 const struct of_device_id *match;
270 match = of_match_device(of_match_ptr(tps51632_of_match),
271 &client->dev);
272 if (!match) {
273 dev_err(&client->dev, "Error: No device match found\n");
274 return -ENODEV;
275 }
276 }
277
233 pdata = client->dev.platform_data; 278 pdata = client->dev.platform_data;
279 if (!pdata && client->dev.of_node)
280 pdata = of_get_tps51632_platform_data(&client->dev);
234 if (!pdata) { 281 if (!pdata) {
235 dev_err(&client->dev, "No Platform data\n"); 282 dev_err(&client->dev, "No Platform data\n");
236 return -EINVAL; 283 return -EINVAL;
@@ -269,6 +316,12 @@ static int tps51632_probe(struct i2c_client *client,
269 tps->desc.type = REGULATOR_VOLTAGE; 316 tps->desc.type = REGULATOR_VOLTAGE;
270 tps->desc.owner = THIS_MODULE; 317 tps->desc.owner = THIS_MODULE;
271 318
319 if (pdata->enable_pwm_dvfs)
320 tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG;
321 else
322 tps->desc.vsel_reg = TPS51632_VOLTAGE_SELECT_REG;
323 tps->desc.vsel_mask = TPS51632_VOUT_MASK;
324
272 tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config); 325 tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config);
273 if (IS_ERR(tps->regmap)) { 326 if (IS_ERR(tps->regmap)) {
274 ret = PTR_ERR(tps->regmap); 327 ret = PTR_ERR(tps->regmap);
@@ -319,6 +372,7 @@ static struct i2c_driver tps51632_i2c_driver = {
319 .driver = { 372 .driver = {
320 .name = "tps51632", 373 .name = "tps51632",
321 .owner = THIS_MODULE, 374 .owner = THIS_MODULE,
375 .of_match_table = of_match_ptr(tps51632_of_match),
322 }, 376 },
323 .probe = tps51632_probe, 377 .probe = tps51632_probe,
324 .remove = tps51632_remove, 378 .remove = tps51632_remove,
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 0233cfb56560..54aa2da7283b 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -23,8 +23,10 @@
23#include <linux/regulator/driver.h> 23#include <linux/regulator/driver.h>
24#include <linux/regulator/machine.h> 24#include <linux/regulator/machine.h>
25#include <linux/regulator/tps6507x.h> 25#include <linux/regulator/tps6507x.h>
26#include <linux/of.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/mfd/tps6507x.h> 28#include <linux/mfd/tps6507x.h>
29#include <linux/regulator/of_regulator.h>
28 30
29/* DCDC's */ 31/* DCDC's */
30#define TPS6507X_DCDC_1 0 32#define TPS6507X_DCDC_1 0
@@ -356,6 +358,80 @@ static struct regulator_ops tps6507x_pmic_ops = {
356 .list_voltage = regulator_list_voltage_table, 358 .list_voltage = regulator_list_voltage_table,
357}; 359};
358 360
361#ifdef CONFIG_OF
362static struct of_regulator_match tps6507x_matches[] = {
363 { .name = "VDCDC1"},
364 { .name = "VDCDC2"},
365 { .name = "VDCDC3"},
366 { .name = "LDO1"},
367 { .name = "LDO2"},
368};
369
370static struct tps6507x_board *tps6507x_parse_dt_reg_data(
371 struct platform_device *pdev,
372 struct of_regulator_match **tps6507x_reg_matches)
373{
374 struct tps6507x_board *tps_board;
375 struct device_node *np = pdev->dev.parent->of_node;
376 struct device_node *regulators;
377 struct of_regulator_match *matches;
378 static struct regulator_init_data *reg_data;
379 int idx = 0, count, ret;
380
381 tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
382 GFP_KERNEL);
383 if (!tps_board) {
384 dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
385 return NULL;
386 }
387
388 regulators = of_find_node_by_name(np, "regulators");
389 if (!regulators) {
390 dev_err(&pdev->dev, "regulator node not found\n");
391 return NULL;
392 }
393
394 count = ARRAY_SIZE(tps6507x_matches);
395 matches = tps6507x_matches;
396
397 ret = of_regulator_match(&pdev->dev, regulators, matches, count);
398 if (ret < 0) {
399 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
400 ret);
401 return NULL;
402 }
403
404 *tps6507x_reg_matches = matches;
405
406 reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
407 * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
408 if (!reg_data) {
409 dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
410 return NULL;
411 }
412
413 tps_board->tps6507x_pmic_init_data = reg_data;
414
415 for (idx = 0; idx < count; idx++) {
416 if (!matches[idx].init_data || !matches[idx].of_node)
417 continue;
418
419 memcpy(&reg_data[idx], matches[idx].init_data,
420 sizeof(struct regulator_init_data));
421
422 }
423
424 return tps_board;
425}
426#else
427static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
428 struct platform_device *pdev,
429 struct of_regulator_match **tps6507x_reg_matches)
430{
431 *tps6507x_reg_matches = NULL;
432 return NULL;
433}
434#endif
359static int tps6507x_pmic_probe(struct platform_device *pdev) 435static int tps6507x_pmic_probe(struct platform_device *pdev)
360{ 436{
361 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); 437 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -365,8 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
365 struct regulator_dev *rdev; 441 struct regulator_dev *rdev;
366 struct tps6507x_pmic *tps; 442 struct tps6507x_pmic *tps;
367 struct tps6507x_board *tps_board; 443 struct tps6507x_board *tps_board;
444 struct of_regulator_match *tps6507x_reg_matches = NULL;
368 int i; 445 int i;
369 int error; 446 int error;
447 unsigned int prop;
370 448
371 /** 449 /**
372 * tps_board points to pmic related constants 450 * tps_board points to pmic related constants
@@ -374,6 +452,9 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
374 */ 452 */
375 453
376 tps_board = dev_get_platdata(tps6507x_dev->dev); 454 tps_board = dev_get_platdata(tps6507x_dev->dev);
455 if (!tps_board && tps6507x_dev->dev->of_node)
456 tps_board = tps6507x_parse_dt_reg_data(pdev,
457 &tps6507x_reg_matches);
377 if (!tps_board) 458 if (!tps_board)
378 return -EINVAL; 459 return -EINVAL;
379 460
@@ -415,6 +496,17 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
415 config.init_data = init_data; 496 config.init_data = init_data;
416 config.driver_data = tps; 497 config.driver_data = tps;
417 498
499 if (tps6507x_reg_matches) {
500 error = of_property_read_u32(
501 tps6507x_reg_matches[i].of_node,
502 "ti,defdcdc_default", &prop);
503
504 if (!error)
505 tps->info[i]->defdcdc_default = prop;
506
507 config.of_node = tps6507x_reg_matches[i].of_node;
508 }
509
418 rdev = regulator_register(&tps->desc[i], &config); 510 rdev = regulator_register(&tps->desc[i], &config);
419 if (IS_ERR(rdev)) { 511 if (IS_ERR(rdev)) {
420 dev_err(tps6507x_dev->dev, 512 dev_err(tps6507x_dev->dev,
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 41c391789c97..c8e70451df38 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -19,11 +19,13 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/of_gpio.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/regulator/driver.h> 26#include <linux/regulator/driver.h>
26#include <linux/regulator/machine.h> 27#include <linux/regulator/machine.h>
28#include <linux/regulator/of_regulator.h>
27#include <linux/mfd/tps65090.h> 29#include <linux/mfd/tps65090.h>
28 30
29struct tps65090_regulator { 31struct tps65090_regulator {
@@ -67,8 +69,8 @@ static struct regulator_desc tps65090_regulator_desc[] = {
67 tps65090_REG_DESC(FET5, "infet5", 0x13, tps65090_reg_contol_ops), 69 tps65090_REG_DESC(FET5, "infet5", 0x13, tps65090_reg_contol_ops),
68 tps65090_REG_DESC(FET6, "infet6", 0x14, tps65090_reg_contol_ops), 70 tps65090_REG_DESC(FET6, "infet6", 0x14, tps65090_reg_contol_ops),
69 tps65090_REG_DESC(FET7, "infet7", 0x15, tps65090_reg_contol_ops), 71 tps65090_REG_DESC(FET7, "infet7", 0x15, tps65090_reg_contol_ops),
70 tps65090_REG_DESC(LDO1, "vsys_l1", 0, tps65090_ldo_ops), 72 tps65090_REG_DESC(LDO1, "vsys-l1", 0, tps65090_ldo_ops),
71 tps65090_REG_DESC(LDO2, "vsys_l2", 0, tps65090_ldo_ops), 73 tps65090_REG_DESC(LDO2, "vsys-l2", 0, tps65090_ldo_ops),
72}; 74};
73 75
74static inline bool is_dcdc(int id) 76static inline bool is_dcdc(int id)
@@ -138,6 +140,92 @@ static void tps65090_configure_regulator_config(
138 } 140 }
139} 141}
140 142
143#ifdef CONFIG_OF
144static struct of_regulator_match tps65090_matches[] = {
145 { .name = "dcdc1", },
146 { .name = "dcdc2", },
147 { .name = "dcdc3", },
148 { .name = "fet1", },
149 { .name = "fet2", },
150 { .name = "fet3", },
151 { .name = "fet4", },
152 { .name = "fet5", },
153 { .name = "fet6", },
154 { .name = "fet7", },
155 { .name = "ldo1", },
156 { .name = "ldo2", },
157};
158
159static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
160 struct platform_device *pdev,
161 struct of_regulator_match **tps65090_reg_matches)
162{
163 struct tps65090_platform_data *tps65090_pdata;
164 struct device_node *np = pdev->dev.parent->of_node;
165 struct device_node *regulators;
166 int idx = 0, ret;
167 struct tps65090_regulator_plat_data *reg_pdata;
168
169 tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
170 GFP_KERNEL);
171 if (!tps65090_pdata) {
172 dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
173 return ERR_PTR(-ENOMEM);
174 }
175
176 reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
177 sizeof(*reg_pdata), GFP_KERNEL);
178 if (!reg_pdata) {
179 dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
180 return ERR_PTR(-ENOMEM);
181 }
182
183 regulators = of_find_node_by_name(np, "regulators");
184 if (!regulators) {
185 dev_err(&pdev->dev, "regulator node not found\n");
186 return ERR_PTR(-ENODEV);
187 }
188
189 ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
190 ARRAY_SIZE(tps65090_matches));
191 if (ret < 0) {
192 dev_err(&pdev->dev,
193 "Error parsing regulator init data: %d\n", ret);
194 return ERR_PTR(ret);
195 }
196
197 *tps65090_reg_matches = tps65090_matches;
198 for (idx = 0; idx < ARRAY_SIZE(tps65090_matches); idx++) {
199 struct regulator_init_data *ri_data;
200 struct tps65090_regulator_plat_data *rpdata;
201
202 rpdata = &reg_pdata[idx];
203 ri_data = tps65090_matches[idx].init_data;
204 if (!ri_data || !tps65090_matches[idx].of_node)
205 continue;
206
207 rpdata->reg_init_data = ri_data;
208 rpdata->enable_ext_control = of_property_read_bool(
209 tps65090_matches[idx].of_node,
210 "ti,enable-ext-control");
211 if (rpdata->enable_ext_control)
212 rpdata->gpio = of_get_named_gpio(np,
213 "dcdc-ext-control-gpios", 0);
214
215 tps65090_pdata->reg_pdata[idx] = rpdata;
216 }
217 return tps65090_pdata;
218}
219#else
220static inline struct tps65090_platform_data *tps65090_parse_dt_reg_data(
221 struct platform_device *pdev,
222 struct of_regulator_match **tps65090_reg_matches)
223{
224 *tps65090_reg_matches = NULL;
225 return NULL;
226}
227#endif
228
141static int tps65090_regulator_probe(struct platform_device *pdev) 229static int tps65090_regulator_probe(struct platform_device *pdev)
142{ 230{
143 struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent); 231 struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent);
@@ -147,15 +235,19 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
147 struct tps65090_regulator_plat_data *tps_pdata; 235 struct tps65090_regulator_plat_data *tps_pdata;
148 struct tps65090_regulator *pmic; 236 struct tps65090_regulator *pmic;
149 struct tps65090_platform_data *tps65090_pdata; 237 struct tps65090_platform_data *tps65090_pdata;
238 struct of_regulator_match *tps65090_reg_matches = NULL;
150 int num; 239 int num;
151 int ret; 240 int ret;
152 241
153 dev_dbg(&pdev->dev, "Probing regulator\n"); 242 dev_dbg(&pdev->dev, "Probing regulator\n");
154 243
155 tps65090_pdata = dev_get_platdata(pdev->dev.parent); 244 tps65090_pdata = dev_get_platdata(pdev->dev.parent);
156 if (!tps65090_pdata) { 245 if (!tps65090_pdata && tps65090_mfd->dev->of_node)
246 tps65090_pdata = tps65090_parse_dt_reg_data(pdev,
247 &tps65090_reg_matches);
248 if (IS_ERR_OR_NULL(tps65090_pdata)) {
157 dev_err(&pdev->dev, "Platform data missing\n"); 249 dev_err(&pdev->dev, "Platform data missing\n");
158 return -EINVAL; 250 return tps65090_pdata ? PTR_ERR(tps65090_pdata) : -EINVAL;
159 } 251 }
160 252
161 pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic), 253 pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
@@ -192,13 +284,17 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
192 } 284 }
193 } 285 }
194 286
195 config.dev = &pdev->dev; 287 config.dev = pdev->dev.parent;
196 config.driver_data = ri; 288 config.driver_data = ri;
197 config.regmap = tps65090_mfd->rmap; 289 config.regmap = tps65090_mfd->rmap;
198 if (tps_pdata) 290 if (tps_pdata)
199 config.init_data = tps_pdata->reg_init_data; 291 config.init_data = tps_pdata->reg_init_data;
200 else 292 else
201 config.init_data = NULL; 293 config.init_data = NULL;
294 if (tps65090_reg_matches)
295 config.of_node = tps65090_reg_matches[num].of_node;
296 else
297 config.of_node = NULL;
202 298
203 rdev = regulator_register(ri->desc, &config); 299 rdev = regulator_register(ri->desc, &config);
204 if (IS_ERR(rdev)) { 300 if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 73dce7664126..df395187c063 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
305 if (!regs) 305 if (!regs)
306 return NULL; 306 return NULL;
307 307
308 count = of_regulator_match(pdev->dev.parent, regs, 308 count = of_regulator_match(&pdev->dev, regs, reg_matches,
309 reg_matches, TPS65217_NUM_REGULATOR); 309 TPS65217_NUM_REGULATOR);
310 of_node_put(regs); 310 of_node_put(regs);
311 if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) 311 if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
312 return NULL; 312 return NULL;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index f86da672c758..e68382d0e1ea 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -61,10 +61,6 @@ struct tps6586x_regulator {
61 61
62 int enable_bit[2]; 62 int enable_bit[2];
63 int enable_reg[2]; 63 int enable_reg[2];
64
65 /* for DVM regulators */
66 int go_reg;
67 int go_bit;
68}; 64};
69 65
70static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev) 66static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
@@ -72,37 +68,10 @@ static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
72 return rdev_get_dev(rdev)->parent; 68 return rdev_get_dev(rdev)->parent;
73} 69}
74 70
75static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
76 unsigned selector)
77{
78 struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
79 struct device *parent = to_tps6586x_dev(rdev);
80 int ret, val, rid = rdev_get_id(rdev);
81 uint8_t mask;
82
83 val = selector << (ffs(rdev->desc->vsel_mask) - 1);
84 mask = rdev->desc->vsel_mask;
85
86 ret = tps6586x_update(parent, rdev->desc->vsel_reg, val, mask);
87 if (ret)
88 return ret;
89
90 /* Update go bit for DVM regulators */
91 switch (rid) {
92 case TPS6586X_ID_LDO_2:
93 case TPS6586X_ID_LDO_4:
94 case TPS6586X_ID_SM_0:
95 case TPS6586X_ID_SM_1:
96 ret = tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
97 break;
98 }
99 return ret;
100}
101
102static struct regulator_ops tps6586x_regulator_ops = { 71static struct regulator_ops tps6586x_regulator_ops = {
103 .list_voltage = regulator_list_voltage_table, 72 .list_voltage = regulator_list_voltage_table,
104 .get_voltage_sel = regulator_get_voltage_sel_regmap, 73 .get_voltage_sel = regulator_get_voltage_sel_regmap,
105 .set_voltage_sel = tps6586x_set_voltage_sel, 74 .set_voltage_sel = regulator_set_voltage_sel_regmap,
106 75
107 .is_enabled = regulator_is_enabled_regmap, 76 .is_enabled = regulator_is_enabled_regmap,
108 .enable = regulator_enable_regmap, 77 .enable = regulator_enable_regmap,
@@ -142,7 +111,7 @@ static const unsigned int tps6586x_dvm_voltages[] = {
142}; 111};
143 112
144#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \ 113#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \
145 ereg0, ebit0, ereg1, ebit1) \ 114 ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
146 .desc = { \ 115 .desc = { \
147 .supply_name = _pin_name, \ 116 .supply_name = _pin_name, \
148 .name = "REG-" #_id, \ 117 .name = "REG-" #_id, \
@@ -156,29 +125,26 @@ static const unsigned int tps6586x_dvm_voltages[] = {
156 .enable_mask = 1 << (ebit0), \ 125 .enable_mask = 1 << (ebit0), \
157 .vsel_reg = TPS6586X_##vreg, \ 126 .vsel_reg = TPS6586X_##vreg, \
158 .vsel_mask = ((1 << (nbits)) - 1) << (shift), \ 127 .vsel_mask = ((1 << (nbits)) - 1) << (shift), \
128 .apply_reg = (goreg), \
129 .apply_bit = (gobit), \
159 }, \ 130 }, \
160 .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \ 131 .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
161 .enable_bit[0] = (ebit0), \ 132 .enable_bit[0] = (ebit0), \
162 .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ 133 .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
163 .enable_bit[1] = (ebit1), 134 .enable_bit[1] = (ebit1),
164 135
165#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
166 .go_reg = TPS6586X_##goreg, \
167 .go_bit = (gobit),
168
169#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \ 136#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \
170 ereg0, ebit0, ereg1, ebit1) \ 137 ereg0, ebit0, ereg1, ebit1) \
171{ \ 138{ \
172 TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ 139 TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
173 ereg0, ebit0, ereg1, ebit1) \ 140 ereg0, ebit0, ereg1, ebit1, 0, 0) \
174} 141}
175 142
176#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \ 143#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \
177 ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ 144 ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
178{ \ 145{ \
179 TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ 146 TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
180 ereg0, ebit0, ereg1, ebit1) \ 147 ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
181 TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
182} 148}
183 149
184#define TPS6586X_SYS_REGULATOR() \ 150#define TPS6586X_SYS_REGULATOR() \
@@ -207,13 +173,13 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
207 TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), 173 TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
208 174
209 TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, 175 TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
210 ENB, 3, VCC2, 6), 176 ENB, 3, TPS6586X_VCC2, BIT(6)),
211 TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, 177 TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
212 END, 3, VCC1, 6), 178 END, 3, TPS6586X_VCC1, BIT(6)),
213 TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1, 179 TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
214 ENB, 1, VCC1, 2), 180 ENB, 1, TPS6586X_VCC1, BIT(2)),
215 TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, 181 TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
216 ENB, 0, VCC1, 0), 182 ENB, 0, TPS6586X_VCC1, BIT(0)),
217}; 183};
218 184
219/* 185/*
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 59c3770fa77d..6ba6931ac855 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -964,8 +964,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
964{ 964{
965 struct tps65910_board *pmic_plat_data; 965 struct tps65910_board *pmic_plat_data;
966 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); 966 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
967 struct device_node *np = pdev->dev.parent->of_node; 967 struct device_node *np, *regulators;
968 struct device_node *regulators;
969 struct of_regulator_match *matches; 968 struct of_regulator_match *matches;
970 unsigned int prop; 969 unsigned int prop;
971 int idx = 0, ret, count; 970 int idx = 0, ret, count;
@@ -978,6 +977,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
978 return NULL; 977 return NULL;
979 } 978 }
980 979
980 np = of_node_get(pdev->dev.parent->of_node);
981 regulators = of_find_node_by_name(np, "regulators"); 981 regulators = of_find_node_by_name(np, "regulators");
982 if (!regulators) { 982 if (!regulators) {
983 dev_err(&pdev->dev, "regulator node not found\n"); 983 dev_err(&pdev->dev, "regulator node not found\n");
@@ -994,11 +994,13 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
994 matches = tps65911_matches; 994 matches = tps65911_matches;
995 break; 995 break;
996 default: 996 default:
997 of_node_put(regulators);
997 dev_err(&pdev->dev, "Invalid tps chip version\n"); 998 dev_err(&pdev->dev, "Invalid tps chip version\n");
998 return NULL; 999 return NULL;
999 } 1000 }
1000 1001
1001 ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); 1002 ret = of_regulator_match(&pdev->dev, regulators, matches, count);
1003 of_node_put(regulators);
1002 if (ret < 0) { 1004 if (ret < 0) {
1003 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", 1005 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
1004 ret); 1006 ret);
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index b15d711bc8c6..9019d0e7ecb6 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -728,7 +728,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
728 } 728 }
729 } 729 }
730 rdev = regulator_register(&ri->rinfo->desc, &config); 730 rdev = regulator_register(&ri->rinfo->desc, &config);
731 if (IS_ERR_OR_NULL(rdev)) { 731 if (IS_ERR(rdev)) {
732 dev_err(&pdev->dev, 732 dev_err(&pdev->dev,
733 "register regulator failed %s\n", 733 "register regulator failed %s\n",
734 ri->rinfo->desc.name); 734 ri->rinfo->desc.name);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 923a9da9c829..5e44eaabf457 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,14 +20,24 @@ if RTC_CLASS
20config RTC_HCTOSYS 20config RTC_HCTOSYS
21 bool "Set system time from RTC on startup and resume" 21 bool "Set system time from RTC on startup and resume"
22 default y 22 default y
23 depends on !ALWAYS_USE_PERSISTENT_CLOCK
23 help 24 help
24 If you say yes here, the system time (wall clock) will be set using 25 If you say yes here, the system time (wall clock) will be set using
25 the value read from a specified RTC device. This is useful to avoid 26 the value read from a specified RTC device. This is useful to avoid
26 unnecessary fsck runs at boot time, and to network better. 27 unnecessary fsck runs at boot time, and to network better.
27 28
29config RTC_SYSTOHC
30 bool "Set the RTC time based on NTP synchronization"
31 default y
32 depends on !ALWAYS_USE_PERSISTENT_CLOCK
33 help
34 If you say yes here, the system time (wall clock) will be stored
35 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
36 minutes if userspace reports synchronized NTP status.
37
28config RTC_HCTOSYS_DEVICE 38config RTC_HCTOSYS_DEVICE
29 string "RTC used to set the system time" 39 string "RTC used to set the system time"
30 depends on RTC_HCTOSYS = y 40 depends on RTC_HCTOSYS = y || RTC_SYSTOHC = y
31 default "rtc0" 41 default "rtc0"
32 help 42 help
33 The RTC device that will be used to (re)initialize the system 43 The RTC device that will be used to (re)initialize the system
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 4418ef3f9ecc..ec2988b00a44 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
6 6
7obj-$(CONFIG_RTC_LIB) += rtc-lib.o 7obj-$(CONFIG_RTC_LIB) += rtc-lib.o
8obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o 8obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
9obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
9obj-$(CONFIG_RTC_CLASS) += rtc-core.o 10obj-$(CONFIG_RTC_CLASS) += rtc-core.o
10rtc-core-y := class.o interface.o 11rtc-core-y := class.o interface.o
11 12
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5143629dedbd..26388f182594 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -50,6 +50,10 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
50 struct rtc_device *rtc = to_rtc_device(dev); 50 struct rtc_device *rtc = to_rtc_device(dev);
51 struct rtc_time tm; 51 struct rtc_time tm;
52 struct timespec delta, delta_delta; 52 struct timespec delta, delta_delta;
53
54 if (has_persistent_clock())
55 return 0;
56
53 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) 57 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
54 return 0; 58 return 0;
55 59
@@ -88,6 +92,9 @@ static int rtc_resume(struct device *dev)
88 struct timespec new_system, new_rtc; 92 struct timespec new_system, new_rtc;
89 struct timespec sleep_time; 93 struct timespec sleep_time;
90 94
95 if (has_persistent_clock())
96 return 0;
97
91 rtc_hctosys_ret = -ENODEV; 98 rtc_hctosys_ret = -ENODEV;
92 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) 99 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
93 return 0; 100 return 0;
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index afb7cfa85ccc..c016ad81767a 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)
506{ 506{
507 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 507 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
508 struct i2c_client *client = data; 508 struct i2c_client *client = data;
509 struct rtc_device *rtc = i2c_get_clientdata(client);
509 int handled = 0, sr, err; 510 int handled = 0, sr, err;
510 511
511 /* 512 /*
@@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)
528 if (sr & ISL1208_REG_SR_ALM) { 529 if (sr & ISL1208_REG_SR_ALM) {
529 dev_dbg(&client->dev, "alarm!\n"); 530 dev_dbg(&client->dev, "alarm!\n");
530 531
532 rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
533
531 /* Clear the alarm */ 534 /* Clear the alarm */
532 sr &= ~ISL1208_REG_SR_ALM; 535 sr &= ~ISL1208_REG_SR_ALM;
533 sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); 536 sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 08378e3cc21c..81c5077feff3 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -44,6 +44,7 @@
44#define RTC_YMR 0x34 /* Year match register */ 44#define RTC_YMR 0x34 /* Year match register */
45#define RTC_YLR 0x38 /* Year data load register */ 45#define RTC_YLR 0x38 /* Year data load register */
46 46
47#define RTC_CR_EN (1 << 0) /* counter enable bit */
47#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */ 48#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
48 49
49#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */ 50#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
@@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
320 struct pl031_local *ldata; 321 struct pl031_local *ldata;
321 struct pl031_vendor_data *vendor = id->data; 322 struct pl031_vendor_data *vendor = id->data;
322 struct rtc_class_ops *ops = &vendor->ops; 323 struct rtc_class_ops *ops = &vendor->ops;
323 unsigned long time; 324 unsigned long time, data;
324 325
325 ret = amba_request_regions(adev, NULL); 326 ret = amba_request_regions(adev, NULL);
326 if (ret) 327 if (ret)
@@ -345,10 +346,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
345 dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev)); 346 dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
346 dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); 347 dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
347 348
349 data = readl(ldata->base + RTC_CR);
348 /* Enable the clockwatch on ST Variants */ 350 /* Enable the clockwatch on ST Variants */
349 if (vendor->clockwatch) 351 if (vendor->clockwatch)
350 writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, 352 data |= RTC_CR_CWEN;
351 ldata->base + RTC_CR); 353 else
354 data |= RTC_CR_EN;
355 writel(data, ldata->base + RTC_CR);
352 356
353 /* 357 /*
354 * On ST PL031 variants, the RTC reset value does not provide correct 358 * On ST PL031 variants, the RTC reset value does not provide correct
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index 00c930f4b6f3..2730533e2d2d 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
137 return -EINVAL; 137 return -EINVAL;
138 } 138 }
139 139
140 writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) 140 writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
141 | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) 141 | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
142 | (bin2bcd(tm->tm_mday)) 142 | (bin2bcd(tm->tm_mday))
143 | ((tm->tm_year >= 200) << DATE_CENTURY_S), 143 | ((tm->tm_year >= 200) << DATE_CENTURY_S),
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
new file mode 100644
index 000000000000..bf3e242ccc5c
--- /dev/null
+++ b/drivers/rtc/systohc.c
@@ -0,0 +1,44 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published by
4 * the Free Software Foundation.
5 *
6 */
7#include <linux/rtc.h>
8#include <linux/time.h>
9
10/**
11 * rtc_set_ntp_time - Save NTP synchronized time to the RTC
12 * @now: Current time of day
13 *
14 * Replacement for the NTP platform function update_persistent_clock
15 * that stores time for later retrieval by rtc_hctosys.
16 *
17 * Returns 0 on successful RTC update, -ENODEV if a RTC update is not
18 * possible at all, and various other -errno for specific temporary failure
19 * cases.
20 *
21 * If temporary failure is indicated the caller should try again 'soon'
22 */
23int rtc_set_ntp_time(struct timespec now)
24{
25 struct rtc_device *rtc;
26 struct rtc_time tm;
27 int err = -ENODEV;
28
29 if (now.tv_nsec < (NSEC_PER_SEC >> 1))
30 rtc_time_to_tm(now.tv_sec, &tm);
31 else
32 rtc_time_to_tm(now.tv_sec + 1, &tm);
33
34 rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
35 if (rtc) {
36 /* rtc_hctosys exclusively uses UTC, so we call set_time here,
37 * not set_mmss. */
38 if (rtc->ops && (rtc->ops->set_time || rtc->ops->set_mmss))
39 err = rtc_set_time(rtc, &tm);
40 rtc_class_close(rtc);
41 }
42
43 return err;
44}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d73fdcfeb45a..2839baa82a5a 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -633,7 +633,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
633 return -ENOMEM; 633 return -ENOMEM;
634 pci_set_drvdata(pdev, pci_info); 634 pci_set_drvdata(pdev, pci_info);
635 635
636 if (efi_enabled) 636 if (efi_enabled(EFI_RUNTIME_SERVICES))
637 orom = isci_get_efi_var(pdev); 637 orom = isci_get_efi_var(pdev);
638 638
639 if (!orom) 639 if (!orom)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2e188e1127eb..e79884e997ae 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -297,9 +297,20 @@ config SPI_PPC4xx
297 help 297 help
298 This selects a driver for the PPC4xx SPI Controller. 298 This selects a driver for the PPC4xx SPI Controller.
299 299
300config SPI_PXA2XX_PXADMA
301 bool "PXA2xx SSP legacy PXA DMA API support"
302 depends on SPI_PXA2XX && ARCH_PXA
303 help
304 Enable PXA private legacy DMA API support. Note that this is
305 deprecated in favor of generic DMA engine API.
306
307config SPI_PXA2XX_DMA
308 def_bool y
309 depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
310
300config SPI_PXA2XX 311config SPI_PXA2XX
301 tristate "PXA2xx SSP SPI master" 312 tristate "PXA2xx SSP SPI master"
302 depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL 313 depends on ARCH_PXA || PCI || ACPI
303 select PXA_SSP if ARCH_PXA 314 select PXA_SSP if ARCH_PXA
304 help 315 help
305 This enables using a PXA2xx or Sodaville SSP port as a SPI master 316 This enables using a PXA2xx or Sodaville SSP port as a SPI master
@@ -307,7 +318,7 @@ config SPI_PXA2XX
307 additional documentation can be found a Documentation/spi/pxa2xx. 318 additional documentation can be found a Documentation/spi/pxa2xx.
308 319
309config SPI_PXA2XX_PCI 320config SPI_PXA2XX_PCI
310 def_bool SPI_PXA2XX && X86_32 && PCI 321 def_tristate SPI_PXA2XX && PCI
311 322
312config SPI_RSPI 323config SPI_RSPI
313 tristate "Renesas RSPI controller" 324 tristate "Renesas RSPI controller"
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 64e970ba261c..e53c30941340 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -47,7 +47,10 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
47obj-$(CONFIG_SPI_ORION) += spi-orion.o 47obj-$(CONFIG_SPI_ORION) += spi-orion.o
48obj-$(CONFIG_SPI_PL022) += spi-pl022.o 48obj-$(CONFIG_SPI_PL022) += spi-pl022.o
49obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o 49obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
50obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o 50spi-pxa2xx-platform-objs := spi-pxa2xx.o
51spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
52spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
53obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
51obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o 54obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
52obj-$(CONFIG_SPI_RSPI) += spi-rspi.o 55obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
53obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o 56obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 5e7314ac51e9..a537f8dffc09 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -134,7 +134,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
134 hw->tx = t->tx_buf; 134 hw->tx = t->tx_buf;
135 hw->rx = t->rx_buf; 135 hw->rx = t->rx_buf;
136 hw->count = 0; 136 hw->count = 0;
137 hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8; 137 hw->bytes_per_word = t->bits_per_word / 8;
138 hw->len = t->len / hw->bytes_per_word; 138 hw->len = t->len / hw->bytes_per_word;
139 139
140 if (hw->irq >= 0) { 140 if (hw->irq >= 0) {
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 9a5d7791c5fb..e504b7636058 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -24,17 +24,24 @@
24#include <linux/spi/spi_bitbang.h> 24#include <linux/spi/spi_bitbang.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/clk.h>
28#include <linux/err.h>
27 29
28#include <asm/mach-ath79/ar71xx_regs.h> 30#include <asm/mach-ath79/ar71xx_regs.h>
29#include <asm/mach-ath79/ath79_spi_platform.h> 31#include <asm/mach-ath79/ath79_spi_platform.h>
30 32
31#define DRV_NAME "ath79-spi" 33#define DRV_NAME "ath79-spi"
32 34
35#define ATH79_SPI_RRW_DELAY_FACTOR 12000
36#define MHZ (1000 * 1000)
37
33struct ath79_spi { 38struct ath79_spi {
34 struct spi_bitbang bitbang; 39 struct spi_bitbang bitbang;
35 u32 ioc_base; 40 u32 ioc_base;
36 u32 reg_ctrl; 41 u32 reg_ctrl;
37 void __iomem *base; 42 void __iomem *base;
43 struct clk *clk;
44 unsigned rrw_delay;
38}; 45};
39 46
40static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg) 47static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
@@ -52,6 +59,12 @@ static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
52 return spi_master_get_devdata(spi->master); 59 return spi_master_get_devdata(spi->master);
53} 60}
54 61
62static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned nsecs)
63{
64 if (nsecs > sp->rrw_delay)
65 ndelay(nsecs - sp->rrw_delay);
66}
67
55static void ath79_spi_chipselect(struct spi_device *spi, int is_active) 68static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
56{ 69{
57 struct ath79_spi *sp = ath79_spidev_to_sp(spi); 70 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
@@ -83,15 +96,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
83 96
84} 97}
85 98
86static int ath79_spi_setup_cs(struct spi_device *spi) 99static void ath79_spi_enable(struct ath79_spi *sp)
87{ 100{
88 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
89 struct ath79_spi_controller_data *cdata;
90
91 cdata = spi->controller_data;
92 if (spi->chip_select && !cdata)
93 return -EINVAL;
94
95 /* enable GPIO mode */ 101 /* enable GPIO mode */
96 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO); 102 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
97 103
@@ -101,44 +107,48 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
101 107
102 /* TODO: setup speed? */ 108 /* TODO: setup speed? */
103 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); 109 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
110}
104 111
105 if (spi->chip_select) { 112static void ath79_spi_disable(struct ath79_spi *sp)
106 int status = 0; 113{
114 /* restore CTRL register */
115 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
116 /* disable GPIO mode */
117 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
118}
107 119
108 status = gpio_request(cdata->gpio, dev_name(&spi->dev)); 120static int ath79_spi_setup_cs(struct spi_device *spi)
109 if (status) 121{
110 return status; 122 struct ath79_spi_controller_data *cdata;
123 int status;
111 124
112 status = gpio_direction_output(cdata->gpio, 125 cdata = spi->controller_data;
113 spi->mode & SPI_CS_HIGH); 126 if (spi->chip_select && !cdata)
114 if (status) { 127 return -EINVAL;
115 gpio_free(cdata->gpio); 128
116 return status; 129 status = 0;
117 } 130 if (spi->chip_select) {
118 } else { 131 unsigned long flags;
132
133 flags = GPIOF_DIR_OUT;
119 if (spi->mode & SPI_CS_HIGH) 134 if (spi->mode & SPI_CS_HIGH)
120 sp->ioc_base |= AR71XX_SPI_IOC_CS0; 135 flags |= GPIOF_INIT_HIGH;
121 else 136 else
122 sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; 137 flags |= GPIOF_INIT_LOW;
123 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); 138
139 status = gpio_request_one(cdata->gpio, flags,
140 dev_name(&spi->dev));
124 } 141 }
125 142
126 return 0; 143 return status;
127} 144}
128 145
129static void ath79_spi_cleanup_cs(struct spi_device *spi) 146static void ath79_spi_cleanup_cs(struct spi_device *spi)
130{ 147{
131 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
132
133 if (spi->chip_select) { 148 if (spi->chip_select) {
134 struct ath79_spi_controller_data *cdata = spi->controller_data; 149 struct ath79_spi_controller_data *cdata = spi->controller_data;
135 gpio_free(cdata->gpio); 150 gpio_free(cdata->gpio);
136 } 151 }
137
138 /* restore CTRL register */
139 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
140 /* disable GPIO mode */
141 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
142} 152}
143 153
144static int ath79_spi_setup(struct spi_device *spi) 154static int ath79_spi_setup(struct spi_device *spi)
@@ -184,7 +194,11 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
184 194
185 /* setup MSB (to slave) on trailing edge */ 195 /* setup MSB (to slave) on trailing edge */
186 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out); 196 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
197 ath79_spi_delay(sp, nsecs);
187 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK); 198 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
199 ath79_spi_delay(sp, nsecs);
200 if (bits == 1)
201 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
188 202
189 word <<= 1; 203 word <<= 1;
190 } 204 }
@@ -198,6 +212,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
198 struct ath79_spi *sp; 212 struct ath79_spi *sp;
199 struct ath79_spi_platform_data *pdata; 213 struct ath79_spi_platform_data *pdata;
200 struct resource *r; 214 struct resource *r;
215 unsigned long rate;
201 int ret; 216 int ret;
202 217
203 master = spi_alloc_master(&pdev->dev, sizeof(*sp)); 218 master = spi_alloc_master(&pdev->dev, sizeof(*sp));
@@ -236,12 +251,39 @@ static int ath79_spi_probe(struct platform_device *pdev)
236 goto err_put_master; 251 goto err_put_master;
237 } 252 }
238 253
254 sp->clk = clk_get(&pdev->dev, "ahb");
255 if (IS_ERR(sp->clk)) {
256 ret = PTR_ERR(sp->clk);
257 goto err_unmap;
258 }
259
260 ret = clk_enable(sp->clk);
261 if (ret)
262 goto err_clk_put;
263
264 rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
265 if (!rate) {
266 ret = -EINVAL;
267 goto err_clk_disable;
268 }
269
270 sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
271 dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
272 sp->rrw_delay);
273
274 ath79_spi_enable(sp);
239 ret = spi_bitbang_start(&sp->bitbang); 275 ret = spi_bitbang_start(&sp->bitbang);
240 if (ret) 276 if (ret)
241 goto err_unmap; 277 goto err_disable;
242 278
243 return 0; 279 return 0;
244 280
281err_disable:
282 ath79_spi_disable(sp);
283err_clk_disable:
284 clk_disable(sp->clk);
285err_clk_put:
286 clk_put(sp->clk);
245err_unmap: 287err_unmap:
246 iounmap(sp->base); 288 iounmap(sp->base);
247err_put_master: 289err_put_master:
@@ -256,6 +298,9 @@ static int ath79_spi_remove(struct platform_device *pdev)
256 struct ath79_spi *sp = platform_get_drvdata(pdev); 298 struct ath79_spi *sp = platform_get_drvdata(pdev);
257 299
258 spi_bitbang_stop(&sp->bitbang); 300 spi_bitbang_stop(&sp->bitbang);
301 ath79_spi_disable(sp);
302 clk_disable(sp->clk);
303 clk_put(sp->clk);
259 iounmap(sp->base); 304 iounmap(sp->base);
260 platform_set_drvdata(pdev, NULL); 305 platform_set_drvdata(pdev, NULL);
261 spi_master_put(sp->bitbang.master); 306 spi_master_put(sp->bitbang.master);
@@ -263,9 +308,15 @@ static int ath79_spi_remove(struct platform_device *pdev)
263 return 0; 308 return 0;
264} 309}
265 310
311static void ath79_spi_shutdown(struct platform_device *pdev)
312{
313 ath79_spi_remove(pdev);
314}
315
266static struct platform_driver ath79_spi_driver = { 316static struct platform_driver ath79_spi_driver = {
267 .probe = ath79_spi_probe, 317 .probe = ath79_spi_probe,
268 .remove = ath79_spi_remove, 318 .remove = ath79_spi_remove,
319 .shutdown = ath79_spi_shutdown,
269 .driver = { 320 .driver = {
270 .name = DRV_NAME, 321 .name = DRV_NAME,
271 .owner = THIS_MODULE, 322 .owner = THIS_MODULE,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index ab34497bcfee..656d137db253 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1088,7 +1088,7 @@ static struct platform_driver atmel_spi_driver = {
1088 .suspend = atmel_spi_suspend, 1088 .suspend = atmel_spi_suspend,
1089 .resume = atmel_spi_resume, 1089 .resume = atmel_spi_resume,
1090 .probe = atmel_spi_probe, 1090 .probe = atmel_spi_probe,
1091 .remove = __exit_p(atmel_spi_remove), 1091 .remove = atmel_spi_remove,
1092}; 1092};
1093module_platform_driver(atmel_spi_driver); 1093module_platform_driver(atmel_spi_driver);
1094 1094
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 4de66d1cfe51..44dd34b6ad09 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -717,7 +717,7 @@ static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
717 } 717 }
718} 718}
719 719
720static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) 720static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
721{ 721{
722 u32 stat, cfg; 722 u32 stat, cfg;
723 723
@@ -766,7 +766,7 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
766} 766}
767 767
768 768
769static int __init au1550_spi_probe(struct platform_device *pdev) 769static int au1550_spi_probe(struct platform_device *pdev)
770{ 770{
771 struct au1550_spi *hw; 771 struct au1550_spi *hw;
772 struct spi_master *master; 772 struct spi_master *master;
@@ -968,7 +968,7 @@ err_nomem:
968 return err; 968 return err;
969} 969}
970 970
971static int __exit au1550_spi_remove(struct platform_device *pdev) 971static int au1550_spi_remove(struct platform_device *pdev)
972{ 972{
973 struct au1550_spi *hw = platform_get_drvdata(pdev); 973 struct au1550_spi *hw = platform_get_drvdata(pdev);
974 974
@@ -997,7 +997,7 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
997MODULE_ALIAS("platform:au1550-spi"); 997MODULE_ALIAS("platform:au1550-spi");
998 998
999static struct platform_driver au1550_spi_drv = { 999static struct platform_driver au1550_spi_drv = {
1000 .remove = __exit_p(au1550_spi_remove), 1000 .remove = au1550_spi_remove,
1001 .driver = { 1001 .driver = {
1002 .name = "au1550-spi", 1002 .name = "au1550-spi",
1003 .owner = THIS_MODULE, 1003 .owner = THIS_MODULE,
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index f44ab5508535..9578af782a77 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -37,6 +37,8 @@
37 37
38#define PFX KBUILD_MODNAME 38#define PFX KBUILD_MODNAME
39 39
40#define BCM63XX_SPI_MAX_PREPEND 15
41
40struct bcm63xx_spi { 42struct bcm63xx_spi {
41 struct completion done; 43 struct completion done;
42 44
@@ -49,16 +51,10 @@ struct bcm63xx_spi {
49 unsigned int msg_type_shift; 51 unsigned int msg_type_shift;
50 unsigned int msg_ctl_width; 52 unsigned int msg_ctl_width;
51 53
52 /* Data buffers */
53 const unsigned char *tx_ptr;
54 unsigned char *rx_ptr;
55
56 /* data iomem */ 54 /* data iomem */
57 u8 __iomem *tx_io; 55 u8 __iomem *tx_io;
58 const u8 __iomem *rx_io; 56 const u8 __iomem *rx_io;
59 57
60 int remaining_bytes;
61
62 struct clk *clk; 58 struct clk *clk;
63 struct platform_device *pdev; 59 struct platform_device *pdev;
64}; 60};
@@ -175,24 +171,17 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
175 return 0; 171 return 0;
176} 172}
177 173
178/* Fill the TX FIFO with as many bytes as possible */ 174static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
179static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs) 175 unsigned int num_transfers)
180{
181 u8 size;
182
183 /* Fill the Tx FIFO with as many bytes as possible */
184 size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
185 bs->fifo_size;
186 memcpy_toio(bs->tx_io, bs->tx_ptr, size);
187 bs->remaining_bytes -= size;
188}
189
190static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
191 struct spi_transfer *t)
192{ 176{
193 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); 177 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
194 u16 msg_ctl; 178 u16 msg_ctl;
195 u16 cmd; 179 u16 cmd;
180 u8 rx_tail;
181 unsigned int i, timeout = 0, prepend_len = 0, len = 0;
182 struct spi_transfer *t = first;
183 bool do_rx = false;
184 bool do_tx = false;
196 185
197 /* Disable the CMD_DONE interrupt */ 186 /* Disable the CMD_DONE interrupt */
198 bcm_spi_writeb(bs, 0, SPI_INT_MASK); 187 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -200,25 +189,45 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
200 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", 189 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
201 t->tx_buf, t->rx_buf, t->len); 190 t->tx_buf, t->rx_buf, t->len);
202 191
203 /* Transmitter is inhibited */ 192 if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
204 bs->tx_ptr = t->tx_buf; 193 prepend_len = t->len;
205 bs->rx_ptr = t->rx_buf;
206 194
207 if (t->tx_buf) { 195 /* prepare the buffer */
208 bs->remaining_bytes = t->len; 196 for (i = 0; i < num_transfers; i++) {
209 bcm63xx_spi_fill_tx_fifo(bs); 197 if (t->tx_buf) {
198 do_tx = true;
199 memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
200
201 /* don't prepend more than one tx */
202 if (t != first)
203 prepend_len = 0;
204 }
205
206 if (t->rx_buf) {
207 do_rx = true;
208 /* prepend is half-duplex write only */
209 if (t == first)
210 prepend_len = 0;
211 }
212
213 len += t->len;
214
215 t = list_entry(t->transfer_list.next, struct spi_transfer,
216 transfer_list);
210 } 217 }
211 218
219 len -= prepend_len;
220
212 init_completion(&bs->done); 221 init_completion(&bs->done);
213 222
214 /* Fill in the Message control register */ 223 /* Fill in the Message control register */
215 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); 224 msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
216 225
217 if (t->rx_buf && t->tx_buf) 226 if (do_rx && do_tx && prepend_len == 0)
218 msg_ctl |= (SPI_FD_RW << bs->msg_type_shift); 227 msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
219 else if (t->rx_buf) 228 else if (do_rx)
220 msg_ctl |= (SPI_HD_R << bs->msg_type_shift); 229 msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
221 else if (t->tx_buf) 230 else if (do_tx)
222 msg_ctl |= (SPI_HD_W << bs->msg_type_shift); 231 msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
223 232
224 switch (bs->msg_ctl_width) { 233 switch (bs->msg_ctl_width) {
@@ -232,14 +241,41 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
232 241
233 /* Issue the transfer */ 242 /* Issue the transfer */
234 cmd = SPI_CMD_START_IMMEDIATE; 243 cmd = SPI_CMD_START_IMMEDIATE;
235 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); 244 cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
236 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); 245 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
237 bcm_spi_writew(bs, cmd, SPI_CMD); 246 bcm_spi_writew(bs, cmd, SPI_CMD);
238 247
239 /* Enable the CMD_DONE interrupt */ 248 /* Enable the CMD_DONE interrupt */
240 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); 249 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
241 250
242 return t->len - bs->remaining_bytes; 251 timeout = wait_for_completion_timeout(&bs->done, HZ);
252 if (!timeout)
253 return -ETIMEDOUT;
254
255 /* read out all data */
256 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
257
258 if (do_rx && rx_tail != len)
259 return -EIO;
260
261 if (!rx_tail)
262 return 0;
263
264 len = 0;
265 t = first;
266 /* Read out all the data */
267 for (i = 0; i < num_transfers; i++) {
268 if (t->rx_buf)
269 memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
270
271 if (t != first || prepend_len == 0)
272 len += t->len;
273
274 t = list_entry(t->transfer_list.next, struct spi_transfer,
275 transfer_list);
276 }
277
278 return 0;
243} 279}
244 280
245static int bcm63xx_spi_prepare_transfer(struct spi_master *master) 281static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
@@ -264,41 +300,76 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
264 struct spi_message *m) 300 struct spi_message *m)
265{ 301{
266 struct bcm63xx_spi *bs = spi_master_get_devdata(master); 302 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
267 struct spi_transfer *t; 303 struct spi_transfer *t, *first = NULL;
268 struct spi_device *spi = m->spi; 304 struct spi_device *spi = m->spi;
269 int status = 0; 305 int status = 0;
270 unsigned int timeout = 0; 306 unsigned int n_transfers = 0, total_len = 0;
271 307 bool can_use_prepend = false;
308
309 /*
310 * This SPI controller does not support keeping CS active after a
311 * transfer.
312 * Work around this by merging as many transfers we can into one big
313 * full-duplex transfers.
314 */
272 list_for_each_entry(t, &m->transfers, transfer_list) { 315 list_for_each_entry(t, &m->transfers, transfer_list) {
273 unsigned int len = t->len;
274 u8 rx_tail;
275
276 status = bcm63xx_spi_check_transfer(spi, t); 316 status = bcm63xx_spi_check_transfer(spi, t);
277 if (status < 0) 317 if (status < 0)
278 goto exit; 318 goto exit;
279 319
280 /* configure adapter for a new transfer */ 320 if (!first)
281 bcm63xx_spi_setup_transfer(spi, t); 321 first = t;
322
323 n_transfers++;
324 total_len += t->len;
325
326 if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
327 first->len <= BCM63XX_SPI_MAX_PREPEND)
328 can_use_prepend = true;
329 else if (can_use_prepend && t->tx_buf)
330 can_use_prepend = false;
331
332 /* we can only transfer one fifo worth of data */
333 if ((can_use_prepend &&
334 total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
335 (!can_use_prepend && total_len > bs->fifo_size)) {
336 dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
337 total_len, bs->fifo_size);
338 status = -EINVAL;
339 goto exit;
340 }
341
342 /* all combined transfers have to have the same speed */
343 if (t->speed_hz != first->speed_hz) {
344 dev_err(&spi->dev, "unable to change speed between transfers\n");
345 status = -EINVAL;
346 goto exit;
347 }
282 348
283 while (len) { 349 /* CS will be deasserted directly after transfer */
284 /* send the data */ 350 if (t->delay_usecs) {
285 len -= bcm63xx_txrx_bufs(spi, t); 351 dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
352 status = -EINVAL;
353 goto exit;
354 }
355
356 if (t->cs_change ||
357 list_is_last(&t->transfer_list, &m->transfers)) {
358 /* configure adapter for a new transfer */
359 bcm63xx_spi_setup_transfer(spi, first);
286 360
287 timeout = wait_for_completion_timeout(&bs->done, HZ); 361 /* send the data */
288 if (!timeout) { 362 status = bcm63xx_txrx_bufs(spi, first, n_transfers);
289 status = -ETIMEDOUT; 363 if (status)
290 goto exit; 364 goto exit;
291 }
292 365
293 /* read out all data */ 366 m->actual_length += total_len;
294 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
295 367
296 /* Read out all the data */ 368 first = NULL;
297 if (rx_tail) 369 n_transfers = 0;
298 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail); 370 total_len = 0;
371 can_use_prepend = false;
299 } 372 }
300
301 m->actual_length += t->len;
302 } 373 }
303exit: 374exit:
304 m->status = status; 375 m->status = status;
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index ac7ffca7ba47..39b0d1711b4e 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -416,8 +416,7 @@ bfin_sport_spi_pump_transfers(unsigned long data)
416 drv_data->cs_change = transfer->cs_change; 416 drv_data->cs_change = transfer->cs_change;
417 417
418 /* Bits per word setup */ 418 /* Bits per word setup */
419 bits_per_word = transfer->bits_per_word ? : 419 bits_per_word = transfer->bits_per_word;
420 message->spi->bits_per_word ? : 8;
421 if (bits_per_word % 16 == 0) 420 if (bits_per_word % 16 == 0)
422 drv_data->ops = &bfin_sport_transfer_ops_u16; 421 drv_data->ops = &bfin_sport_transfer_ops_u16;
423 else 422 else
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 0429d833f75b..317f564c899c 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -642,8 +642,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
642 drv_data->cs_change = transfer->cs_change; 642 drv_data->cs_change = transfer->cs_change;
643 643
644 /* Bits per word setup */ 644 /* Bits per word setup */
645 bits_per_word = transfer->bits_per_word ? : 645 bits_per_word = transfer->bits_per_word;
646 message->spi->bits_per_word ? : 8;
647 if (bits_per_word % 16 == 0) { 646 if (bits_per_word % 16 == 0) {
648 drv_data->n_bytes = bits_per_word/8; 647 drv_data->n_bytes = bits_per_word/8;
649 drv_data->len = (transfer->len) >> 1; 648 drv_data->len = (transfer->len) >> 1;
@@ -1274,7 +1273,7 @@ static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
1274 return 0; 1273 return 0;
1275} 1274}
1276 1275
1277static int __init bfin_spi_probe(struct platform_device *pdev) 1276static int bfin_spi_probe(struct platform_device *pdev)
1278{ 1277{
1279 struct device *dev = &pdev->dev; 1278 struct device *dev = &pdev->dev;
1280 struct bfin5xx_spi_master *platform_info; 1279 struct bfin5xx_spi_master *platform_info;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 8b3d8efafd3c..a63d7da3bfe2 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -69,7 +69,7 @@ static unsigned bitbang_txrx_8(
69 unsigned ns, 69 unsigned ns,
70 struct spi_transfer *t 70 struct spi_transfer *t
71) { 71) {
72 unsigned bits = t->bits_per_word ? : spi->bits_per_word; 72 unsigned bits = t->bits_per_word;
73 unsigned count = t->len; 73 unsigned count = t->len;
74 const u8 *tx = t->tx_buf; 74 const u8 *tx = t->tx_buf;
75 u8 *rx = t->rx_buf; 75 u8 *rx = t->rx_buf;
@@ -95,7 +95,7 @@ static unsigned bitbang_txrx_16(
95 unsigned ns, 95 unsigned ns,
96 struct spi_transfer *t 96 struct spi_transfer *t
97) { 97) {
98 unsigned bits = t->bits_per_word ? : spi->bits_per_word; 98 unsigned bits = t->bits_per_word;
99 unsigned count = t->len; 99 unsigned count = t->len;
100 const u16 *tx = t->tx_buf; 100 const u16 *tx = t->tx_buf;
101 u16 *rx = t->rx_buf; 101 u16 *rx = t->rx_buf;
@@ -121,7 +121,7 @@ static unsigned bitbang_txrx_32(
121 unsigned ns, 121 unsigned ns,
122 struct spi_transfer *t 122 struct spi_transfer *t
123) { 123) {
124 unsigned bits = t->bits_per_word ? : spi->bits_per_word; 124 unsigned bits = t->bits_per_word;
125 unsigned count = t->len; 125 unsigned count = t->len;
126 const u32 *tx = t->tx_buf; 126 const u32 *tx = t->tx_buf;
127 u32 *rx = t->rx_buf; 127 u32 *rx = t->rx_buf;
@@ -427,40 +427,41 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
427 */ 427 */
428int spi_bitbang_start(struct spi_bitbang *bitbang) 428int spi_bitbang_start(struct spi_bitbang *bitbang)
429{ 429{
430 int status; 430 struct spi_master *master = bitbang->master;
431 int status;
431 432
432 if (!bitbang->master || !bitbang->chipselect) 433 if (!master || !bitbang->chipselect)
433 return -EINVAL; 434 return -EINVAL;
434 435
435 INIT_WORK(&bitbang->work, bitbang_work); 436 INIT_WORK(&bitbang->work, bitbang_work);
436 spin_lock_init(&bitbang->lock); 437 spin_lock_init(&bitbang->lock);
437 INIT_LIST_HEAD(&bitbang->queue); 438 INIT_LIST_HEAD(&bitbang->queue);
438 439
439 if (!bitbang->master->mode_bits) 440 if (!master->mode_bits)
440 bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; 441 master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
441 442
442 if (!bitbang->master->transfer) 443 if (!master->transfer)
443 bitbang->master->transfer = spi_bitbang_transfer; 444 master->transfer = spi_bitbang_transfer;
444 if (!bitbang->txrx_bufs) { 445 if (!bitbang->txrx_bufs) {
445 bitbang->use_dma = 0; 446 bitbang->use_dma = 0;
446 bitbang->txrx_bufs = spi_bitbang_bufs; 447 bitbang->txrx_bufs = spi_bitbang_bufs;
447 if (!bitbang->master->setup) { 448 if (!master->setup) {
448 if (!bitbang->setup_transfer) 449 if (!bitbang->setup_transfer)
449 bitbang->setup_transfer = 450 bitbang->setup_transfer =
450 spi_bitbang_setup_transfer; 451 spi_bitbang_setup_transfer;
451 bitbang->master->setup = spi_bitbang_setup; 452 master->setup = spi_bitbang_setup;
452 bitbang->master->cleanup = spi_bitbang_cleanup; 453 master->cleanup = spi_bitbang_cleanup;
453 } 454 }
454 } else if (!bitbang->master->setup) 455 } else if (!master->setup)
455 return -EINVAL; 456 return -EINVAL;
456 if (bitbang->master->transfer == spi_bitbang_transfer && 457 if (master->transfer == spi_bitbang_transfer &&
457 !bitbang->setup_transfer) 458 !bitbang->setup_transfer)
458 return -EINVAL; 459 return -EINVAL;
459 460
460 /* this task is the only thing to touch the SPI bits */ 461 /* this task is the only thing to touch the SPI bits */
461 bitbang->busy = 0; 462 bitbang->busy = 0;
462 bitbang->workqueue = create_singlethread_workqueue( 463 bitbang->workqueue = create_singlethread_workqueue(
463 dev_name(bitbang->master->dev.parent)); 464 dev_name(master->dev.parent));
464 if (bitbang->workqueue == NULL) { 465 if (bitbang->workqueue == NULL) {
465 status = -EBUSY; 466 status = -EBUSY;
466 goto err1; 467 goto err1;
@@ -469,7 +470,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
469 /* driver may get busy before register() returns, especially 470 /* driver may get busy before register() returns, especially
470 * if someone registered boardinfo for devices 471 * if someone registered boardinfo for devices
471 */ 472 */
472 status = spi_register_master(bitbang->master); 473 status = spi_register_master(master);
473 if (status < 0) 474 if (status < 0)
474 goto err2; 475 goto err2;
475 476
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 1366c4620d5d..a11cbf02691a 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -68,7 +68,7 @@ static int spi_clps711x_setup_xfer(struct spi_device *spi,
68 struct spi_transfer *xfer) 68 struct spi_transfer *xfer)
69{ 69{
70 u32 speed = xfer->speed_hz ? : spi->max_speed_hz; 70 u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
71 u8 bpw = xfer->bits_per_word ? : spi->bits_per_word; 71 u8 bpw = xfer->bits_per_word;
72 struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master); 72 struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
73 73
74 if (bpw != 8) { 74 if (bpw != 8) {
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 58466b810da4..7b5cc9e4e94d 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -329,8 +329,7 @@ static int mcfqspi_transfer_one_message(struct spi_master *master,
329 mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); 329 mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
330 330
331 mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); 331 mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
332 if ((t->bits_per_word ? t->bits_per_word : 332 if (t->bits_per_word == 8)
333 spi->bits_per_word) == 8)
334 mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, 333 mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
335 t->rx_buf); 334 t->rx_buf);
336 else 335 else
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 13661e129d96..8234d2259722 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -28,6 +28,8 @@
28#include <linux/dmaengine.h> 28#include <linux/dmaengine.h>
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/edma.h> 30#include <linux/edma.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
31#include <linux/spi/spi.h> 33#include <linux/spi/spi.h>
32#include <linux/spi/spi_bitbang.h> 34#include <linux/spi/spi_bitbang.h>
33#include <linux/slab.h> 35#include <linux/slab.h>
@@ -135,7 +137,7 @@ struct davinci_spi {
135 int dma_rx_chnum; 137 int dma_rx_chnum;
136 int dma_tx_chnum; 138 int dma_tx_chnum;
137 139
138 struct davinci_spi_platform_data *pdata; 140 struct davinci_spi_platform_data pdata;
139 141
140 void (*get_rx)(u32 rx_data, struct davinci_spi *); 142 void (*get_rx)(u32 rx_data, struct davinci_spi *);
141 u32 (*get_tx)(struct davinci_spi *); 143 u32 (*get_tx)(struct davinci_spi *);
@@ -213,7 +215,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
213 bool gpio_chipsel = false; 215 bool gpio_chipsel = false;
214 216
215 dspi = spi_master_get_devdata(spi->master); 217 dspi = spi_master_get_devdata(spi->master);
216 pdata = dspi->pdata; 218 pdata = &dspi->pdata;
217 219
218 if (pdata->chip_sel && chip_sel < pdata->num_chipselect && 220 if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
219 pdata->chip_sel[chip_sel] != SPI_INTERN_CS) 221 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
@@ -392,7 +394,7 @@ static int davinci_spi_setup(struct spi_device *spi)
392 struct davinci_spi_platform_data *pdata; 394 struct davinci_spi_platform_data *pdata;
393 395
394 dspi = spi_master_get_devdata(spi->master); 396 dspi = spi_master_get_devdata(spi->master);
395 pdata = dspi->pdata; 397 pdata = &dspi->pdata;
396 398
397 /* if bits per word length is zero then set it default 8 */ 399 /* if bits per word length is zero then set it default 8 */
398 if (!spi->bits_per_word) 400 if (!spi->bits_per_word)
@@ -534,7 +536,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
534 struct scatterlist sg_rx, sg_tx; 536 struct scatterlist sg_rx, sg_tx;
535 537
536 dspi = spi_master_get_devdata(spi->master); 538 dspi = spi_master_get_devdata(spi->master);
537 pdata = dspi->pdata; 539 pdata = &dspi->pdata;
538 spicfg = (struct davinci_spi_config *)spi->controller_data; 540 spicfg = (struct davinci_spi_config *)spi->controller_data;
539 if (!spicfg) 541 if (!spicfg)
540 spicfg = &davinci_spi_default_cfg; 542 spicfg = &davinci_spi_default_cfg;
@@ -700,6 +702,19 @@ err_alloc_dummy_buf:
700} 702}
701 703
702/** 704/**
705 * dummy_thread_fn - dummy thread function
706 * @irq: IRQ number for this SPI Master
707 * @context_data: structure for SPI Master controller davinci_spi
708 *
709 * This is to satisfy the request_threaded_irq() API so that the irq
710 * handler is called in interrupt context.
711 */
712static irqreturn_t dummy_thread_fn(s32 irq, void *data)
713{
714 return IRQ_HANDLED;
715}
716
717/**
703 * davinci_spi_irq - Interrupt handler for SPI Master Controller 718 * davinci_spi_irq - Interrupt handler for SPI Master Controller
704 * @irq: IRQ number for this SPI Master 719 * @irq: IRQ number for this SPI Master
705 * @context_data: structure for SPI Master controller davinci_spi 720 * @context_data: structure for SPI Master controller davinci_spi
@@ -758,6 +773,70 @@ rx_dma_failed:
758 return r; 773 return r;
759} 774}
760 775
776#if defined(CONFIG_OF)
777static const struct of_device_id davinci_spi_of_match[] = {
778 {
779 .compatible = "ti,dm644x-spi",
780 },
781 {
782 .compatible = "ti,da8xx-spi",
783 .data = (void *)SPI_VERSION_2,
784 },
785 { },
786};
787MODULE_DEVICE_TABLE(of, davini_spi_of_match);
788
789/**
790 * spi_davinci_get_pdata - Get platform data from DTS binding
791 * @pdev: ptr to platform data
792 * @dspi: ptr to driver data
793 *
794 * Parses and populates pdata in dspi from device tree bindings.
795 *
796 * NOTE: Not all platform data params are supported currently.
797 */
798static int spi_davinci_get_pdata(struct platform_device *pdev,
799 struct davinci_spi *dspi)
800{
801 struct device_node *node = pdev->dev.of_node;
802 struct davinci_spi_platform_data *pdata;
803 unsigned int num_cs, intr_line = 0;
804 const struct of_device_id *match;
805
806 pdata = &dspi->pdata;
807
808 pdata->version = SPI_VERSION_1;
809 match = of_match_device(of_match_ptr(davinci_spi_of_match),
810 &pdev->dev);
811 if (!match)
812 return -ENODEV;
813
814 /* match data has the SPI version number for SPI_VERSION_2 */
815 if (match->data == (void *)SPI_VERSION_2)
816 pdata->version = SPI_VERSION_2;
817
818 /*
819 * default num_cs is 1 and all chipsel are internal to the chip
820 * indicated by chip_sel being NULL. GPIO based CS is not
821 * supported yet in DT bindings.
822 */
823 num_cs = 1;
824 of_property_read_u32(node, "num-cs", &num_cs);
825 pdata->num_chipselect = num_cs;
826 of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
827 pdata->intr_line = intr_line;
828 return 0;
829}
830#else
831#define davinci_spi_of_match NULL
832static struct davinci_spi_platform_data
833 *spi_davinci_get_pdata(struct platform_device *pdev,
834 struct davinci_spi *dspi)
835{
836 return -ENODEV;
837}
838#endif
839
761/** 840/**
762 * davinci_spi_probe - probe function for SPI Master Controller 841 * davinci_spi_probe - probe function for SPI Master Controller
763 * @pdev: platform_device structure which contains plateform specific data 842 * @pdev: platform_device structure which contains plateform specific data
@@ -780,12 +859,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
780 int i = 0, ret = 0; 859 int i = 0, ret = 0;
781 u32 spipc0; 860 u32 spipc0;
782 861
783 pdata = pdev->dev.platform_data;
784 if (pdata == NULL) {
785 ret = -ENODEV;
786 goto err;
787 }
788
789 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); 862 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
790 if (master == NULL) { 863 if (master == NULL) {
791 ret = -ENOMEM; 864 ret = -ENOMEM;
@@ -800,6 +873,19 @@ static int davinci_spi_probe(struct platform_device *pdev)
800 goto free_master; 873 goto free_master;
801 } 874 }
802 875
876 if (pdev->dev.platform_data) {
877 pdata = pdev->dev.platform_data;
878 dspi->pdata = *pdata;
879 } else {
880 /* update dspi pdata with that from the DT */
881 ret = spi_davinci_get_pdata(pdev, dspi);
882 if (ret < 0)
883 goto free_master;
884 }
885
886 /* pdata in dspi is now updated and point pdata to that */
887 pdata = &dspi->pdata;
888
803 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 889 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
804 if (r == NULL) { 890 if (r == NULL) {
805 ret = -ENOENT; 891 ret = -ENOENT;
@@ -807,7 +893,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
807 } 893 }
808 894
809 dspi->pbase = r->start; 895 dspi->pbase = r->start;
810 dspi->pdata = pdata;
811 896
812 mem = request_mem_region(r->start, resource_size(r), pdev->name); 897 mem = request_mem_region(r->start, resource_size(r), pdev->name);
813 if (mem == NULL) { 898 if (mem == NULL) {
@@ -827,8 +912,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
827 goto unmap_io; 912 goto unmap_io;
828 } 913 }
829 914
830 ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), 915 ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
831 dspi); 916 0, dev_name(&pdev->dev), dspi);
832 if (ret) 917 if (ret)
833 goto unmap_io; 918 goto unmap_io;
834 919
@@ -843,8 +928,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
843 ret = -ENODEV; 928 ret = -ENODEV;
844 goto put_master; 929 goto put_master;
845 } 930 }
846 clk_enable(dspi->clk); 931 clk_prepare_enable(dspi->clk);
847 932
933 master->dev.of_node = pdev->dev.of_node;
848 master->bus_num = pdev->id; 934 master->bus_num = pdev->id;
849 master->num_chipselect = pdata->num_chipselect; 935 master->num_chipselect = pdata->num_chipselect;
850 master->setup = davinci_spi_setup; 936 master->setup = davinci_spi_setup;
@@ -927,7 +1013,7 @@ free_dma:
927 dma_release_channel(dspi->dma_rx); 1013 dma_release_channel(dspi->dma_rx);
928 dma_release_channel(dspi->dma_tx); 1014 dma_release_channel(dspi->dma_tx);
929free_clk: 1015free_clk:
930 clk_disable(dspi->clk); 1016 clk_disable_unprepare(dspi->clk);
931 clk_put(dspi->clk); 1017 clk_put(dspi->clk);
932put_master: 1018put_master:
933 spi_master_put(master); 1019 spi_master_put(master);
@@ -963,7 +1049,7 @@ static int davinci_spi_remove(struct platform_device *pdev)
963 1049
964 spi_bitbang_stop(&dspi->bitbang); 1050 spi_bitbang_stop(&dspi->bitbang);
965 1051
966 clk_disable(dspi->clk); 1052 clk_disable_unprepare(dspi->clk);
967 clk_put(dspi->clk); 1053 clk_put(dspi->clk);
968 spi_master_put(master); 1054 spi_master_put(master);
969 free_irq(dspi->irq, dspi); 1055 free_irq(dspi->irq, dspi);
@@ -978,6 +1064,7 @@ static struct platform_driver davinci_spi_driver = {
978 .driver = { 1064 .driver = {
979 .name = "spi_davinci", 1065 .name = "spi_davinci",
980 .owner = THIS_MODULE, 1066 .owner = THIS_MODULE,
1067 .of_match_table = davinci_spi_of_match,
981 }, 1068 },
982 .probe = davinci_spi_probe, 1069 .probe = davinci_spi_probe,
983 .remove = davinci_spi_remove, 1070 .remove = davinci_spi_remove,
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index acb1e1935c5a..aecbff16ad60 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -446,7 +446,7 @@ static inline int bits_per_word(const struct ep93xx_spi *espi)
446 struct spi_message *msg = espi->current_msg; 446 struct spi_message *msg = espi->current_msg;
447 struct spi_transfer *t = msg->state; 447 struct spi_transfer *t = msg->state;
448 448
449 return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word; 449 return t->bits_per_word;
450} 450}
451 451
452static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) 452static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 6a6f62ec2840..c7a74f0ef892 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -398,7 +398,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
398 } 398 }
399 399
400 m->status = ret; 400 m->status = ret;
401 m->complete(m->context); 401 spi_finalize_current_message(master);
402 402
403 return 0; 403 return 0;
404} 404}
@@ -423,6 +423,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
423 423
424 master->mode_bits = SPI_MODE_3; 424 master->mode_bits = SPI_MODE_3;
425 master->num_chipselect = 1; 425 master->num_chipselect = 1;
426 master->flags = SPI_MASTER_HALF_DUPLEX;
426 master->bus_num = -1; 427 master->bus_num = -1;
427 master->setup = falcon_sflash_setup; 428 master->setup = falcon_sflash_setup;
428 master->prepare_transfer_hardware = falcon_sflash_prepare_xfer; 429 master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 1a7f6359d998..086a9eef2e05 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -947,12 +947,12 @@ static int of_fsl_spi_get_chipselects(struct device *dev)
947 struct device_node *np = dev->of_node; 947 struct device_node *np = dev->of_node;
948 struct fsl_spi_platform_data *pdata = dev->platform_data; 948 struct fsl_spi_platform_data *pdata = dev->platform_data;
949 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); 949 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
950 unsigned int ngpios; 950 int ngpios;
951 int i = 0; 951 int i = 0;
952 int ret; 952 int ret;
953 953
954 ngpios = of_gpio_count(np); 954 ngpios = of_gpio_count(np);
955 if (!ngpios) { 955 if (ngpios <= 0) {
956 /* 956 /*
957 * SPI w/o chip-select line. One SPI device is still permitted 957 * SPI w/o chip-select line. One SPI device is still permitted
958 * though. 958 * though.
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index c7cf0b7a069b..9ddef55a7165 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -365,9 +365,26 @@ static int spi_gpio_probe_dt(struct platform_device *pdev)
365 if (!pdata) 365 if (!pdata)
366 return -ENOMEM; 366 return -ENOMEM;
367 367
368 pdata->sck = of_get_named_gpio(np, "gpio-sck", 0); 368 ret = of_get_named_gpio(np, "gpio-sck", 0);
369 pdata->miso = of_get_named_gpio(np, "gpio-miso", 0); 369 if (ret < 0) {
370 pdata->mosi = of_get_named_gpio(np, "gpio-mosi", 0); 370 dev_err(&pdev->dev, "gpio-sck property not found\n");
371 goto error_free;
372 }
373 pdata->sck = ret;
374
375 ret = of_get_named_gpio(np, "gpio-miso", 0);
376 if (ret < 0) {
377 dev_info(&pdev->dev, "gpio-miso property not found, switching to no-rx mode\n");
378 pdata->miso = SPI_GPIO_NO_MISO;
379 } else
380 pdata->miso = ret;
381
382 ret = of_get_named_gpio(np, "gpio-mosi", 0);
383 if (ret < 0) {
384 dev_info(&pdev->dev, "gpio-mosi property not found, switching to no-tx mode\n");
385 pdata->mosi = SPI_GPIO_NO_MOSI;
386 } else
387 pdata->mosi = ret;
371 388
372 ret = of_property_read_u32(np, "num-chipselects", &tmp); 389 ret = of_property_read_u32(np, "num-chipselects", &tmp);
373 if (ret < 0) { 390 if (ret < 0) {
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 904913290aa5..0befeeb522f4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -949,3 +949,4 @@ module_platform_driver(spi_imx_driver);
949MODULE_DESCRIPTION("SPI Master Controller driver"); 949MODULE_DESCRIPTION("SPI Master Controller driver");
950MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 950MODULE_AUTHOR("Sascha Hauer, Pengutronix");
951MODULE_LICENSE("GPL"); 951MODULE_LICENSE("GPL");
952MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index cb3a3106bd4f..89480b281d74 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -438,6 +438,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
438 master->num_chipselect = pdata->max_chipselect; 438 master->num_chipselect = pdata->max_chipselect;
439 } 439 }
440 440
441 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
441 master->setup = mpc512x_psc_spi_setup; 442 master->setup = mpc512x_psc_spi_setup;
442 master->transfer = mpc512x_psc_spi_transfer; 443 master->transfer = mpc512x_psc_spi_transfer;
443 master->cleanup = mpc512x_psc_spi_cleanup; 444 master->cleanup = mpc512x_psc_spi_cleanup;
@@ -522,17 +523,11 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
522 regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); 523 regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
523 524
524 /* get PSC id (0..11, used by port_config) */ 525 /* get PSC id (0..11, used by port_config) */
525 if (op->dev.platform_data == NULL) { 526 id = of_alias_get_id(op->dev.of_node, "spi");
526 const u32 *psc_nump; 527 if (id < 0) {
527 528 dev_err(&op->dev, "no alias id for %s\n",
528 psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); 529 op->dev.of_node->full_name);
529 if (!psc_nump || *psc_nump > 11) { 530 return id;
530 dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
531 "has invalid cell-index property\n",
532 op->dev.of_node->full_name);
533 return -EINVAL;
534 }
535 id = *psc_nump;
536 } 531 }
537 532
538 return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, 533 return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index a3ede249d05d..e3d8b3197d22 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -241,6 +241,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
241 INIT_COMPLETION(spi->c); 241 INIT_COMPLETION(spi->c);
242 242
243 ctrl0 = readl(ssp->base + HW_SSP_CTRL0); 243 ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
244 ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
244 ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs); 245 ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
245 246
246 if (*first) 247 if (*first)
@@ -256,8 +257,10 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
256 if ((sg_count + 1 == sgs) && *last) 257 if ((sg_count + 1 == sgs) && *last)
257 ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC; 258 ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
258 259
259 if (ssp->devid == IMX23_SSP) 260 if (ssp->devid == IMX23_SSP) {
261 ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
260 ctrl0 |= min; 262 ctrl0 |= min;
263 }
261 264
262 dma_xfer[sg_count].pio[0] = ctrl0; 265 dma_xfer[sg_count].pio[0] = ctrl0;
263 dma_xfer[sg_count].pio[3] = min; 266 dma_xfer[sg_count].pio[3] = min;
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 432e66ec3088..cb2e284bd814 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -54,7 +54,7 @@ struct tiny_spi {
54 unsigned int txc, rxc; 54 unsigned int txc, rxc;
55 const u8 *txp; 55 const u8 *txp;
56 u8 *rxp; 56 u8 *rxp;
57 unsigned int gpio_cs_count; 57 int gpio_cs_count;
58 int *gpio_cs; 58 int *gpio_cs;
59}; 59};
60 60
@@ -74,7 +74,7 @@ static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
74{ 74{
75 struct tiny_spi *hw = tiny_spi_to_hw(spi); 75 struct tiny_spi *hw = tiny_spi_to_hw(spi);
76 76
77 if (hw->gpio_cs_count) { 77 if (hw->gpio_cs_count > 0) {
78 gpio_set_value(hw->gpio_cs[spi->chip_select], 78 gpio_set_value(hw->gpio_cs[spi->chip_select],
79 (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); 79 (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
80 } 80 }
@@ -254,7 +254,7 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
254 if (!np) 254 if (!np)
255 return 0; 255 return 0;
256 hw->gpio_cs_count = of_gpio_count(np); 256 hw->gpio_cs_count = of_gpio_count(np);
257 if (hw->gpio_cs_count) { 257 if (hw->gpio_cs_count > 0) {
258 hw->gpio_cs = devm_kzalloc(&pdev->dev, 258 hw->gpio_cs = devm_kzalloc(&pdev->dev,
259 hw->gpio_cs_count * sizeof(unsigned int), 259 hw->gpio_cs_count * sizeof(unsigned int),
260 GFP_KERNEL); 260 GFP_KERNEL);
@@ -352,7 +352,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
352 goto exit_gpio; 352 goto exit_gpio;
353 gpio_direction_output(hw->gpio_cs[i], 1); 353 gpio_direction_output(hw->gpio_cs[i], 1);
354 } 354 }
355 hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count); 355 hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
356 356
357 /* register our spi controller */ 357 /* register our spi controller */
358 err = spi_bitbang_start(&hw->bitbang); 358 err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 3aef7fa7d5b8..78d29a18dcc4 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -481,7 +481,7 @@ static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
481 return 0; 481 return 0;
482} 482}
483 483
484static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) 484static int omap1_spi100k_reset(struct omap1_spi100k *spi100k)
485{ 485{
486 return 0; 486 return 0;
487} 487}
@@ -560,7 +560,7 @@ err1:
560 return status; 560 return status;
561} 561}
562 562
563static int __exit omap1_spi100k_remove(struct platform_device *pdev) 563static int omap1_spi100k_remove(struct platform_device *pdev)
564{ 564{
565 struct spi_master *master; 565 struct spi_master *master;
566 struct omap1_spi100k *spi100k; 566 struct omap1_spi100k *spi100k;
@@ -604,7 +604,7 @@ static struct platform_driver omap1_spi100k_driver = {
604 .name = "omap1_spi100k", 604 .name = "omap1_spi100k",
605 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
606 }, 606 },
607 .remove = __exit_p(omap1_spi100k_remove), 607 .remove = omap1_spi100k_remove,
608}; 608};
609 609
610 610
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 0a94d9dc9c31..102b233b50c4 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -476,7 +476,7 @@ static void uwire_off(struct uwire_spi *uwire)
476 spi_master_put(uwire->bitbang.master); 476 spi_master_put(uwire->bitbang.master);
477} 477}
478 478
479static int __init uwire_probe(struct platform_device *pdev) 479static int uwire_probe(struct platform_device *pdev)
480{ 480{
481 struct spi_master *master; 481 struct spi_master *master;
482 struct uwire_spi *uwire; 482 struct uwire_spi *uwire;
@@ -536,7 +536,7 @@ static int __init uwire_probe(struct platform_device *pdev)
536 return status; 536 return status;
537} 537}
538 538
539static int __exit uwire_remove(struct platform_device *pdev) 539static int uwire_remove(struct platform_device *pdev)
540{ 540{
541 struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev); 541 struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev);
542 int status; 542 int status;
@@ -557,7 +557,7 @@ static struct platform_driver uwire_driver = {
557 .name = "omap_uwire", 557 .name = "omap_uwire",
558 .owner = THIS_MODULE, 558 .owner = THIS_MODULE,
559 }, 559 },
560 .remove = __exit_p(uwire_remove), 560 .remove = uwire_remove,
561 // suspend ... unuse ck 561 // suspend ... unuse ck
562 // resume ... use ck 562 // resume ... use ck
563}; 563};
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index b610f522ca44..69945b014c96 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -298,10 +298,10 @@ static void omap2_mcspi_rx_callback(void *data)
298 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 298 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
299 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 299 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
300 300
301 complete(&mcspi_dma->dma_rx_completion);
302
303 /* We must disable the DMA RX request */ 301 /* We must disable the DMA RX request */
304 omap2_mcspi_set_dma_req(spi, 1, 0); 302 omap2_mcspi_set_dma_req(spi, 1, 0);
303
304 complete(&mcspi_dma->dma_rx_completion);
305} 305}
306 306
307static void omap2_mcspi_tx_callback(void *data) 307static void omap2_mcspi_tx_callback(void *data)
@@ -310,10 +310,10 @@ static void omap2_mcspi_tx_callback(void *data)
310 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 310 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
311 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 311 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
312 312
313 complete(&mcspi_dma->dma_tx_completion);
314
315 /* We must disable the DMA TX request */ 313 /* We must disable the DMA TX request */
316 omap2_mcspi_set_dma_req(spi, 0, 0); 314 omap2_mcspi_set_dma_req(spi, 0, 0);
315
316 complete(&mcspi_dma->dma_tx_completion);
317} 317}
318 318
319static void omap2_mcspi_tx_dma(struct spi_device *spi, 319static void omap2_mcspi_tx_dma(struct spi_device *spi,
@@ -927,6 +927,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
927 927
928 struct spi_device *spi; 928 struct spi_device *spi;
929 struct spi_transfer *t = NULL; 929 struct spi_transfer *t = NULL;
930 struct spi_master *master;
930 int cs_active = 0; 931 int cs_active = 0;
931 struct omap2_mcspi_cs *cs; 932 struct omap2_mcspi_cs *cs;
932 struct omap2_mcspi_device_config *cd; 933 struct omap2_mcspi_device_config *cd;
@@ -935,6 +936,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
935 u32 chconf; 936 u32 chconf;
936 937
937 spi = m->spi; 938 spi = m->spi;
939 master = spi->master;
938 cs = spi->controller_state; 940 cs = spi->controller_state;
939 cd = spi->controller_data; 941 cd = spi->controller_data;
940 942
@@ -952,6 +954,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
952 if (!t->speed_hz && !t->bits_per_word) 954 if (!t->speed_hz && !t->bits_per_word)
953 par_override = 0; 955 par_override = 0;
954 } 956 }
957 if (cd && cd->cs_per_word) {
958 chconf = mcspi->ctx.modulctrl;
959 chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
960 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
961 mcspi->ctx.modulctrl =
962 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
963 }
964
955 965
956 if (!cs_active) { 966 if (!cs_active) {
957 omap2_mcspi_force_cs(spi, 1); 967 omap2_mcspi_force_cs(spi, 1);
@@ -1013,6 +1023,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1013 if (cs_active) 1023 if (cs_active)
1014 omap2_mcspi_force_cs(spi, 0); 1024 omap2_mcspi_force_cs(spi, 0);
1015 1025
1026 if (cd && cd->cs_per_word) {
1027 chconf = mcspi->ctx.modulctrl;
1028 chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
1029 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1030 mcspi->ctx.modulctrl =
1031 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1032 }
1033
1016 omap2_mcspi_set_enable(spi, 0); 1034 omap2_mcspi_set_enable(spi, 0);
1017 1035
1018 m->status = status; 1036 m->status = status;
@@ -1020,7 +1038,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1020} 1038}
1021 1039
1022static int omap2_mcspi_transfer_one_message(struct spi_master *master, 1040static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1023 struct spi_message *m) 1041 struct spi_message *m)
1024{ 1042{
1025 struct omap2_mcspi *mcspi; 1043 struct omap2_mcspi *mcspi;
1026 struct spi_transfer *t; 1044 struct spi_transfer *t;
@@ -1041,7 +1059,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1041 || (len && !(rx_buf || tx_buf)) 1059 || (len && !(rx_buf || tx_buf))
1042 || (t->bits_per_word && 1060 || (t->bits_per_word &&
1043 ( t->bits_per_word < 4 1061 ( t->bits_per_word < 4
1044 || t->bits_per_word > 32))) { 1062 || t->bits_per_word > 32))) {
1045 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", 1063 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1046 t->speed_hz, 1064 t->speed_hz,
1047 len, 1065 len,
@@ -1052,8 +1070,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1052 } 1070 }
1053 if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) { 1071 if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1054 dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n", 1072 dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
1055 t->speed_hz, 1073 t->speed_hz,
1056 OMAP2_MCSPI_MAX_FREQ >> 15); 1074 OMAP2_MCSPI_MAX_FREQ >> 15);
1057 return -EINVAL; 1075 return -EINVAL;
1058 } 1076 }
1059 1077
@@ -1099,7 +1117,7 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1099 return ret; 1117 return ret;
1100 1118
1101 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, 1119 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1102 OMAP2_MCSPI_WAKEUPENABLE_WKEN); 1120 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1103 ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN; 1121 ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1104 1122
1105 omap2_mcspi_set_master_mode(master); 1123 omap2_mcspi_set_master_mode(master);
@@ -1228,7 +1246,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1228 1246
1229 sprintf(dma_ch_name, "rx%d", i); 1247 sprintf(dma_ch_name, "rx%d", i);
1230 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, 1248 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1231 dma_ch_name); 1249 dma_ch_name);
1232 if (!dma_res) { 1250 if (!dma_res) {
1233 dev_dbg(&pdev->dev, "cannot get DMA RX channel\n"); 1251 dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1234 status = -ENODEV; 1252 status = -ENODEV;
@@ -1238,7 +1256,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1238 mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; 1256 mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1239 sprintf(dma_ch_name, "tx%d", i); 1257 sprintf(dma_ch_name, "tx%d", i);
1240 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, 1258 dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1241 dma_ch_name); 1259 dma_ch_name);
1242 if (!dma_res) { 1260 if (!dma_res) {
1243 dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); 1261 dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1244 status = -ENODEV; 1262 status = -ENODEV;
@@ -1254,7 +1272,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1254 pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 1272 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1255 if (IS_ERR(pinctrl)) 1273 if (IS_ERR(pinctrl))
1256 dev_warn(&pdev->dev, 1274 dev_warn(&pdev->dev,
1257 "pins are not configured from the driver\n"); 1275 "pins are not configured from the driver\n");
1258 1276
1259 pm_runtime_use_autosuspend(&pdev->dev); 1277 pm_runtime_use_autosuspend(&pdev->dev);
1260 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); 1278 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b7e718254b1d..66a5f82cf138 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -366,7 +366,7 @@ msg_done:
366 return 0; 366 return 0;
367} 367}
368 368
369static int __init orion_spi_reset(struct orion_spi *orion_spi) 369static int orion_spi_reset(struct orion_spi *orion_spi)
370{ 370{
371 /* Verify that the CS is deasserted */ 371 /* Verify that the CS is deasserted */
372 orion_spi_set_cs(orion_spi, 0); 372 orion_spi_set_cs(orion_spi, 0);
@@ -396,7 +396,7 @@ static int orion_spi_setup(struct spi_device *spi)
396 return 0; 396 return 0;
397} 397}
398 398
399static int __init orion_spi_probe(struct platform_device *pdev) 399static int orion_spi_probe(struct platform_device *pdev)
400{ 400{
401 struct spi_master *master; 401 struct spi_master *master;
402 struct orion_spi *spi; 402 struct orion_spi *spi;
@@ -479,7 +479,7 @@ out:
479} 479}
480 480
481 481
482static int __exit orion_spi_remove(struct platform_device *pdev) 482static int orion_spi_remove(struct platform_device *pdev)
483{ 483{
484 struct spi_master *master; 484 struct spi_master *master;
485 struct resource *r; 485 struct resource *r;
@@ -513,20 +513,11 @@ static struct platform_driver orion_spi_driver = {
513 .owner = THIS_MODULE, 513 .owner = THIS_MODULE,
514 .of_match_table = of_match_ptr(orion_spi_of_match_table), 514 .of_match_table = of_match_ptr(orion_spi_of_match_table),
515 }, 515 },
516 .remove = __exit_p(orion_spi_remove), 516 .probe = orion_spi_probe,
517 .remove = orion_spi_remove,
517}; 518};
518 519
519static int __init orion_spi_init(void) 520module_platform_driver(orion_spi_driver);
520{
521 return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
522}
523module_init(orion_spi_init);
524
525static void __exit orion_spi_exit(void)
526{
527 platform_driver_unregister(&orion_spi_driver);
528}
529module_exit(orion_spi_exit);
530 521
531MODULE_DESCRIPTION("Orion SPI driver"); 522MODULE_DESCRIPTION("Orion SPI driver");
532MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>"); 523MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 7a85f22b6474..357f183a4fb7 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -389,7 +389,7 @@ static void free_gpios(struct ppc4xx_spi *hw)
389/* 389/*
390 * platform_device layer stuff... 390 * platform_device layer stuff...
391 */ 391 */
392static int __init spi_ppc4xx_of_probe(struct platform_device *op) 392static int spi_ppc4xx_of_probe(struct platform_device *op)
393{ 393{
394 struct ppc4xx_spi *hw; 394 struct ppc4xx_spi *hw;
395 struct spi_master *master; 395 struct spi_master *master;
@@ -419,7 +419,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
419 * This includes both "null" gpio's and real ones. 419 * This includes both "null" gpio's and real ones.
420 */ 420 */
421 num_gpios = of_gpio_count(np); 421 num_gpios = of_gpio_count(np);
422 if (num_gpios) { 422 if (num_gpios > 0) {
423 int i; 423 int i;
424 424
425 hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL); 425 hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
@@ -471,7 +471,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
471 SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; 471 SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
472 472
473 /* this many pins in all GPIO controllers */ 473 /* this many pins in all GPIO controllers */
474 bbp->master->num_chipselect = num_gpios; 474 bbp->master->num_chipselect = num_gpios > 0 ? num_gpios : 0;
475 475
476 /* Get the clock for the OPB */ 476 /* Get the clock for the OPB */
477 opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb"); 477 opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
@@ -560,7 +560,7 @@ free_master:
560 return ret; 560 return ret;
561} 561}
562 562
563static int __exit spi_ppc4xx_of_remove(struct platform_device *op) 563static int spi_ppc4xx_of_remove(struct platform_device *op)
564{ 564{
565 struct spi_master *master = dev_get_drvdata(&op->dev); 565 struct spi_master *master = dev_get_drvdata(&op->dev);
566 struct ppc4xx_spi *hw = spi_master_get_devdata(master); 566 struct ppc4xx_spi *hw = spi_master_get_devdata(master);
@@ -583,7 +583,7 @@ MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
583 583
584static struct platform_driver spi_ppc4xx_of_driver = { 584static struct platform_driver spi_ppc4xx_of_driver = {
585 .probe = spi_ppc4xx_of_probe, 585 .probe = spi_ppc4xx_of_probe,
586 .remove = __exit_p(spi_ppc4xx_of_remove), 586 .remove = spi_ppc4xx_of_remove,
587 .driver = { 587 .driver = {
588 .name = DRIVER_NAME, 588 .name = DRIVER_NAME,
589 .owner = THIS_MODULE, 589 .owner = THIS_MODULE,
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
new file mode 100644
index 000000000000..c735c5a008a2
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -0,0 +1,392 @@
1/*
2 * PXA2xx SPI DMA engine support.
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmaengine.h>
16#include <linux/pxa2xx_ssp.h>
17#include <linux/scatterlist.h>
18#include <linux/sizes.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/pxa2xx_spi.h>
21
22#include "spi-pxa2xx.h"
23
24static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
25 enum dma_data_direction dir)
26{
27 int i, nents, len = drv_data->len;
28 struct scatterlist *sg;
29 struct device *dmadev;
30 struct sg_table *sgt;
31 void *buf, *pbuf;
32
33 /*
34 * Some DMA controllers have problems transferring buffers that are
35 * not multiple of 4 bytes. So we truncate the transfer so that it
36 * is suitable for such controllers, and handle the trailing bytes
37 * manually after the DMA completes.
38 *
39 * REVISIT: It would be better if this information could be
40 * retrieved directly from the DMA device in a similar way than
41 * ->copy_align etc. is done.
42 */
43 len = ALIGN(drv_data->len, 4);
44
45 if (dir == DMA_TO_DEVICE) {
46 dmadev = drv_data->tx_chan->device->dev;
47 sgt = &drv_data->tx_sgt;
48 buf = drv_data->tx;
49 drv_data->tx_map_len = len;
50 } else {
51 dmadev = drv_data->rx_chan->device->dev;
52 sgt = &drv_data->rx_sgt;
53 buf = drv_data->rx;
54 drv_data->rx_map_len = len;
55 }
56
57 nents = DIV_ROUND_UP(len, SZ_2K);
58 if (nents != sgt->nents) {
59 int ret;
60
61 sg_free_table(sgt);
62 ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
63 if (ret)
64 return ret;
65 }
66
67 pbuf = buf;
68 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
69 size_t bytes = min_t(size_t, len, SZ_2K);
70
71 if (buf)
72 sg_set_buf(sg, pbuf, bytes);
73 else
74 sg_set_buf(sg, drv_data->dummy, bytes);
75
76 pbuf += bytes;
77 len -= bytes;
78 }
79
80 nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
81 if (!nents)
82 return -ENOMEM;
83
84 return nents;
85}
86
87static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
88 enum dma_data_direction dir)
89{
90 struct device *dmadev;
91 struct sg_table *sgt;
92
93 if (dir == DMA_TO_DEVICE) {
94 dmadev = drv_data->tx_chan->device->dev;
95 sgt = &drv_data->tx_sgt;
96 } else {
97 dmadev = drv_data->rx_chan->device->dev;
98 sgt = &drv_data->rx_sgt;
99 }
100
101 dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
102}
103
104static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
105{
106 if (!drv_data->dma_mapped)
107 return;
108
109 pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
110 pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
111
112 drv_data->dma_mapped = 0;
113}
114
115static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
116 bool error)
117{
118 struct spi_message *msg = drv_data->cur_msg;
119
120 /*
121 * It is possible that one CPU is handling ROR interrupt and other
122 * just gets DMA completion. Calling pump_transfers() twice for the
123 * same transfer leads to problems thus we prevent concurrent calls
124 * by using ->dma_running.
125 */
126 if (atomic_dec_and_test(&drv_data->dma_running)) {
127 void __iomem *reg = drv_data->ioaddr;
128
129 /*
130 * If the other CPU is still handling the ROR interrupt we
131 * might not know about the error yet. So we re-check the
132 * ROR bit here before we clear the status register.
133 */
134 if (!error) {
135 u32 status = read_SSSR(reg) & drv_data->mask_sr;
136 error = status & SSSR_ROR;
137 }
138
139 /* Clear status & disable interrupts */
140 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
141 write_SSSR_CS(drv_data, drv_data->clear_sr);
142 if (!pxa25x_ssp_comp(drv_data))
143 write_SSTO(0, reg);
144
145 if (!error) {
146 pxa2xx_spi_unmap_dma_buffers(drv_data);
147
148 /* Handle the last bytes of unaligned transfer */
149 drv_data->tx += drv_data->tx_map_len;
150 drv_data->write(drv_data);
151
152 drv_data->rx += drv_data->rx_map_len;
153 drv_data->read(drv_data);
154
155 msg->actual_length += drv_data->len;
156 msg->state = pxa2xx_spi_next_transfer(drv_data);
157 } else {
158 /* In case we got an error we disable the SSP now */
159 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
160
161 msg->state = ERROR_STATE;
162 }
163
164 tasklet_schedule(&drv_data->pump_transfers);
165 }
166}
167
168static void pxa2xx_spi_dma_callback(void *data)
169{
170 pxa2xx_spi_dma_transfer_complete(data, false);
171}
172
173static struct dma_async_tx_descriptor *
174pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
175 enum dma_transfer_direction dir)
176{
177 struct pxa2xx_spi_master *pdata = drv_data->master_info;
178 struct chip_data *chip = drv_data->cur_chip;
179 enum dma_slave_buswidth width;
180 struct dma_slave_config cfg;
181 struct dma_chan *chan;
182 struct sg_table *sgt;
183 int nents, ret;
184
185 switch (drv_data->n_bytes) {
186 case 1:
187 width = DMA_SLAVE_BUSWIDTH_1_BYTE;
188 break;
189 case 2:
190 width = DMA_SLAVE_BUSWIDTH_2_BYTES;
191 break;
192 default:
193 width = DMA_SLAVE_BUSWIDTH_4_BYTES;
194 break;
195 }
196
197 memset(&cfg, 0, sizeof(cfg));
198 cfg.direction = dir;
199
200 if (dir == DMA_MEM_TO_DEV) {
201 cfg.dst_addr = drv_data->ssdr_physical;
202 cfg.dst_addr_width = width;
203 cfg.dst_maxburst = chip->dma_burst_size;
204 cfg.slave_id = pdata->tx_slave_id;
205
206 sgt = &drv_data->tx_sgt;
207 nents = drv_data->tx_nents;
208 chan = drv_data->tx_chan;
209 } else {
210 cfg.src_addr = drv_data->ssdr_physical;
211 cfg.src_addr_width = width;
212 cfg.src_maxburst = chip->dma_burst_size;
213 cfg.slave_id = pdata->rx_slave_id;
214
215 sgt = &drv_data->rx_sgt;
216 nents = drv_data->rx_nents;
217 chan = drv_data->rx_chan;
218 }
219
220 ret = dmaengine_slave_config(chan, &cfg);
221 if (ret) {
222 dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
223 return NULL;
224 }
225
226 return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
227 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
228}
229
230static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
231{
232 const struct pxa2xx_spi_master *pdata = param;
233
234 return chan->chan_id == pdata->tx_chan_id ||
235 chan->chan_id == pdata->rx_chan_id;
236}
237
238bool pxa2xx_spi_dma_is_possible(size_t len)
239{
240 return len <= MAX_DMA_LEN;
241}
242
243int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
244{
245 const struct chip_data *chip = drv_data->cur_chip;
246 int ret;
247
248 if (!chip->enable_dma)
249 return 0;
250
251 /* Don't bother with DMA if we can't do even a single burst */
252 if (drv_data->len < chip->dma_burst_size)
253 return 0;
254
255 ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
256 if (ret <= 0) {
257 dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
258 return 0;
259 }
260
261 drv_data->tx_nents = ret;
262
263 ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
264 if (ret <= 0) {
265 pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
266 dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
267 return 0;
268 }
269
270 drv_data->rx_nents = ret;
271 return 1;
272}
273
274irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
275{
276 u32 status;
277
278 status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
279 if (status & SSSR_ROR) {
280 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
281
282 dmaengine_terminate_all(drv_data->rx_chan);
283 dmaengine_terminate_all(drv_data->tx_chan);
284
285 pxa2xx_spi_dma_transfer_complete(drv_data, true);
286 return IRQ_HANDLED;
287 }
288
289 return IRQ_NONE;
290}
291
292int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
293{
294 struct dma_async_tx_descriptor *tx_desc, *rx_desc;
295
296 tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
297 if (!tx_desc) {
298 dev_err(&drv_data->pdev->dev,
299 "failed to get DMA TX descriptor\n");
300 return -EBUSY;
301 }
302
303 rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
304 if (!rx_desc) {
305 dev_err(&drv_data->pdev->dev,
306 "failed to get DMA RX descriptor\n");
307 return -EBUSY;
308 }
309
310 /* We are ready when RX completes */
311 rx_desc->callback = pxa2xx_spi_dma_callback;
312 rx_desc->callback_param = drv_data;
313
314 dmaengine_submit(rx_desc);
315 dmaengine_submit(tx_desc);
316 return 0;
317}
318
319void pxa2xx_spi_dma_start(struct driver_data *drv_data)
320{
321 dma_async_issue_pending(drv_data->rx_chan);
322 dma_async_issue_pending(drv_data->tx_chan);
323
324 atomic_set(&drv_data->dma_running, 1);
325}
326
327int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
328{
329 struct pxa2xx_spi_master *pdata = drv_data->master_info;
330 dma_cap_mask_t mask;
331
332 dma_cap_zero(mask);
333 dma_cap_set(DMA_SLAVE, mask);
334
335 drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL);
336 if (!drv_data->dummy)
337 return -ENOMEM;
338
339 drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
340 pdata);
341 if (!drv_data->tx_chan)
342 return -ENODEV;
343
344 drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter,
345 pdata);
346 if (!drv_data->rx_chan) {
347 dma_release_channel(drv_data->tx_chan);
348 drv_data->tx_chan = NULL;
349 return -ENODEV;
350 }
351
352 return 0;
353}
354
355void pxa2xx_spi_dma_release(struct driver_data *drv_data)
356{
357 if (drv_data->rx_chan) {
358 dmaengine_terminate_all(drv_data->rx_chan);
359 dma_release_channel(drv_data->rx_chan);
360 sg_free_table(&drv_data->rx_sgt);
361 drv_data->rx_chan = NULL;
362 }
363 if (drv_data->tx_chan) {
364 dmaengine_terminate_all(drv_data->tx_chan);
365 dma_release_channel(drv_data->tx_chan);
366 sg_free_table(&drv_data->tx_sgt);
367 drv_data->tx_chan = NULL;
368 }
369}
370
371void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
372{
373}
374
375int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
376 struct spi_device *spi,
377 u8 bits_per_word, u32 *burst_code,
378 u32 *threshold)
379{
380 struct pxa2xx_spi_chip *chip_info = spi->controller_data;
381
382 /*
383 * If the DMA burst size is given in chip_info we use that,
384 * otherwise we use the default. Also we use the default FIFO
385 * thresholds for now.
386 */
387 *burst_code = chip_info ? chip_info->dma_burst_size : 16;
388 *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
389 | SSCR1_TxTresh(TX_THRESH_DFLT);
390
391 return 0;
392}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index cf95587eefde..364964d2ed04 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -8,147 +8,58 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/spi/pxa2xx_spi.h> 9#include <linux/spi/pxa2xx_spi.h>
10 10
11struct ce4100_info {
12 struct ssp_device ssp;
13 struct platform_device *spi_pdev;
14};
15
16static DEFINE_MUTEX(ssp_lock);
17static LIST_HEAD(ssp_list);
18
19struct ssp_device *pxa_ssp_request(int port, const char *label)
20{
21 struct ssp_device *ssp = NULL;
22
23 mutex_lock(&ssp_lock);
24
25 list_for_each_entry(ssp, &ssp_list, node) {
26 if (ssp->port_id == port && ssp->use_count == 0) {
27 ssp->use_count++;
28 ssp->label = label;
29 break;
30 }
31 }
32
33 mutex_unlock(&ssp_lock);
34
35 if (&ssp->node == &ssp_list)
36 return NULL;
37
38 return ssp;
39}
40EXPORT_SYMBOL_GPL(pxa_ssp_request);
41
42void pxa_ssp_free(struct ssp_device *ssp)
43{
44 mutex_lock(&ssp_lock);
45 if (ssp->use_count) {
46 ssp->use_count--;
47 ssp->label = NULL;
48 } else
49 dev_err(&ssp->pdev->dev, "device already free\n");
50 mutex_unlock(&ssp_lock);
51}
52EXPORT_SYMBOL_GPL(pxa_ssp_free);
53
54static int ce4100_spi_probe(struct pci_dev *dev, 11static int ce4100_spi_probe(struct pci_dev *dev,
55 const struct pci_device_id *ent) 12 const struct pci_device_id *ent)
56{ 13{
14 struct platform_device_info pi;
57 int ret; 15 int ret;
58 resource_size_t phys_beg;
59 resource_size_t phys_len;
60 struct ce4100_info *spi_info;
61 struct platform_device *pdev; 16 struct platform_device *pdev;
62 struct pxa2xx_spi_master spi_pdata; 17 struct pxa2xx_spi_master spi_pdata;
63 struct ssp_device *ssp; 18 struct ssp_device *ssp;
64 19
65 ret = pci_enable_device(dev); 20 ret = pcim_enable_device(dev);
66 if (ret) 21 if (ret)
67 return ret; 22 return ret;
68 23
69 phys_beg = pci_resource_start(dev, 0); 24 ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
70 phys_len = pci_resource_len(dev, 0); 25 if (!ret)
71
72 if (!request_mem_region(phys_beg, phys_len,
73 "CE4100 SPI")) {
74 dev_err(&dev->dev, "Can't request register space.\n");
75 ret = -EBUSY;
76 return ret; 26 return ret;
77 }
78 27
79 pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
80 spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
81 if (!pdev || !spi_info ) {
82 ret = -ENOMEM;
83 goto err_nomem;
84 }
85 memset(&spi_pdata, 0, sizeof(spi_pdata)); 28 memset(&spi_pdata, 0, sizeof(spi_pdata));
86 spi_pdata.num_chipselect = dev->devfn; 29 spi_pdata.num_chipselect = dev->devfn;
87 30
88 ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); 31 ssp = &spi_pdata.ssp;
89 if (ret)
90 goto err_nomem;
91
92 pdev->dev.parent = &dev->dev;
93 pdev->dev.of_node = dev->dev.of_node;
94 ssp = &spi_info->ssp;
95 ssp->phys_base = pci_resource_start(dev, 0); 32 ssp->phys_base = pci_resource_start(dev, 0);
96 ssp->mmio_base = ioremap(phys_beg, phys_len); 33 ssp->mmio_base = pcim_iomap_table(dev)[0];
97 if (!ssp->mmio_base) { 34 if (!ssp->mmio_base) {
98 dev_err(&pdev->dev, "failed to ioremap() registers\n"); 35 dev_err(&dev->dev, "failed to ioremap() registers\n");
99 ret = -EIO; 36 return -EIO;
100 goto err_nomem;
101 } 37 }
102 ssp->irq = dev->irq; 38 ssp->irq = dev->irq;
103 ssp->port_id = pdev->id; 39 ssp->port_id = dev->devfn;
104 ssp->type = PXA25x_SSP; 40 ssp->type = PXA25x_SSP;
105 41
106 mutex_lock(&ssp_lock); 42 memset(&pi, 0, sizeof(pi));
107 list_add(&ssp->node, &ssp_list); 43 pi.parent = &dev->dev;
108 mutex_unlock(&ssp_lock); 44 pi.name = "pxa2xx-spi";
45 pi.id = ssp->port_id;
46 pi.data = &spi_pdata;
47 pi.size_data = sizeof(spi_pdata);
109 48
110 pci_set_drvdata(dev, spi_info); 49 pdev = platform_device_register_full(&pi);
50 if (!pdev)
51 return -ENOMEM;
111 52
112 ret = platform_device_add(pdev); 53 pci_set_drvdata(dev, pdev);
113 if (ret)
114 goto err_dev_add;
115 54
116 return ret; 55 return 0;
117
118err_dev_add:
119 pci_set_drvdata(dev, NULL);
120 mutex_lock(&ssp_lock);
121 list_del(&ssp->node);
122 mutex_unlock(&ssp_lock);
123 iounmap(ssp->mmio_base);
124
125err_nomem:
126 release_mem_region(phys_beg, phys_len);
127 platform_device_put(pdev);
128 kfree(spi_info);
129 return ret;
130} 56}
131 57
132static void ce4100_spi_remove(struct pci_dev *dev) 58static void ce4100_spi_remove(struct pci_dev *dev)
133{ 59{
134 struct ce4100_info *spi_info; 60 struct platform_device *pdev = pci_get_drvdata(dev);
135 struct ssp_device *ssp;
136
137 spi_info = pci_get_drvdata(dev);
138 ssp = &spi_info->ssp;
139 platform_device_unregister(spi_info->spi_pdev);
140
141 iounmap(ssp->mmio_base);
142 release_mem_region(pci_resource_start(dev, 0),
143 pci_resource_len(dev, 0));
144
145 mutex_lock(&ssp_lock);
146 list_del(&ssp->node);
147 mutex_unlock(&ssp_lock);
148 61
149 pci_set_drvdata(dev, NULL); 62 platform_device_unregister(pdev);
150 pci_disable_device(dev);
151 kfree(spi_info);
152} 63}
153 64
154static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = { 65static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
new file mode 100644
index 000000000000..2916efc7cfe5
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -0,0 +1,490 @@
1/*
2 * PXA2xx SPI private DMA support.
3 *
4 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/dma-mapping.h>
25#include <linux/pxa2xx_ssp.h>
26#include <linux/spi/spi.h>
27#include <linux/spi/pxa2xx_spi.h>
28
29#include "spi-pxa2xx.h"
30
31#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
32#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
33
34bool pxa2xx_spi_dma_is_possible(size_t len)
35{
36 /* Try to map dma buffer and do a dma transfer if successful, but
37 * only if the length is non-zero and less than MAX_DMA_LEN.
38 *
39 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
40 * of PIO instead. Care is needed above because the transfer may
41 * have have been passed with buffers that are already dma mapped.
42 * A zero-length transfer in PIO mode will not try to write/read
43 * to/from the buffers
44 *
45 * REVISIT large transfers are exactly where we most want to be
46 * using DMA. If this happens much, split those transfers into
47 * multiple DMA segments rather than forcing PIO.
48 */
49 return len > 0 && len <= MAX_DMA_LEN;
50}
51
52int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
53{
54 struct spi_message *msg = drv_data->cur_msg;
55 struct device *dev = &msg->spi->dev;
56
57 if (!drv_data->cur_chip->enable_dma)
58 return 0;
59
60 if (msg->is_dma_mapped)
61 return drv_data->rx_dma && drv_data->tx_dma;
62
63 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
64 return 0;
65
66 /* Modify setup if rx buffer is null */
67 if (drv_data->rx == NULL) {
68 *drv_data->null_dma_buf = 0;
69 drv_data->rx = drv_data->null_dma_buf;
70 drv_data->rx_map_len = 4;
71 } else
72 drv_data->rx_map_len = drv_data->len;
73
74
75 /* Modify setup if tx buffer is null */
76 if (drv_data->tx == NULL) {
77 *drv_data->null_dma_buf = 0;
78 drv_data->tx = drv_data->null_dma_buf;
79 drv_data->tx_map_len = 4;
80 } else
81 drv_data->tx_map_len = drv_data->len;
82
83 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
84 * so we flush the cache *before* invalidating it, in case
85 * the tx and rx buffers overlap.
86 */
87 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
88 drv_data->tx_map_len, DMA_TO_DEVICE);
89 if (dma_mapping_error(dev, drv_data->tx_dma))
90 return 0;
91
92 /* Stream map the rx buffer */
93 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
94 drv_data->rx_map_len, DMA_FROM_DEVICE);
95 if (dma_mapping_error(dev, drv_data->rx_dma)) {
96 dma_unmap_single(dev, drv_data->tx_dma,
97 drv_data->tx_map_len, DMA_TO_DEVICE);
98 return 0;
99 }
100
101 return 1;
102}
103
104static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
105{
106 struct device *dev;
107
108 if (!drv_data->dma_mapped)
109 return;
110
111 if (!drv_data->cur_msg->is_dma_mapped) {
112 dev = &drv_data->cur_msg->spi->dev;
113 dma_unmap_single(dev, drv_data->rx_dma,
114 drv_data->rx_map_len, DMA_FROM_DEVICE);
115 dma_unmap_single(dev, drv_data->tx_dma,
116 drv_data->tx_map_len, DMA_TO_DEVICE);
117 }
118
119 drv_data->dma_mapped = 0;
120}
121
122static int wait_ssp_rx_stall(void const __iomem *ioaddr)
123{
124 unsigned long limit = loops_per_jiffy << 1;
125
126 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
127 cpu_relax();
128
129 return limit;
130}
131
132static int wait_dma_channel_stop(int channel)
133{
134 unsigned long limit = loops_per_jiffy << 1;
135
136 while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
137 cpu_relax();
138
139 return limit;
140}
141
142static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
143 const char *msg)
144{
145 void __iomem *reg = drv_data->ioaddr;
146
147 /* Stop and reset */
148 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
149 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
150 write_SSSR_CS(drv_data, drv_data->clear_sr);
151 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
152 if (!pxa25x_ssp_comp(drv_data))
153 write_SSTO(0, reg);
154 pxa2xx_spi_flush(drv_data);
155 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
156
157 pxa2xx_spi_unmap_dma_buffers(drv_data);
158
159 dev_err(&drv_data->pdev->dev, "%s\n", msg);
160
161 drv_data->cur_msg->state = ERROR_STATE;
162 tasklet_schedule(&drv_data->pump_transfers);
163}
164
165static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
166{
167 void __iomem *reg = drv_data->ioaddr;
168 struct spi_message *msg = drv_data->cur_msg;
169
170 /* Clear and disable interrupts on SSP and DMA channels*/
171 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
172 write_SSSR_CS(drv_data, drv_data->clear_sr);
173 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
174 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
175
176 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
177 dev_err(&drv_data->pdev->dev,
178 "dma_handler: dma rx channel stop failed\n");
179
180 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
181 dev_err(&drv_data->pdev->dev,
182 "dma_transfer: ssp rx stall failed\n");
183
184 pxa2xx_spi_unmap_dma_buffers(drv_data);
185
186 /* update the buffer pointer for the amount completed in dma */
187 drv_data->rx += drv_data->len -
188 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
189
190 /* read trailing data from fifo, it does not matter how many
191 * bytes are in the fifo just read until buffer is full
192 * or fifo is empty, which ever occurs first */
193 drv_data->read(drv_data);
194
195 /* return count of what was actually read */
196 msg->actual_length += drv_data->len -
197 (drv_data->rx_end - drv_data->rx);
198
199 /* Transfer delays and chip select release are
200 * handled in pump_transfers or giveback
201 */
202
203 /* Move to next transfer */
204 msg->state = pxa2xx_spi_next_transfer(drv_data);
205
206 /* Schedule transfer tasklet */
207 tasklet_schedule(&drv_data->pump_transfers);
208}
209
210void pxa2xx_spi_dma_handler(int channel, void *data)
211{
212 struct driver_data *drv_data = data;
213 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
214
215 if (irq_status & DCSR_BUSERR) {
216
217 if (channel == drv_data->tx_channel)
218 pxa2xx_spi_dma_error_stop(drv_data,
219 "dma_handler: bad bus address on tx channel");
220 else
221 pxa2xx_spi_dma_error_stop(drv_data,
222 "dma_handler: bad bus address on rx channel");
223 return;
224 }
225
226 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
227 if ((channel == drv_data->tx_channel)
228 && (irq_status & DCSR_ENDINTR)
229 && (drv_data->ssp_type == PXA25x_SSP)) {
230
231 /* Wait for rx to stall */
232 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
233 dev_err(&drv_data->pdev->dev,
234 "dma_handler: ssp rx stall failed\n");
235
236 /* finish this transfer, start the next */
237 pxa2xx_spi_dma_transfer_complete(drv_data);
238 }
239}
240
/*
 * pxa2xx_spi_dma_transfer - SSP interrupt service routine used while a
 * DMA transfer is in flight.
 * @drv_data: driver private data for the controller
 *
 * Triages the SSP status bits during DMA: a receive FIFO overrun aborts
 * the transfer; a timeout interrupt while the tx DMA channel is still
 * running is acknowledged as spurious; a genuine timeout (or a fully
 * filled rx buffer) completes the current transfer.
 *
 * Returns IRQ_HANDLED if the interrupt belonged to this transfer,
 * IRQ_NONE otherwise.
 */
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		/* Receive FIFO overrun: abort and report the error */
		pxa2xx_spi_dma_error_stop(drv_data,
			"dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout: tx DMA still running means
	 * data is still flowing, so only acknowledge the interrupt */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, none of the expected status bits: not our interrupt */
	return IRQ_NONE;
}
276
277int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
278{
279 u32 dma_width;
280
281 switch (drv_data->n_bytes) {
282 case 1:
283 dma_width = DCMD_WIDTH1;
284 break;
285 case 2:
286 dma_width = DCMD_WIDTH2;
287 break;
288 default:
289 dma_width = DCMD_WIDTH4;
290 break;
291 }
292
293 /* Setup rx DMA Channel */
294 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
295 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
296 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
297 if (drv_data->rx == drv_data->null_dma_buf)
298 /* No target address increment */
299 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
300 | dma_width
301 | dma_burst
302 | drv_data->len;
303 else
304 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
305 | DCMD_FLOWSRC
306 | dma_width
307 | dma_burst
308 | drv_data->len;
309
310 /* Setup tx DMA Channel */
311 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
312 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
313 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
314 if (drv_data->tx == drv_data->null_dma_buf)
315 /* No source address increment */
316 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
317 | dma_width
318 | dma_burst
319 | drv_data->len;
320 else
321 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
322 | DCMD_FLOWTRG
323 | dma_width
324 | dma_burst
325 | drv_data->len;
326
327 /* Enable dma end irqs on SSP to detect end of transfer */
328 if (drv_data->ssp_type == PXA25x_SSP)
329 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
330
331 return 0;
332}
333
/*
 * pxa2xx_spi_dma_start - start a transfer previously programmed by
 * pxa2xx_spi_dma_prepare().
 * @drv_data: driver private data for the controller
 *
 * Sets the RUN bit on both channels.  NOTE(review): rx is started before
 * tx — presumably so the receiver is live before the transmitter begins
 * feeding the SSP; keep this order.
 */
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}
339
340int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
341{
342 struct device *dev = &drv_data->pdev->dev;
343 struct ssp_device *ssp = drv_data->ssp;
344
345 /* Get two DMA channels (rx and tx) */
346 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
347 DMA_PRIO_HIGH,
348 pxa2xx_spi_dma_handler,
349 drv_data);
350 if (drv_data->rx_channel < 0) {
351 dev_err(dev, "problem (%d) requesting rx channel\n",
352 drv_data->rx_channel);
353 return -ENODEV;
354 }
355 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
356 DMA_PRIO_MEDIUM,
357 pxa2xx_spi_dma_handler,
358 drv_data);
359 if (drv_data->tx_channel < 0) {
360 dev_err(dev, "problem (%d) requesting tx channel\n",
361 drv_data->tx_channel);
362 pxa_free_dma(drv_data->rx_channel);
363 return -ENODEV;
364 }
365
366 DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
367 DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
368
369 return 0;
370}
371
/*
 * pxa2xx_spi_dma_release - release the DMA resources taken by
 * pxa2xx_spi_dma_setup().
 * @drv_data: driver private data for the controller
 *
 * Clears the DRCMR request-line mappings and frees both DMA channels.
 *
 * NOTE(review): the "!= 0" guards mean a channel number of 0 is never
 * freed, while pxa2xx_spi_dma_resume() treats -1 as the "no channel"
 * sentinel — confirm which sentinel value is intended here.
 */
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	/* Detach the SSP request lines from the channels */
	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel != 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != 0)
		pxa_free_dma(drv_data->rx_channel);
}
384
385void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
386{
387 if (drv_data->rx_channel != -1)
388 DRCMR(drv_data->ssp->drcmr_rx) =
389 DRCMR_MAPVLD | drv_data->rx_channel;
390 if (drv_data->tx_channel != -1)
391 DRCMR(drv_data->ssp->drcmr_tx) =
392 DRCMR_MAPVLD | drv_data->tx_channel;
393}
394
395int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
396 struct spi_device *spi,
397 u8 bits_per_word, u32 *burst_code,
398 u32 *threshold)
399{
400 struct pxa2xx_spi_chip *chip_info =
401 (struct pxa2xx_spi_chip *)spi->controller_data;
402 int bytes_per_word;
403 int burst_bytes;
404 int thresh_words;
405 int req_burst_size;
406 int retval = 0;
407
408 /* Set the threshold (in registers) to equal the same amount of data
409 * as represented by burst size (in bytes). The computation below
410 * is (burst_size rounded up to nearest 8 byte, word or long word)
411 * divided by (bytes/register); the tx threshold is the inverse of
412 * the rx, so that there will always be enough data in the rx fifo
413 * to satisfy a burst, and there will always be enough space in the
414 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
415 * there is not enough space), there must always remain enough empty
416 * space in the rx fifo for any data loaded to the tx fifo.
417 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
418 * will be 8, or half the fifo;
419 * The threshold can only be set to 2, 4 or 8, but not 16, because
420 * to burst 16 to the tx fifo, the fifo would have to be empty;
421 * however, the minimum fifo trigger level is 1, and the tx will
422 * request service when the fifo is at this level, with only 15 spaces.
423 */
424
425 /* find bytes/word */
426 if (bits_per_word <= 8)
427 bytes_per_word = 1;
428 else if (bits_per_word <= 16)
429 bytes_per_word = 2;
430 else
431 bytes_per_word = 4;
432
433 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
434 if (chip_info)
435 req_burst_size = chip_info->dma_burst_size;
436 else {
437 switch (chip->dma_burst_size) {
438 default:
439 /* if the default burst size is not set,
440 * do it now */
441 chip->dma_burst_size = DCMD_BURST8;
442 case DCMD_BURST8:
443 req_burst_size = 8;
444 break;
445 case DCMD_BURST16:
446 req_burst_size = 16;
447 break;
448 case DCMD_BURST32:
449 req_burst_size = 32;
450 break;
451 }
452 }
453 if (req_burst_size <= 8) {
454 *burst_code = DCMD_BURST8;
455 burst_bytes = 8;
456 } else if (req_burst_size <= 16) {
457 if (bytes_per_word == 1) {
458 /* don't burst more than 1/2 the fifo */
459 *burst_code = DCMD_BURST8;
460 burst_bytes = 8;
461 retval = 1;
462 } else {
463 *burst_code = DCMD_BURST16;
464 burst_bytes = 16;
465 }
466 } else {
467 if (bytes_per_word == 1) {
468 /* don't burst more than 1/2 the fifo */
469 *burst_code = DCMD_BURST8;
470 burst_bytes = 8;
471 retval = 1;
472 } else if (bytes_per_word == 2) {
473 /* don't burst more than 1/2 the fifo */
474 *burst_code = DCMD_BURST16;
475 burst_bytes = 16;
476 retval = 1;
477 } else {
478 *burst_code = DCMD_BURST32;
479 burst_bytes = 32;
480 }
481 }
482
483 thresh_words = burst_bytes / bytes_per_word;
484
485 /* thresh_words will be between 2 and 8 */
486 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
487 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
488
489 return retval;
490}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5c8c4f5883c4..90b27a3508a6 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3 * Copyright (C) 2013, Intel Corporation
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -24,17 +25,20 @@
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/spi/pxa2xx_spi.h> 27#include <linux/spi/pxa2xx_spi.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/clk.h>
34#include <linux/pm_runtime.h>
35#include <linux/acpi.h>
33 36
34#include <asm/io.h> 37#include <asm/io.h>
35#include <asm/irq.h> 38#include <asm/irq.h>
36#include <asm/delay.h> 39#include <asm/delay.h>
37 40
41#include "spi-pxa2xx.h"
38 42
39MODULE_AUTHOR("Stephen Street"); 43MODULE_AUTHOR("Stephen Street");
40MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); 44MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -45,12 +49,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
45 49
46#define TIMOUT_DFLT 1000 50#define TIMOUT_DFLT 1000
47 51
48#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
49#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
50#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
51#define MAX_DMA_LEN 8191
52#define DMA_ALIGNMENT 8
53
54/* 52/*
55 * for testing SSCR1 changes that require SSP restart, basically 53 * for testing SSCR1 changes that require SSP restart, basically
56 * everything except the service and interrupt enables, the pxa270 developer 54 * everything except the service and interrupt enables, the pxa270 developer
@@ -65,115 +63,97 @@ MODULE_ALIAS("platform:pxa2xx-spi");
65 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ 63 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
66 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 64 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
67 65
68#define DEFINE_SSP_REG(reg, off) \ 66#define LPSS_RX_THRESH_DFLT 64
69static inline u32 read_##reg(void const __iomem *p) \ 67#define LPSS_TX_LOTHRESH_DFLT 160
70{ return __raw_readl(p + (off)); } \ 68#define LPSS_TX_HITHRESH_DFLT 224
71\
72static inline void write_##reg(u32 v, void __iomem *p) \
73{ __raw_writel(v, p + (off)); }
74
75DEFINE_SSP_REG(SSCR0, 0x00)
76DEFINE_SSP_REG(SSCR1, 0x04)
77DEFINE_SSP_REG(SSSR, 0x08)
78DEFINE_SSP_REG(SSITR, 0x0c)
79DEFINE_SSP_REG(SSDR, 0x10)
80DEFINE_SSP_REG(SSTO, 0x28)
81DEFINE_SSP_REG(SSPSP, 0x2c)
82
83#define START_STATE ((void*)0)
84#define RUNNING_STATE ((void*)1)
85#define DONE_STATE ((void*)2)
86#define ERROR_STATE ((void*)-1)
87
88#define QUEUE_RUNNING 0
89#define QUEUE_STOPPED 1
90
91struct driver_data {
92 /* Driver model hookup */
93 struct platform_device *pdev;
94
95 /* SSP Info */
96 struct ssp_device *ssp;
97 69
98 /* SPI framework hookup */ 70/* Offset from drv_data->lpss_base */
99 enum pxa_ssp_type ssp_type; 71#define SPI_CS_CONTROL 0x18
100 struct spi_master *master; 72#define SPI_CS_CONTROL_SW_MODE BIT(0)
73#define SPI_CS_CONTROL_CS_HIGH BIT(1)
101 74
102 /* PXA hookup */ 75static bool is_lpss_ssp(const struct driver_data *drv_data)
103 struct pxa2xx_spi_master *master_info; 76{
104 77 return drv_data->ssp_type == LPSS_SSP;
105 /* DMA setup stuff */ 78}
106 int rx_channel;
107 int tx_channel;
108 u32 *null_dma_buf;
109
110 /* SSP register addresses */
111 void __iomem *ioaddr;
112 u32 ssdr_physical;
113
114 /* SSP masks*/
115 u32 dma_cr1;
116 u32 int_cr1;
117 u32 clear_sr;
118 u32 mask_sr;
119
120 /* Driver message queue */
121 struct workqueue_struct *workqueue;
122 struct work_struct pump_messages;
123 spinlock_t lock;
124 struct list_head queue;
125 int busy;
126 int run;
127
128 /* Message Transfer pump */
129 struct tasklet_struct pump_transfers;
130
131 /* Current message transfer state info */
132 struct spi_message* cur_msg;
133 struct spi_transfer* cur_transfer;
134 struct chip_data *cur_chip;
135 size_t len;
136 void *tx;
137 void *tx_end;
138 void *rx;
139 void *rx_end;
140 int dma_mapped;
141 dma_addr_t rx_dma;
142 dma_addr_t tx_dma;
143 size_t rx_map_len;
144 size_t tx_map_len;
145 u8 n_bytes;
146 u32 dma_width;
147 int (*write)(struct driver_data *drv_data);
148 int (*read)(struct driver_data *drv_data);
149 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
150 void (*cs_control)(u32 command);
151};
152 79
153struct chip_data { 80/*
154 u32 cr0; 81 * Read and write LPSS SSP private registers. Caller must first check that
155 u32 cr1; 82 * is_lpss_ssp() returns true before these can be called.
156 u32 psp; 83 */
157 u32 timeout; 84static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
158 u8 n_bytes; 85{
159 u32 dma_width; 86 WARN_ON(!drv_data->lpss_base);
160 u32 dma_burst_size; 87 return readl(drv_data->lpss_base + offset);
161 u32 threshold; 88}
162 u32 dma_threshold;
163 u8 enable_dma;
164 u8 bits_per_word;
165 u32 speed_hz;
166 union {
167 int gpio_cs;
168 unsigned int frm;
169 };
170 int gpio_cs_inverted;
171 int (*write)(struct driver_data *drv_data);
172 int (*read)(struct driver_data *drv_data);
173 void (*cs_control)(u32 command);
174};
175 89
176static void pump_messages(struct work_struct *work); 90static void __lpss_ssp_write_priv(struct driver_data *drv_data,
91 unsigned offset, u32 value)
92{
93 WARN_ON(!drv_data->lpss_base);
94 writel(value, drv_data->lpss_base + offset);
95}
96
97/*
98 * lpss_ssp_setup - perform LPSS SSP specific setup
99 * @drv_data: pointer to the driver private data
100 *
101 * Perform LPSS SSP specific setup. This function must be called first if
102 * one is going to use LPSS SSP private registers.
103 */
104static void lpss_ssp_setup(struct driver_data *drv_data)
105{
106 unsigned offset = 0x400;
107 u32 value, orig;
108
109 if (!is_lpss_ssp(drv_data))
110 return;
111
112 /*
113 * Perform auto-detection of the LPSS SSP private registers. They
114 * can be either at 1k or 2k offset from the base address.
115 */
116 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
117
118 value = orig | SPI_CS_CONTROL_SW_MODE;
119 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
120 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
121 if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
122 offset = 0x800;
123 goto detection_done;
124 }
125
126 value &= ~SPI_CS_CONTROL_SW_MODE;
127 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
128 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
129 if (value != orig) {
130 offset = 0x800;
131 goto detection_done;
132 }
133
134detection_done:
135 /* Now set the LPSS base */
136 drv_data->lpss_base = drv_data->ioaddr + offset;
137
138 /* Enable software chip select control */
139 value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
140 __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
141}
142
143static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
144{
145 u32 value;
146
147 if (!is_lpss_ssp(drv_data))
148 return;
149
150 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
151 if (enable)
152 value &= ~SPI_CS_CONTROL_CS_HIGH;
153 else
154 value |= SPI_CS_CONTROL_CS_HIGH;
155 __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
156}
177 157
178static void cs_assert(struct driver_data *drv_data) 158static void cs_assert(struct driver_data *drv_data)
179{ 159{
@@ -189,8 +169,12 @@ static void cs_assert(struct driver_data *drv_data)
189 return; 169 return;
190 } 170 }
191 171
192 if (gpio_is_valid(chip->gpio_cs)) 172 if (gpio_is_valid(chip->gpio_cs)) {
193 gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); 173 gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
174 return;
175 }
176
177 lpss_ssp_cs_control(drv_data, true);
194} 178}
195 179
196static void cs_deassert(struct driver_data *drv_data) 180static void cs_deassert(struct driver_data *drv_data)
@@ -205,30 +189,15 @@ static void cs_deassert(struct driver_data *drv_data)
205 return; 189 return;
206 } 190 }
207 191
208 if (gpio_is_valid(chip->gpio_cs)) 192 if (gpio_is_valid(chip->gpio_cs)) {
209 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); 193 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
210} 194 return;
211 195 }
212static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
213{
214 void __iomem *reg = drv_data->ioaddr;
215
216 if (drv_data->ssp_type == CE4100_SSP)
217 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
218
219 write_SSSR(val, reg);
220}
221 196
222static int pxa25x_ssp_comp(struct driver_data *drv_data) 197 lpss_ssp_cs_control(drv_data, false);
223{
224 if (drv_data->ssp_type == PXA25x_SSP)
225 return 1;
226 if (drv_data->ssp_type == CE4100_SSP)
227 return 1;
228 return 0;
229} 198}
230 199
231static int flush(struct driver_data *drv_data) 200int pxa2xx_spi_flush(struct driver_data *drv_data)
232{ 201{
233 unsigned long limit = loops_per_jiffy << 1; 202 unsigned long limit = loops_per_jiffy << 1;
234 203
@@ -354,7 +323,7 @@ static int u32_reader(struct driver_data *drv_data)
354 return drv_data->rx == drv_data->rx_end; 323 return drv_data->rx == drv_data->rx_end;
355} 324}
356 325
357static void *next_transfer(struct driver_data *drv_data) 326void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
358{ 327{
359 struct spi_message *msg = drv_data->cur_msg; 328 struct spi_message *msg = drv_data->cur_msg;
360 struct spi_transfer *trans = drv_data->cur_transfer; 329 struct spi_transfer *trans = drv_data->cur_transfer;
@@ -370,89 +339,15 @@ static void *next_transfer(struct driver_data *drv_data)
370 return DONE_STATE; 339 return DONE_STATE;
371} 340}
372 341
373static int map_dma_buffers(struct driver_data *drv_data)
374{
375 struct spi_message *msg = drv_data->cur_msg;
376 struct device *dev = &msg->spi->dev;
377
378 if (!drv_data->cur_chip->enable_dma)
379 return 0;
380
381 if (msg->is_dma_mapped)
382 return drv_data->rx_dma && drv_data->tx_dma;
383
384 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
385 return 0;
386
387 /* Modify setup if rx buffer is null */
388 if (drv_data->rx == NULL) {
389 *drv_data->null_dma_buf = 0;
390 drv_data->rx = drv_data->null_dma_buf;
391 drv_data->rx_map_len = 4;
392 } else
393 drv_data->rx_map_len = drv_data->len;
394
395
396 /* Modify setup if tx buffer is null */
397 if (drv_data->tx == NULL) {
398 *drv_data->null_dma_buf = 0;
399 drv_data->tx = drv_data->null_dma_buf;
400 drv_data->tx_map_len = 4;
401 } else
402 drv_data->tx_map_len = drv_data->len;
403
404 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
405 * so we flush the cache *before* invalidating it, in case
406 * the tx and rx buffers overlap.
407 */
408 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
409 drv_data->tx_map_len, DMA_TO_DEVICE);
410 if (dma_mapping_error(dev, drv_data->tx_dma))
411 return 0;
412
413 /* Stream map the rx buffer */
414 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
415 drv_data->rx_map_len, DMA_FROM_DEVICE);
416 if (dma_mapping_error(dev, drv_data->rx_dma)) {
417 dma_unmap_single(dev, drv_data->tx_dma,
418 drv_data->tx_map_len, DMA_TO_DEVICE);
419 return 0;
420 }
421
422 return 1;
423}
424
425static void unmap_dma_buffers(struct driver_data *drv_data)
426{
427 struct device *dev;
428
429 if (!drv_data->dma_mapped)
430 return;
431
432 if (!drv_data->cur_msg->is_dma_mapped) {
433 dev = &drv_data->cur_msg->spi->dev;
434 dma_unmap_single(dev, drv_data->rx_dma,
435 drv_data->rx_map_len, DMA_FROM_DEVICE);
436 dma_unmap_single(dev, drv_data->tx_dma,
437 drv_data->tx_map_len, DMA_TO_DEVICE);
438 }
439
440 drv_data->dma_mapped = 0;
441}
442
443/* caller already set message->status; dma and pio irqs are blocked */ 342/* caller already set message->status; dma and pio irqs are blocked */
444static void giveback(struct driver_data *drv_data) 343static void giveback(struct driver_data *drv_data)
445{ 344{
446 struct spi_transfer* last_transfer; 345 struct spi_transfer* last_transfer;
447 unsigned long flags;
448 struct spi_message *msg; 346 struct spi_message *msg;
449 347
450 spin_lock_irqsave(&drv_data->lock, flags);
451 msg = drv_data->cur_msg; 348 msg = drv_data->cur_msg;
452 drv_data->cur_msg = NULL; 349 drv_data->cur_msg = NULL;
453 drv_data->cur_transfer = NULL; 350 drv_data->cur_transfer = NULL;
454 queue_work(drv_data->workqueue, &drv_data->pump_messages);
455 spin_unlock_irqrestore(&drv_data->lock, flags);
456 351
457 last_transfer = list_entry(msg->transfers.prev, 352 last_transfer = list_entry(msg->transfers.prev,
458 struct spi_transfer, 353 struct spi_transfer,
@@ -481,13 +376,7 @@ static void giveback(struct driver_data *drv_data)
481 */ 376 */
482 377
483 /* get a pointer to the next message, if any */ 378 /* get a pointer to the next message, if any */
484 spin_lock_irqsave(&drv_data->lock, flags); 379 next_msg = spi_get_next_queued_message(drv_data->master);
485 if (list_empty(&drv_data->queue))
486 next_msg = NULL;
487 else
488 next_msg = list_entry(drv_data->queue.next,
489 struct spi_message, queue);
490 spin_unlock_irqrestore(&drv_data->lock, flags);
491 380
492 /* see if the next and current messages point 381 /* see if the next and current messages point
493 * to the same chip 382 * to the same chip
@@ -498,168 +387,10 @@ static void giveback(struct driver_data *drv_data)
498 cs_deassert(drv_data); 387 cs_deassert(drv_data);
499 } 388 }
500 389
501 msg->state = NULL; 390 spi_finalize_current_message(drv_data->master);
502 if (msg->complete)
503 msg->complete(msg->context);
504
505 drv_data->cur_chip = NULL; 391 drv_data->cur_chip = NULL;
506} 392}
507 393
508static int wait_ssp_rx_stall(void const __iomem *ioaddr)
509{
510 unsigned long limit = loops_per_jiffy << 1;
511
512 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
513 cpu_relax();
514
515 return limit;
516}
517
518static int wait_dma_channel_stop(int channel)
519{
520 unsigned long limit = loops_per_jiffy << 1;
521
522 while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
523 cpu_relax();
524
525 return limit;
526}
527
528static void dma_error_stop(struct driver_data *drv_data, const char *msg)
529{
530 void __iomem *reg = drv_data->ioaddr;
531
532 /* Stop and reset */
533 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
534 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
535 write_SSSR_CS(drv_data, drv_data->clear_sr);
536 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
537 if (!pxa25x_ssp_comp(drv_data))
538 write_SSTO(0, reg);
539 flush(drv_data);
540 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
541
542 unmap_dma_buffers(drv_data);
543
544 dev_err(&drv_data->pdev->dev, "%s\n", msg);
545
546 drv_data->cur_msg->state = ERROR_STATE;
547 tasklet_schedule(&drv_data->pump_transfers);
548}
549
550static void dma_transfer_complete(struct driver_data *drv_data)
551{
552 void __iomem *reg = drv_data->ioaddr;
553 struct spi_message *msg = drv_data->cur_msg;
554
555 /* Clear and disable interrupts on SSP and DMA channels*/
556 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
557 write_SSSR_CS(drv_data, drv_data->clear_sr);
558 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
559 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
560
561 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
562 dev_err(&drv_data->pdev->dev,
563 "dma_handler: dma rx channel stop failed\n");
564
565 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
566 dev_err(&drv_data->pdev->dev,
567 "dma_transfer: ssp rx stall failed\n");
568
569 unmap_dma_buffers(drv_data);
570
571 /* update the buffer pointer for the amount completed in dma */
572 drv_data->rx += drv_data->len -
573 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
574
575 /* read trailing data from fifo, it does not matter how many
576 * bytes are in the fifo just read until buffer is full
577 * or fifo is empty, which ever occurs first */
578 drv_data->read(drv_data);
579
580 /* return count of what was actually read */
581 msg->actual_length += drv_data->len -
582 (drv_data->rx_end - drv_data->rx);
583
584 /* Transfer delays and chip select release are
585 * handled in pump_transfers or giveback
586 */
587
588 /* Move to next transfer */
589 msg->state = next_transfer(drv_data);
590
591 /* Schedule transfer tasklet */
592 tasklet_schedule(&drv_data->pump_transfers);
593}
594
595static void dma_handler(int channel, void *data)
596{
597 struct driver_data *drv_data = data;
598 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
599
600 if (irq_status & DCSR_BUSERR) {
601
602 if (channel == drv_data->tx_channel)
603 dma_error_stop(drv_data,
604 "dma_handler: "
605 "bad bus address on tx channel");
606 else
607 dma_error_stop(drv_data,
608 "dma_handler: "
609 "bad bus address on rx channel");
610 return;
611 }
612
613 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
614 if ((channel == drv_data->tx_channel)
615 && (irq_status & DCSR_ENDINTR)
616 && (drv_data->ssp_type == PXA25x_SSP)) {
617
618 /* Wait for rx to stall */
619 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
620 dev_err(&drv_data->pdev->dev,
621 "dma_handler: ssp rx stall failed\n");
622
623 /* finish this transfer, start the next */
624 dma_transfer_complete(drv_data);
625 }
626}
627
628static irqreturn_t dma_transfer(struct driver_data *drv_data)
629{
630 u32 irq_status;
631 void __iomem *reg = drv_data->ioaddr;
632
633 irq_status = read_SSSR(reg) & drv_data->mask_sr;
634 if (irq_status & SSSR_ROR) {
635 dma_error_stop(drv_data, "dma_transfer: fifo overrun");
636 return IRQ_HANDLED;
637 }
638
639 /* Check for false positive timeout */
640 if ((irq_status & SSSR_TINT)
641 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
642 write_SSSR(SSSR_TINT, reg);
643 return IRQ_HANDLED;
644 }
645
646 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
647
648 /* Clear and disable timeout interrupt, do the rest in
649 * dma_transfer_complete */
650 if (!pxa25x_ssp_comp(drv_data))
651 write_SSTO(0, reg);
652
653 /* finish this transfer, start the next */
654 dma_transfer_complete(drv_data);
655
656 return IRQ_HANDLED;
657 }
658
659 /* Opps problem detected */
660 return IRQ_NONE;
661}
662
663static void reset_sccr1(struct driver_data *drv_data) 394static void reset_sccr1(struct driver_data *drv_data)
664{ 395{
665 void __iomem *reg = drv_data->ioaddr; 396 void __iomem *reg = drv_data->ioaddr;
@@ -681,7 +412,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
681 reset_sccr1(drv_data); 412 reset_sccr1(drv_data);
682 if (!pxa25x_ssp_comp(drv_data)) 413 if (!pxa25x_ssp_comp(drv_data))
683 write_SSTO(0, reg); 414 write_SSTO(0, reg);
684 flush(drv_data); 415 pxa2xx_spi_flush(drv_data);
685 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 416 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
686 417
687 dev_err(&drv_data->pdev->dev, "%s\n", msg); 418 dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -709,7 +440,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
709 */ 440 */
710 441
711 /* Move to next transfer */ 442 /* Move to next transfer */
712 drv_data->cur_msg->state = next_transfer(drv_data); 443 drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
713 444
714 /* Schedule transfer tasklet */ 445 /* Schedule transfer tasklet */
715 tasklet_schedule(&drv_data->pump_transfers); 446 tasklet_schedule(&drv_data->pump_transfers);
@@ -789,10 +520,20 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
789{ 520{
790 struct driver_data *drv_data = dev_id; 521 struct driver_data *drv_data = dev_id;
791 void __iomem *reg = drv_data->ioaddr; 522 void __iomem *reg = drv_data->ioaddr;
792 u32 sccr1_reg = read_SSCR1(reg); 523 u32 sccr1_reg;
793 u32 mask = drv_data->mask_sr; 524 u32 mask = drv_data->mask_sr;
794 u32 status; 525 u32 status;
795 526
527 /*
528 * The IRQ might be shared with other peripherals so we must first
529 * check that are we RPM suspended or not. If we are we assume that
530 * the IRQ was not for us (we shouldn't be RPM suspended when the
531 * interrupt is enabled).
532 */
533 if (pm_runtime_suspended(&drv_data->pdev->dev))
534 return IRQ_NONE;
535
536 sccr1_reg = read_SSCR1(reg);
796 status = read_SSSR(reg); 537 status = read_SSSR(reg);
797 538
798 /* Ignore possible writes if we don't need to write */ 539 /* Ignore possible writes if we don't need to write */
@@ -820,106 +561,12 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
820 return drv_data->transfer_handler(drv_data); 561 return drv_data->transfer_handler(drv_data);
821} 562}
822 563
823static int set_dma_burst_and_threshold(struct chip_data *chip, 564static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
824 struct spi_device *spi,
825 u8 bits_per_word, u32 *burst_code,
826 u32 *threshold)
827{ 565{
828 struct pxa2xx_spi_chip *chip_info = 566 unsigned long ssp_clk = drv_data->max_clk_rate;
829 (struct pxa2xx_spi_chip *)spi->controller_data; 567 const struct ssp_device *ssp = drv_data->ssp;
830 int bytes_per_word;
831 int burst_bytes;
832 int thresh_words;
833 int req_burst_size;
834 int retval = 0;
835
836 /* Set the threshold (in registers) to equal the same amount of data
837 * as represented by burst size (in bytes). The computation below
838 * is (burst_size rounded up to nearest 8 byte, word or long word)
839 * divided by (bytes/register); the tx threshold is the inverse of
840 * the rx, so that there will always be enough data in the rx fifo
841 * to satisfy a burst, and there will always be enough space in the
842 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
843 * there is not enough space), there must always remain enough empty
844 * space in the rx fifo for any data loaded to the tx fifo.
845 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
846 * will be 8, or half the fifo;
847 * The threshold can only be set to 2, 4 or 8, but not 16, because
848 * to burst 16 to the tx fifo, the fifo would have to be empty;
849 * however, the minimum fifo trigger level is 1, and the tx will
850 * request service when the fifo is at this level, with only 15 spaces.
851 */
852 568
853 /* find bytes/word */ 569 rate = min_t(int, ssp_clk, rate);
854 if (bits_per_word <= 8)
855 bytes_per_word = 1;
856 else if (bits_per_word <= 16)
857 bytes_per_word = 2;
858 else
859 bytes_per_word = 4;
860
861 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
862 if (chip_info)
863 req_burst_size = chip_info->dma_burst_size;
864 else {
865 switch (chip->dma_burst_size) {
866 default:
867 /* if the default burst size is not set,
868 * do it now */
869 chip->dma_burst_size = DCMD_BURST8;
870 case DCMD_BURST8:
871 req_burst_size = 8;
872 break;
873 case DCMD_BURST16:
874 req_burst_size = 16;
875 break;
876 case DCMD_BURST32:
877 req_burst_size = 32;
878 break;
879 }
880 }
881 if (req_burst_size <= 8) {
882 *burst_code = DCMD_BURST8;
883 burst_bytes = 8;
884 } else if (req_burst_size <= 16) {
885 if (bytes_per_word == 1) {
886 /* don't burst more than 1/2 the fifo */
887 *burst_code = DCMD_BURST8;
888 burst_bytes = 8;
889 retval = 1;
890 } else {
891 *burst_code = DCMD_BURST16;
892 burst_bytes = 16;
893 }
894 } else {
895 if (bytes_per_word == 1) {
896 /* don't burst more than 1/2 the fifo */
897 *burst_code = DCMD_BURST8;
898 burst_bytes = 8;
899 retval = 1;
900 } else if (bytes_per_word == 2) {
901 /* don't burst more than 1/2 the fifo */
902 *burst_code = DCMD_BURST16;
903 burst_bytes = 16;
904 retval = 1;
905 } else {
906 *burst_code = DCMD_BURST32;
907 burst_bytes = 32;
908 }
909 }
910
911 thresh_words = burst_bytes / bytes_per_word;
912
913 /* thresh_words will be between 2 and 8 */
914 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
915 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
916
917 return retval;
918}
919
920static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
921{
922 unsigned long ssp_clk = clk_get_rate(ssp->clk);
923 570
924 if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) 571 if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
925 return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; 572 return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
@@ -934,7 +581,6 @@ static void pump_transfers(unsigned long data)
934 struct spi_transfer *transfer = NULL; 581 struct spi_transfer *transfer = NULL;
935 struct spi_transfer *previous = NULL; 582 struct spi_transfer *previous = NULL;
936 struct chip_data *chip = NULL; 583 struct chip_data *chip = NULL;
937 struct ssp_device *ssp = drv_data->ssp;
938 void __iomem *reg = drv_data->ioaddr; 584 void __iomem *reg = drv_data->ioaddr;
939 u32 clk_div = 0; 585 u32 clk_div = 0;
940 u8 bits = 0; 586 u8 bits = 0;
@@ -976,8 +622,8 @@ static void pump_transfers(unsigned long data)
976 cs_deassert(drv_data); 622 cs_deassert(drv_data);
977 } 623 }
978 624
979 /* Check for transfers that need multiple DMA segments */ 625 /* Check if we can DMA this transfer */
980 if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { 626 if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
981 627
982 /* reject already-mapped transfers; PIO won't always work */ 628 /* reject already-mapped transfers; PIO won't always work */
983 if (message->is_dma_mapped 629 if (message->is_dma_mapped
@@ -1000,21 +646,20 @@ static void pump_transfers(unsigned long data)
1000 } 646 }
1001 647
1002 /* Setup the transfer state based on the type of transfer */ 648 /* Setup the transfer state based on the type of transfer */
1003 if (flush(drv_data) == 0) { 649 if (pxa2xx_spi_flush(drv_data) == 0) {
1004 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 650 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
1005 message->status = -EIO; 651 message->status = -EIO;
1006 giveback(drv_data); 652 giveback(drv_data);
1007 return; 653 return;
1008 } 654 }
1009 drv_data->n_bytes = chip->n_bytes; 655 drv_data->n_bytes = chip->n_bytes;
1010 drv_data->dma_width = chip->dma_width;
1011 drv_data->tx = (void *)transfer->tx_buf; 656 drv_data->tx = (void *)transfer->tx_buf;
1012 drv_data->tx_end = drv_data->tx + transfer->len; 657 drv_data->tx_end = drv_data->tx + transfer->len;
1013 drv_data->rx = transfer->rx_buf; 658 drv_data->rx = transfer->rx_buf;
1014 drv_data->rx_end = drv_data->rx + transfer->len; 659 drv_data->rx_end = drv_data->rx + transfer->len;
1015 drv_data->rx_dma = transfer->rx_dma; 660 drv_data->rx_dma = transfer->rx_dma;
1016 drv_data->tx_dma = transfer->tx_dma; 661 drv_data->tx_dma = transfer->tx_dma;
1017 drv_data->len = transfer->len & DCMD_LENGTH; 662 drv_data->len = transfer->len;
1018 drv_data->write = drv_data->tx ? chip->write : null_writer; 663 drv_data->write = drv_data->tx ? chip->write : null_writer;
1019 drv_data->read = drv_data->rx ? chip->read : null_reader; 664 drv_data->read = drv_data->rx ? chip->read : null_reader;
1020 665
@@ -1031,25 +676,22 @@ static void pump_transfers(unsigned long data)
1031 if (transfer->bits_per_word) 676 if (transfer->bits_per_word)
1032 bits = transfer->bits_per_word; 677 bits = transfer->bits_per_word;
1033 678
1034 clk_div = ssp_get_clk_div(ssp, speed); 679 clk_div = ssp_get_clk_div(drv_data, speed);
1035 680
1036 if (bits <= 8) { 681 if (bits <= 8) {
1037 drv_data->n_bytes = 1; 682 drv_data->n_bytes = 1;
1038 drv_data->dma_width = DCMD_WIDTH1;
1039 drv_data->read = drv_data->read != null_reader ? 683 drv_data->read = drv_data->read != null_reader ?
1040 u8_reader : null_reader; 684 u8_reader : null_reader;
1041 drv_data->write = drv_data->write != null_writer ? 685 drv_data->write = drv_data->write != null_writer ?
1042 u8_writer : null_writer; 686 u8_writer : null_writer;
1043 } else if (bits <= 16) { 687 } else if (bits <= 16) {
1044 drv_data->n_bytes = 2; 688 drv_data->n_bytes = 2;
1045 drv_data->dma_width = DCMD_WIDTH2;
1046 drv_data->read = drv_data->read != null_reader ? 689 drv_data->read = drv_data->read != null_reader ?
1047 u16_reader : null_reader; 690 u16_reader : null_reader;
1048 drv_data->write = drv_data->write != null_writer ? 691 drv_data->write = drv_data->write != null_writer ?
1049 u16_writer : null_writer; 692 u16_writer : null_writer;
1050 } else if (bits <= 32) { 693 } else if (bits <= 32) {
1051 drv_data->n_bytes = 4; 694 drv_data->n_bytes = 4;
1052 drv_data->dma_width = DCMD_WIDTH4;
1053 drv_data->read = drv_data->read != null_reader ? 695 drv_data->read = drv_data->read != null_reader ?
1054 u32_reader : null_reader; 696 u32_reader : null_reader;
1055 drv_data->write = drv_data->write != null_writer ? 697 drv_data->write = drv_data->write != null_writer ?
@@ -1058,7 +700,8 @@ static void pump_transfers(unsigned long data)
1058 /* if bits/word is changed in dma mode, then must check the 700 /* if bits/word is changed in dma mode, then must check the
1059 * thresholds and burst also */ 701 * thresholds and burst also */
1060 if (chip->enable_dma) { 702 if (chip->enable_dma) {
1061 if (set_dma_burst_and_threshold(chip, message->spi, 703 if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
704 message->spi,
1062 bits, &dma_burst, 705 bits, &dma_burst,
1063 &dma_thresh)) 706 &dma_thresh))
1064 if (printk_ratelimit()) 707 if (printk_ratelimit())
@@ -1077,70 +720,21 @@ static void pump_transfers(unsigned long data)
1077 720
1078 message->state = RUNNING_STATE; 721 message->state = RUNNING_STATE;
1079 722
1080 /* Try to map dma buffer and do a dma transfer if successful, but
1081 * only if the length is non-zero and less than MAX_DMA_LEN.
1082 *
1083 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
1084 * of PIO instead. Care is needed above because the transfer may
1085 * have have been passed with buffers that are already dma mapped.
1086 * A zero-length transfer in PIO mode will not try to write/read
1087 * to/from the buffers
1088 *
1089 * REVISIT large transfers are exactly where we most want to be
1090 * using DMA. If this happens much, split those transfers into
1091 * multiple DMA segments rather than forcing PIO.
1092 */
1093 drv_data->dma_mapped = 0; 723 drv_data->dma_mapped = 0;
1094 if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) 724 if (pxa2xx_spi_dma_is_possible(drv_data->len))
1095 drv_data->dma_mapped = map_dma_buffers(drv_data); 725 drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
1096 if (drv_data->dma_mapped) { 726 if (drv_data->dma_mapped) {
1097 727
1098 /* Ensure we have the correct interrupt handler */ 728 /* Ensure we have the correct interrupt handler */
1099 drv_data->transfer_handler = dma_transfer; 729 drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
1100 730
1101 /* Setup rx DMA Channel */ 731 pxa2xx_spi_dma_prepare(drv_data, dma_burst);
1102 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
1103 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
1104 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
1105 if (drv_data->rx == drv_data->null_dma_buf)
1106 /* No target address increment */
1107 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
1108 | drv_data->dma_width
1109 | dma_burst
1110 | drv_data->len;
1111 else
1112 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
1113 | DCMD_FLOWSRC
1114 | drv_data->dma_width
1115 | dma_burst
1116 | drv_data->len;
1117
1118 /* Setup tx DMA Channel */
1119 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
1120 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
1121 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
1122 if (drv_data->tx == drv_data->null_dma_buf)
1123 /* No source address increment */
1124 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
1125 | drv_data->dma_width
1126 | dma_burst
1127 | drv_data->len;
1128 else
1129 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
1130 | DCMD_FLOWTRG
1131 | drv_data->dma_width
1132 | dma_burst
1133 | drv_data->len;
1134
1135 /* Enable dma end irqs on SSP to detect end of transfer */
1136 if (drv_data->ssp_type == PXA25x_SSP)
1137 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
1138 732
1139 /* Clear status and start DMA engine */ 733 /* Clear status and start DMA engine */
1140 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 734 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
1141 write_SSSR(drv_data->clear_sr, reg); 735 write_SSSR(drv_data->clear_sr, reg);
1142 DCSR(drv_data->rx_channel) |= DCSR_RUN; 736
1143 DCSR(drv_data->tx_channel) |= DCSR_RUN; 737 pxa2xx_spi_dma_start(drv_data);
1144 } else { 738 } else {
1145 /* Ensure we have the correct interrupt handler */ 739 /* Ensure we have the correct interrupt handler */
1146 drv_data->transfer_handler = interrupt_transfer; 740 drv_data->transfer_handler = interrupt_transfer;
@@ -1150,6 +744,13 @@ static void pump_transfers(unsigned long data)
1150 write_SSSR_CS(drv_data, drv_data->clear_sr); 744 write_SSSR_CS(drv_data, drv_data->clear_sr);
1151 } 745 }
1152 746
747 if (is_lpss_ssp(drv_data)) {
748 if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
749 write_SSIRF(chip->lpss_rx_threshold, reg);
750 if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
751 write_SSITF(chip->lpss_tx_threshold, reg);
752 }
753
1153 /* see if we need to reload the config registers */ 754 /* see if we need to reload the config registers */
1154 if ((read_SSCR0(reg) != cr0) 755 if ((read_SSCR0(reg) != cr0)
1155 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != 756 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
@@ -1176,31 +777,12 @@ static void pump_transfers(unsigned long data)
1176 write_SSCR1(cr1, reg); 777 write_SSCR1(cr1, reg);
1177} 778}
1178 779
1179static void pump_messages(struct work_struct *work) 780static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
781 struct spi_message *msg)
1180{ 782{
1181 struct driver_data *drv_data = 783 struct driver_data *drv_data = spi_master_get_devdata(master);
1182 container_of(work, struct driver_data, pump_messages);
1183 unsigned long flags;
1184
1185 /* Lock queue and check for queue work */
1186 spin_lock_irqsave(&drv_data->lock, flags);
1187 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
1188 drv_data->busy = 0;
1189 spin_unlock_irqrestore(&drv_data->lock, flags);
1190 return;
1191 }
1192
1193 /* Make sure we are not already running a message */
1194 if (drv_data->cur_msg) {
1195 spin_unlock_irqrestore(&drv_data->lock, flags);
1196 return;
1197 }
1198
1199 /* Extract head of queue */
1200 drv_data->cur_msg = list_entry(drv_data->queue.next,
1201 struct spi_message, queue);
1202 list_del_init(&drv_data->cur_msg->queue);
1203 784
785 drv_data->cur_msg = msg;
1204 /* Initial message state*/ 786 /* Initial message state*/
1205 drv_data->cur_msg->state = START_STATE; 787 drv_data->cur_msg->state = START_STATE;
1206 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 788 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -1213,34 +795,27 @@ static void pump_messages(struct work_struct *work)
1213 795
1214 /* Mark as busy and launch transfers */ 796 /* Mark as busy and launch transfers */
1215 tasklet_schedule(&drv_data->pump_transfers); 797 tasklet_schedule(&drv_data->pump_transfers);
1216 798 return 0;
1217 drv_data->busy = 1;
1218 spin_unlock_irqrestore(&drv_data->lock, flags);
1219} 799}
1220 800
1221static int transfer(struct spi_device *spi, struct spi_message *msg) 801static int pxa2xx_spi_prepare_transfer(struct spi_master *master)
1222{ 802{
1223 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 803 struct driver_data *drv_data = spi_master_get_devdata(master);
1224 unsigned long flags;
1225
1226 spin_lock_irqsave(&drv_data->lock, flags);
1227
1228 if (drv_data->run == QUEUE_STOPPED) {
1229 spin_unlock_irqrestore(&drv_data->lock, flags);
1230 return -ESHUTDOWN;
1231 }
1232
1233 msg->actual_length = 0;
1234 msg->status = -EINPROGRESS;
1235 msg->state = START_STATE;
1236 804
1237 list_add_tail(&msg->queue, &drv_data->queue); 805 pm_runtime_get_sync(&drv_data->pdev->dev);
806 return 0;
807}
1238 808
1239 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) 809static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
1240 queue_work(drv_data->workqueue, &drv_data->pump_messages); 810{
811 struct driver_data *drv_data = spi_master_get_devdata(master);
1241 812
1242 spin_unlock_irqrestore(&drv_data->lock, flags); 813 /* Disable the SSP now */
814 write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
815 drv_data->ioaddr);
1243 816
817 pm_runtime_mark_last_busy(&drv_data->pdev->dev);
818 pm_runtime_put_autosuspend(&drv_data->pdev->dev);
1244 return 0; 819 return 0;
1245} 820}
1246 821
@@ -1287,10 +862,18 @@ static int setup(struct spi_device *spi)
1287 struct pxa2xx_spi_chip *chip_info = NULL; 862 struct pxa2xx_spi_chip *chip_info = NULL;
1288 struct chip_data *chip; 863 struct chip_data *chip;
1289 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 864 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1290 struct ssp_device *ssp = drv_data->ssp;
1291 unsigned int clk_div; 865 unsigned int clk_div;
1292 uint tx_thres = TX_THRESH_DFLT; 866 uint tx_thres, tx_hi_thres, rx_thres;
1293 uint rx_thres = RX_THRESH_DFLT; 867
868 if (is_lpss_ssp(drv_data)) {
869 tx_thres = LPSS_TX_LOTHRESH_DFLT;
870 tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
871 rx_thres = LPSS_RX_THRESH_DFLT;
872 } else {
873 tx_thres = TX_THRESH_DFLT;
874 tx_hi_thres = 0;
875 rx_thres = RX_THRESH_DFLT;
876 }
1294 877
1295 if (!pxa25x_ssp_comp(drv_data) 878 if (!pxa25x_ssp_comp(drv_data)
1296 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { 879 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
@@ -1330,8 +913,6 @@ static int setup(struct spi_device *spi)
1330 chip->gpio_cs = -1; 913 chip->gpio_cs = -1;
1331 chip->enable_dma = 0; 914 chip->enable_dma = 0;
1332 chip->timeout = TIMOUT_DFLT; 915 chip->timeout = TIMOUT_DFLT;
1333 chip->dma_burst_size = drv_data->master_info->enable_dma ?
1334 DCMD_BURST8 : 0;
1335 } 916 }
1336 917
1337 /* protocol drivers may change the chip settings, so... 918 /* protocol drivers may change the chip settings, so...
@@ -1345,23 +926,37 @@ static int setup(struct spi_device *spi)
1345 chip->timeout = chip_info->timeout; 926 chip->timeout = chip_info->timeout;
1346 if (chip_info->tx_threshold) 927 if (chip_info->tx_threshold)
1347 tx_thres = chip_info->tx_threshold; 928 tx_thres = chip_info->tx_threshold;
929 if (chip_info->tx_hi_threshold)
930 tx_hi_thres = chip_info->tx_hi_threshold;
1348 if (chip_info->rx_threshold) 931 if (chip_info->rx_threshold)
1349 rx_thres = chip_info->rx_threshold; 932 rx_thres = chip_info->rx_threshold;
1350 chip->enable_dma = drv_data->master_info->enable_dma; 933 chip->enable_dma = drv_data->master_info->enable_dma;
1351 chip->dma_threshold = 0; 934 chip->dma_threshold = 0;
1352 if (chip_info->enable_loopback) 935 if (chip_info->enable_loopback)
1353 chip->cr1 = SSCR1_LBM; 936 chip->cr1 = SSCR1_LBM;
937 } else if (ACPI_HANDLE(&spi->dev)) {
938 /*
939 * Slave devices enumerated from ACPI namespace don't
940 * usually have chip_info but we still might want to use
941 * DMA with them.
942 */
943 chip->enable_dma = drv_data->master_info->enable_dma;
1354 } 944 }
1355 945
1356 chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | 946 chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
1357 (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); 947 (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
1358 948
949 chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
950 chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
951 | SSITF_TxHiThresh(tx_hi_thres);
952
1359 /* set dma burst and threshold outside of chip_info path so that if 953 /* set dma burst and threshold outside of chip_info path so that if
1360 * chip_info goes away after setting chip->enable_dma, the 954 * chip_info goes away after setting chip->enable_dma, the
1361 * burst and threshold can still respond to changes in bits_per_word */ 955 * burst and threshold can still respond to changes in bits_per_word */
1362 if (chip->enable_dma) { 956 if (chip->enable_dma) {
1363 /* set up legal burst and threshold for dma */ 957 /* set up legal burst and threshold for dma */
1364 if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, 958 if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
959 spi->bits_per_word,
1365 &chip->dma_burst_size, 960 &chip->dma_burst_size,
1366 &chip->dma_threshold)) { 961 &chip->dma_threshold)) {
1367 dev_warn(&spi->dev, "in setup: DMA burst size reduced " 962 dev_warn(&spi->dev, "in setup: DMA burst size reduced "
@@ -1369,7 +964,7 @@ static int setup(struct spi_device *spi)
1369 } 964 }
1370 } 965 }
1371 966
1372 clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz); 967 clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
1373 chip->speed_hz = spi->max_speed_hz; 968 chip->speed_hz = spi->max_speed_hz;
1374 969
1375 chip->cr0 = clk_div 970 chip->cr0 = clk_div
@@ -1382,32 +977,32 @@ static int setup(struct spi_device *spi)
1382 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) 977 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
1383 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); 978 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1384 979
980 if (spi->mode & SPI_LOOP)
981 chip->cr1 |= SSCR1_LBM;
982
1385 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 983 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1386 if (!pxa25x_ssp_comp(drv_data)) 984 if (!pxa25x_ssp_comp(drv_data))
1387 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 985 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1388 clk_get_rate(ssp->clk) 986 drv_data->max_clk_rate
1389 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), 987 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
1390 chip->enable_dma ? "DMA" : "PIO"); 988 chip->enable_dma ? "DMA" : "PIO");
1391 else 989 else
1392 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 990 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1393 clk_get_rate(ssp->clk) / 2 991 drv_data->max_clk_rate / 2
1394 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), 992 / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1395 chip->enable_dma ? "DMA" : "PIO"); 993 chip->enable_dma ? "DMA" : "PIO");
1396 994
1397 if (spi->bits_per_word <= 8) { 995 if (spi->bits_per_word <= 8) {
1398 chip->n_bytes = 1; 996 chip->n_bytes = 1;
1399 chip->dma_width = DCMD_WIDTH1;
1400 chip->read = u8_reader; 997 chip->read = u8_reader;
1401 chip->write = u8_writer; 998 chip->write = u8_writer;
1402 } else if (spi->bits_per_word <= 16) { 999 } else if (spi->bits_per_word <= 16) {
1403 chip->n_bytes = 2; 1000 chip->n_bytes = 2;
1404 chip->dma_width = DCMD_WIDTH2;
1405 chip->read = u16_reader; 1001 chip->read = u16_reader;
1406 chip->write = u16_writer; 1002 chip->write = u16_writer;
1407 } else if (spi->bits_per_word <= 32) { 1003 } else if (spi->bits_per_word <= 32) {
1408 chip->cr0 |= SSCR0_EDSS; 1004 chip->cr0 |= SSCR0_EDSS;
1409 chip->n_bytes = 4; 1005 chip->n_bytes = 4;
1410 chip->dma_width = DCMD_WIDTH4;
1411 chip->read = u32_reader; 1006 chip->read = u32_reader;
1412 chip->write = u32_writer; 1007 chip->write = u32_writer;
1413 } else { 1008 } else {
@@ -1438,93 +1033,98 @@ static void cleanup(struct spi_device *spi)
1438 kfree(chip); 1033 kfree(chip);
1439} 1034}
1440 1035
1441static int init_queue(struct driver_data *drv_data) 1036#ifdef CONFIG_ACPI
1037static int pxa2xx_spi_acpi_add_dma(struct acpi_resource *res, void *data)
1442{ 1038{
1443 INIT_LIST_HEAD(&drv_data->queue); 1039 struct pxa2xx_spi_master *pdata = data;
1444 spin_lock_init(&drv_data->lock); 1040
1445 1041 if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
1446 drv_data->run = QUEUE_STOPPED; 1042 const struct acpi_resource_fixed_dma *dma;
1447 drv_data->busy = 0; 1043
1448 1044 dma = &res->data.fixed_dma;
1449 tasklet_init(&drv_data->pump_transfers, 1045 if (pdata->tx_slave_id < 0) {
1450 pump_transfers, (unsigned long)drv_data); 1046 pdata->tx_slave_id = dma->request_lines;
1451 1047 pdata->tx_chan_id = dma->channels;
1452 INIT_WORK(&drv_data->pump_messages, pump_messages); 1048 } else if (pdata->rx_slave_id < 0) {
1453 drv_data->workqueue = create_singlethread_workqueue( 1049 pdata->rx_slave_id = dma->request_lines;
1454 dev_name(drv_data->master->dev.parent)); 1050 pdata->rx_chan_id = dma->channels;
1455 if (drv_data->workqueue == NULL) 1051 }
1456 return -EBUSY; 1052 }
1457 1053
1458 return 0; 1054 /* Tell the ACPI core to skip this resource */
1055 return 1;
1459} 1056}
1460 1057
1461static int start_queue(struct driver_data *drv_data) 1058static struct pxa2xx_spi_master *
1059pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1462{ 1060{
1463 unsigned long flags; 1061 struct pxa2xx_spi_master *pdata;
1464 1062 struct list_head resource_list;
1465 spin_lock_irqsave(&drv_data->lock, flags); 1063 struct acpi_device *adev;
1466 1064 struct ssp_device *ssp;
1467 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { 1065 struct resource *res;
1468 spin_unlock_irqrestore(&drv_data->lock, flags); 1066 int devid;
1469 return -EBUSY; 1067
1068 if (!ACPI_HANDLE(&pdev->dev) ||
1069 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
1070 return NULL;
1071
1072 pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
1073 if (!pdata) {
1074 dev_err(&pdev->dev,
1075 "failed to allocate memory for platform data\n");
1076 return NULL;
1470 } 1077 }
1471 1078
1472 drv_data->run = QUEUE_RUNNING; 1079 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1473 drv_data->cur_msg = NULL; 1080 if (!res)
1474 drv_data->cur_transfer = NULL; 1081 return NULL;
1475 drv_data->cur_chip = NULL;
1476 spin_unlock_irqrestore(&drv_data->lock, flags);
1477
1478 queue_work(drv_data->workqueue, &drv_data->pump_messages);
1479 1082
1480 return 0; 1083 ssp = &pdata->ssp;
1481}
1482 1084
1483static int stop_queue(struct driver_data *drv_data) 1085 ssp->phys_base = res->start;
1484{ 1086 ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res);
1485 unsigned long flags; 1087 if (!ssp->mmio_base) {
1486 unsigned limit = 500; 1088 dev_err(&pdev->dev, "failed to ioremap mmio_base\n");
1487 int status = 0; 1089 return NULL;
1488
1489 spin_lock_irqsave(&drv_data->lock, flags);
1490
1491 /* This is a bit lame, but is optimized for the common execution path.
1492 * A wait_queue on the drv_data->busy could be used, but then the common
1493 * execution path (pump_messages) would be required to call wake_up or
1494 * friends on every SPI message. Do this instead */
1495 drv_data->run = QUEUE_STOPPED;
1496 while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
1497 spin_unlock_irqrestore(&drv_data->lock, flags);
1498 msleep(10);
1499 spin_lock_irqsave(&drv_data->lock, flags);
1500 } 1090 }
1501 1091
1502 if (!list_empty(&drv_data->queue) || drv_data->busy) 1092 ssp->clk = devm_clk_get(&pdev->dev, NULL);
1503 status = -EBUSY; 1093 ssp->irq = platform_get_irq(pdev, 0);
1094 ssp->type = LPSS_SSP;
1095 ssp->pdev = pdev;
1504 1096
1505 spin_unlock_irqrestore(&drv_data->lock, flags); 1097 ssp->port_id = -1;
1098 if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
1099 ssp->port_id = devid;
1506 1100
1507 return status; 1101 pdata->num_chipselect = 1;
1508} 1102 pdata->rx_slave_id = -1;
1103 pdata->tx_slave_id = -1;
1509 1104
1510static int destroy_queue(struct driver_data *drv_data) 1105 INIT_LIST_HEAD(&resource_list);
1511{ 1106 acpi_dev_get_resources(adev, &resource_list, pxa2xx_spi_acpi_add_dma,
1512 int status; 1107 pdata);
1108 acpi_dev_free_resource_list(&resource_list);
1513 1109
1514 status = stop_queue(drv_data); 1110 pdata->enable_dma = pdata->rx_slave_id >= 0 && pdata->tx_slave_id >= 0;
1515 /* we are unloading the module or failing to load (only two calls
1516 * to this routine), and neither call can handle a return value.
1517 * However, destroy_workqueue calls flush_workqueue, and that will
1518 * block until all work is done. If the reason that stop_queue
1519 * timed out is that the work will never finish, then it does no
1520 * good to call destroy_workqueue, so return anyway. */
1521 if (status != 0)
1522 return status;
1523 1111
1524 destroy_workqueue(drv_data->workqueue); 1112 return pdata;
1113}
1525 1114
1526 return 0; 1115static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1116 { "INT33C0", 0 },
1117 { "INT33C1", 0 },
1118 { },
1119};
1120MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
1121#else
1122static inline struct pxa2xx_spi_master *
1123pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1124{
1125 return NULL;
1527} 1126}
1127#endif
1528 1128
1529static int pxa2xx_spi_probe(struct platform_device *pdev) 1129static int pxa2xx_spi_probe(struct platform_device *pdev)
1530{ 1130{
@@ -1535,11 +1135,21 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1535 struct ssp_device *ssp; 1135 struct ssp_device *ssp;
1536 int status; 1136 int status;
1537 1137
1538 platform_info = dev->platform_data; 1138 platform_info = dev_get_platdata(dev);
1139 if (!platform_info) {
1140 platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
1141 if (!platform_info) {
1142 dev_err(&pdev->dev, "missing platform data\n");
1143 return -ENODEV;
1144 }
1145 }
1539 1146
1540 ssp = pxa_ssp_request(pdev->id, pdev->name); 1147 ssp = pxa_ssp_request(pdev->id, pdev->name);
1541 if (ssp == NULL) { 1148 if (!ssp)
1542 dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); 1149 ssp = &platform_info->ssp;
1150
1151 if (!ssp->mmio_base) {
1152 dev_err(&pdev->dev, "failed to get ssp\n");
1543 return -ENODEV; 1153 return -ENODEV;
1544 } 1154 }
1545 1155
@@ -1558,19 +1168,21 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1558 1168
1559 master->dev.parent = &pdev->dev; 1169 master->dev.parent = &pdev->dev;
1560 master->dev.of_node = pdev->dev.of_node; 1170 master->dev.of_node = pdev->dev.of_node;
1171 ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
1561 /* the spi->mode bits understood by this driver: */ 1172 /* the spi->mode bits understood by this driver: */
1562 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1173 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1563 1174
1564 master->bus_num = pdev->id; 1175 master->bus_num = ssp->port_id;
1565 master->num_chipselect = platform_info->num_chipselect; 1176 master->num_chipselect = platform_info->num_chipselect;
1566 master->dma_alignment = DMA_ALIGNMENT; 1177 master->dma_alignment = DMA_ALIGNMENT;
1567 master->cleanup = cleanup; 1178 master->cleanup = cleanup;
1568 master->setup = setup; 1179 master->setup = setup;
1569 master->transfer = transfer; 1180 master->transfer_one_message = pxa2xx_spi_transfer_one_message;
1181 master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer;
1182 master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
1570 1183
1571 drv_data->ssp_type = ssp->type; 1184 drv_data->ssp_type = ssp->type;
1572 drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data + 1185 drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
1573 sizeof(struct driver_data)), 8);
1574 1186
1575 drv_data->ioaddr = ssp->mmio_base; 1187 drv_data->ioaddr = ssp->mmio_base;
1576 drv_data->ssdr_physical = ssp->phys_base + SSDR; 1188 drv_data->ssdr_physical = ssp->phys_base + SSDR;
@@ -1581,7 +1193,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1581 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; 1193 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1582 } else { 1194 } else {
1583 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; 1195 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1584 drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; 1196 drv_data->dma_cr1 = DEFAULT_DMA_CR1;
1585 drv_data->clear_sr = SSSR_ROR | SSSR_TINT; 1197 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1586 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; 1198 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1587 } 1199 }
@@ -1597,35 +1209,17 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1597 drv_data->tx_channel = -1; 1209 drv_data->tx_channel = -1;
1598 drv_data->rx_channel = -1; 1210 drv_data->rx_channel = -1;
1599 if (platform_info->enable_dma) { 1211 if (platform_info->enable_dma) {
1600 1212 status = pxa2xx_spi_dma_setup(drv_data);
1601 /* Get two DMA channels (rx and tx) */ 1213 if (status) {
1602 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", 1214 dev_warn(dev, "failed to setup DMA, using PIO\n");
1603 DMA_PRIO_HIGH, 1215 platform_info->enable_dma = false;
1604 dma_handler,
1605 drv_data);
1606 if (drv_data->rx_channel < 0) {
1607 dev_err(dev, "problem (%d) requesting rx channel\n",
1608 drv_data->rx_channel);
1609 status = -ENODEV;
1610 goto out_error_irq_alloc;
1611 } 1216 }
1612 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1613 DMA_PRIO_MEDIUM,
1614 dma_handler,
1615 drv_data);
1616 if (drv_data->tx_channel < 0) {
1617 dev_err(dev, "problem (%d) requesting tx channel\n",
1618 drv_data->tx_channel);
1619 status = -ENODEV;
1620 goto out_error_dma_alloc;
1621 }
1622
1623 DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
1624 DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
1625 } 1217 }
1626 1218
1627 /* Enable SOC clock */ 1219 /* Enable SOC clock */
1628 clk_enable(ssp->clk); 1220 clk_prepare_enable(ssp->clk);
1221
1222 drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1629 1223
1630 /* Load default SSP configuration */ 1224 /* Load default SSP configuration */
1631 write_SSCR0(0, drv_data->ioaddr); 1225 write_SSCR0(0, drv_data->ioaddr);
@@ -1640,41 +1234,29 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1640 write_SSTO(0, drv_data->ioaddr); 1234 write_SSTO(0, drv_data->ioaddr);
1641 write_SSPSP(0, drv_data->ioaddr); 1235 write_SSPSP(0, drv_data->ioaddr);
1642 1236
1643 /* Initial and start queue */ 1237 lpss_ssp_setup(drv_data);
1644 status = init_queue(drv_data); 1238
1645 if (status != 0) { 1239 tasklet_init(&drv_data->pump_transfers, pump_transfers,
1646 dev_err(&pdev->dev, "problem initializing queue\n"); 1240 (unsigned long)drv_data);
1647 goto out_error_clock_enabled;
1648 }
1649 status = start_queue(drv_data);
1650 if (status != 0) {
1651 dev_err(&pdev->dev, "problem starting queue\n");
1652 goto out_error_clock_enabled;
1653 }
1654 1241
1655 /* Register with the SPI framework */ 1242 /* Register with the SPI framework */
1656 platform_set_drvdata(pdev, drv_data); 1243 platform_set_drvdata(pdev, drv_data);
1657 status = spi_register_master(master); 1244 status = spi_register_master(master);
1658 if (status != 0) { 1245 if (status != 0) {
1659 dev_err(&pdev->dev, "problem registering spi master\n"); 1246 dev_err(&pdev->dev, "problem registering spi master\n");
1660 goto out_error_queue_alloc; 1247 goto out_error_clock_enabled;
1661 } 1248 }
1662 1249
1663 return status; 1250 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1251 pm_runtime_use_autosuspend(&pdev->dev);
1252 pm_runtime_set_active(&pdev->dev);
1253 pm_runtime_enable(&pdev->dev);
1664 1254
1665out_error_queue_alloc: 1255 return status;
1666 destroy_queue(drv_data);
1667 1256
1668out_error_clock_enabled: 1257out_error_clock_enabled:
1669 clk_disable(ssp->clk); 1258 clk_disable_unprepare(ssp->clk);
1670 1259 pxa2xx_spi_dma_release(drv_data);
1671out_error_dma_alloc:
1672 if (drv_data->tx_channel != -1)
1673 pxa_free_dma(drv_data->tx_channel);
1674 if (drv_data->rx_channel != -1)
1675 pxa_free_dma(drv_data->rx_channel);
1676
1677out_error_irq_alloc:
1678 free_irq(ssp->irq, drv_data); 1260 free_irq(ssp->irq, drv_data);
1679 1261
1680out_error_master_alloc: 1262out_error_master_alloc:
@@ -1687,37 +1269,23 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1687{ 1269{
1688 struct driver_data *drv_data = platform_get_drvdata(pdev); 1270 struct driver_data *drv_data = platform_get_drvdata(pdev);
1689 struct ssp_device *ssp; 1271 struct ssp_device *ssp;
1690 int status = 0;
1691 1272
1692 if (!drv_data) 1273 if (!drv_data)
1693 return 0; 1274 return 0;
1694 ssp = drv_data->ssp; 1275 ssp = drv_data->ssp;
1695 1276
1696 /* Remove the queue */ 1277 pm_runtime_get_sync(&pdev->dev);
1697 status = destroy_queue(drv_data);
1698 if (status != 0)
1699 /* the kernel does not check the return status of this
1700 * this routine (mod->exit, within the kernel). Therefore
1701 * nothing is gained by returning from here, the module is
1702 * going away regardless, and we should not leave any more
1703 * resources allocated than necessary. We cannot free the
1704 * message memory in drv_data->queue, but we can release the
1705 * resources below. I think the kernel should honor -EBUSY
1706 * returns but... */
1707 dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
1708 "complete, message memory not freed\n");
1709 1278
1710 /* Disable the SSP at the peripheral and SOC level */ 1279 /* Disable the SSP at the peripheral and SOC level */
1711 write_SSCR0(0, drv_data->ioaddr); 1280 write_SSCR0(0, drv_data->ioaddr);
1712 clk_disable(ssp->clk); 1281 clk_disable_unprepare(ssp->clk);
1713 1282
1714 /* Release DMA */ 1283 /* Release DMA */
1715 if (drv_data->master_info->enable_dma) { 1284 if (drv_data->master_info->enable_dma)
1716 DRCMR(ssp->drcmr_rx) = 0; 1285 pxa2xx_spi_dma_release(drv_data);
1717 DRCMR(ssp->drcmr_tx) = 0; 1286
1718 pxa_free_dma(drv_data->tx_channel); 1287 pm_runtime_put_noidle(&pdev->dev);
1719 pxa_free_dma(drv_data->rx_channel); 1288 pm_runtime_disable(&pdev->dev);
1720 }
1721 1289
1722 /* Release IRQ */ 1290 /* Release IRQ */
1723 free_irq(ssp->irq, drv_data); 1291 free_irq(ssp->irq, drv_data);
@@ -1749,11 +1317,11 @@ static int pxa2xx_spi_suspend(struct device *dev)
1749 struct ssp_device *ssp = drv_data->ssp; 1317 struct ssp_device *ssp = drv_data->ssp;
1750 int status = 0; 1318 int status = 0;
1751 1319
1752 status = stop_queue(drv_data); 1320 status = spi_master_suspend(drv_data->master);
1753 if (status != 0) 1321 if (status != 0)
1754 return status; 1322 return status;
1755 write_SSCR0(0, drv_data->ioaddr); 1323 write_SSCR0(0, drv_data->ioaddr);
1756 clk_disable(ssp->clk); 1324 clk_disable_unprepare(ssp->clk);
1757 1325
1758 return 0; 1326 return 0;
1759} 1327}
@@ -1764,18 +1332,13 @@ static int pxa2xx_spi_resume(struct device *dev)
1764 struct ssp_device *ssp = drv_data->ssp; 1332 struct ssp_device *ssp = drv_data->ssp;
1765 int status = 0; 1333 int status = 0;
1766 1334
1767 if (drv_data->rx_channel != -1) 1335 pxa2xx_spi_dma_resume(drv_data);
1768 DRCMR(drv_data->ssp->drcmr_rx) =
1769 DRCMR_MAPVLD | drv_data->rx_channel;
1770 if (drv_data->tx_channel != -1)
1771 DRCMR(drv_data->ssp->drcmr_tx) =
1772 DRCMR_MAPVLD | drv_data->tx_channel;
1773 1336
1774 /* Enable the SSP clock */ 1337 /* Enable the SSP clock */
1775 clk_enable(ssp->clk); 1338 clk_prepare_enable(ssp->clk);
1776 1339
1777 /* Start the queue running */ 1340 /* Start the queue running */
1778 status = start_queue(drv_data); 1341 status = spi_master_resume(drv_data->master);
1779 if (status != 0) { 1342 if (status != 0) {
1780 dev_err(dev, "problem starting queue (%d)\n", status); 1343 dev_err(dev, "problem starting queue (%d)\n", status);
1781 return status; 1344 return status;
@@ -1783,20 +1346,38 @@ static int pxa2xx_spi_resume(struct device *dev)
1783 1346
1784 return 0; 1347 return 0;
1785} 1348}
1349#endif
1350
1351#ifdef CONFIG_PM_RUNTIME
1352static int pxa2xx_spi_runtime_suspend(struct device *dev)
1353{
1354 struct driver_data *drv_data = dev_get_drvdata(dev);
1355
1356 clk_disable_unprepare(drv_data->ssp->clk);
1357 return 0;
1358}
1359
1360static int pxa2xx_spi_runtime_resume(struct device *dev)
1361{
1362 struct driver_data *drv_data = dev_get_drvdata(dev);
1363
1364 clk_prepare_enable(drv_data->ssp->clk);
1365 return 0;
1366}
1367#endif
1786 1368
1787static const struct dev_pm_ops pxa2xx_spi_pm_ops = { 1369static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1788 .suspend = pxa2xx_spi_suspend, 1370 SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
1789 .resume = pxa2xx_spi_resume, 1371 SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
1372 pxa2xx_spi_runtime_resume, NULL)
1790}; 1373};
1791#endif
1792 1374
1793static struct platform_driver driver = { 1375static struct platform_driver driver = {
1794 .driver = { 1376 .driver = {
1795 .name = "pxa2xx-spi", 1377 .name = "pxa2xx-spi",
1796 .owner = THIS_MODULE, 1378 .owner = THIS_MODULE,
1797#ifdef CONFIG_PM
1798 .pm = &pxa2xx_spi_pm_ops, 1379 .pm = &pxa2xx_spi_pm_ops,
1799#endif 1380 .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
1800 }, 1381 },
1801 .probe = pxa2xx_spi_probe, 1382 .probe = pxa2xx_spi_probe,
1802 .remove = pxa2xx_spi_remove, 1383 .remove = pxa2xx_spi_remove,
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 000000000000..5adc2a11c7bc
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,221 @@
1/*
2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3 * Copyright (C) 2013, Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef SPI_PXA2XX_H
11#define SPI_PXA2XX_H
12
13#include <linux/atomic.h>
14#include <linux/dmaengine.h>
15#include <linux/errno.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/pxa2xx_ssp.h>
20#include <linux/scatterlist.h>
21#include <linux/sizes.h>
22#include <linux/spi/spi.h>
23#include <linux/spi/pxa2xx_spi.h>
24
25struct driver_data {
26 /* Driver model hookup */
27 struct platform_device *pdev;
28
29 /* SSP Info */
30 struct ssp_device *ssp;
31
32 /* SPI framework hookup */
33 enum pxa_ssp_type ssp_type;
34 struct spi_master *master;
35
36 /* PXA hookup */
37 struct pxa2xx_spi_master *master_info;
38
39 /* PXA private DMA setup stuff */
40 int rx_channel;
41 int tx_channel;
42 u32 *null_dma_buf;
43
44 /* SSP register addresses */
45 void __iomem *ioaddr;
46 u32 ssdr_physical;
47
48 /* SSP masks*/
49 u32 dma_cr1;
50 u32 int_cr1;
51 u32 clear_sr;
52 u32 mask_sr;
53
54 /* Maximun clock rate */
55 unsigned long max_clk_rate;
56
57 /* Message Transfer pump */
58 struct tasklet_struct pump_transfers;
59
60 /* DMA engine support */
61 struct dma_chan *rx_chan;
62 struct dma_chan *tx_chan;
63 struct sg_table rx_sgt;
64 struct sg_table tx_sgt;
65 int rx_nents;
66 int tx_nents;
67 void *dummy;
68 atomic_t dma_running;
69
70 /* Current message transfer state info */
71 struct spi_message *cur_msg;
72 struct spi_transfer *cur_transfer;
73 struct chip_data *cur_chip;
74 size_t len;
75 void *tx;
76 void *tx_end;
77 void *rx;
78 void *rx_end;
79 int dma_mapped;
80 dma_addr_t rx_dma;
81 dma_addr_t tx_dma;
82 size_t rx_map_len;
83 size_t tx_map_len;
84 u8 n_bytes;
85 int (*write)(struct driver_data *drv_data);
86 int (*read)(struct driver_data *drv_data);
87 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
88 void (*cs_control)(u32 command);
89
90 void __iomem *lpss_base;
91};
92
93struct chip_data {
94 u32 cr0;
95 u32 cr1;
96 u32 psp;
97 u32 timeout;
98 u8 n_bytes;
99 u32 dma_burst_size;
100 u32 threshold;
101 u32 dma_threshold;
102 u16 lpss_rx_threshold;
103 u16 lpss_tx_threshold;
104 u8 enable_dma;
105 u8 bits_per_word;
106 u32 speed_hz;
107 union {
108 int gpio_cs;
109 unsigned int frm;
110 };
111 int gpio_cs_inverted;
112 int (*write)(struct driver_data *drv_data);
113 int (*read)(struct driver_data *drv_data);
114 void (*cs_control)(u32 command);
115};
116
117#define DEFINE_SSP_REG(reg, off) \
118static inline u32 read_##reg(void const __iomem *p) \
119{ return __raw_readl(p + (off)); } \
120\
121static inline void write_##reg(u32 v, void __iomem *p) \
122{ __raw_writel(v, p + (off)); }
123
124DEFINE_SSP_REG(SSCR0, 0x00)
125DEFINE_SSP_REG(SSCR1, 0x04)
126DEFINE_SSP_REG(SSSR, 0x08)
127DEFINE_SSP_REG(SSITR, 0x0c)
128DEFINE_SSP_REG(SSDR, 0x10)
129DEFINE_SSP_REG(SSTO, 0x28)
130DEFINE_SSP_REG(SSPSP, 0x2c)
131DEFINE_SSP_REG(SSITF, SSITF)
132DEFINE_SSP_REG(SSIRF, SSIRF)
133
134#define START_STATE ((void *)0)
135#define RUNNING_STATE ((void *)1)
136#define DONE_STATE ((void *)2)
137#define ERROR_STATE ((void *)-1)
138
139#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
140#define DMA_ALIGNMENT 8
141
142static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
143{
144 if (drv_data->ssp_type == PXA25x_SSP)
145 return 1;
146 if (drv_data->ssp_type == CE4100_SSP)
147 return 1;
148 return 0;
149}
150
151static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
152{
153 void __iomem *reg = drv_data->ioaddr;
154
155 if (drv_data->ssp_type == CE4100_SSP)
156 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
157
158 write_SSSR(val, reg);
159}
160
161extern int pxa2xx_spi_flush(struct driver_data *drv_data);
162extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
163
164/*
165 * Select the right DMA implementation.
166 */
167#if defined(CONFIG_SPI_PXA2XX_PXADMA)
168#define SPI_PXA2XX_USE_DMA 1
169#define MAX_DMA_LEN 8191
170#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
171#elif defined(CONFIG_SPI_PXA2XX_DMA)
172#define SPI_PXA2XX_USE_DMA 1
173#define MAX_DMA_LEN SZ_64K
174#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
175#else
176#undef SPI_PXA2XX_USE_DMA
177#define MAX_DMA_LEN 0
178#define DEFAULT_DMA_CR1 0
179#endif
180
181#ifdef SPI_PXA2XX_USE_DMA
182extern bool pxa2xx_spi_dma_is_possible(size_t len);
183extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
184extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
185extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
186extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
187extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
188extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
189extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
190extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
191 struct spi_device *spi,
192 u8 bits_per_word,
193 u32 *burst_code,
194 u32 *threshold);
195#else
196static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
197static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
198{
199 return 0;
200}
201#define pxa2xx_spi_dma_transfer NULL
202static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
203 u32 dma_burst) {}
204static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
205static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
206{
207 return 0;
208}
209static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
210static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
211static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
212 struct spi_device *spi,
213 u8 bits_per_word,
214 u32 *burst_code,
215 u32 *threshold)
216{
217 return -ENODEV;
218}
219#endif
220
221#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ad93231a8038..8f492ed317cc 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -62,7 +62,7 @@
62#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9) 62#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
63#define S3C64XX_SPI_CLKSEL_SRCSHFT 9 63#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
64#define S3C64XX_SPI_ENCLK_ENABLE (1<<8) 64#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
65#define S3C64XX_SPI_PSR_MASK 0xff 65#define S3C64XX_SPI_PSR_MASK 0xff
66 66
67#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29) 67#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
68#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29) 68#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
@@ -697,7 +697,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
697 INIT_COMPLETION(sdd->xfer_completion); 697 INIT_COMPLETION(sdd->xfer_completion);
698 698
699 /* Only BPW and Speed may change across transfers */ 699 /* Only BPW and Speed may change across transfers */
700 bpw = xfer->bits_per_word ? : spi->bits_per_word; 700 bpw = xfer->bits_per_word;
701 speed = xfer->speed_hz ? : spi->max_speed_hz; 701 speed = xfer->speed_hz ? : spi->max_speed_hz;
702 702
703 if (xfer->len % (bpw / 8)) { 703 if (xfer->len % (bpw / 8)) {
@@ -743,8 +743,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
743 sdd->regs + S3C64XX_SPI_SLAVE_SEL); 743 sdd->regs + S3C64XX_SPI_SLAVE_SEL);
744 744
745 if (status) { 745 if (status) {
746 dev_err(&spi->dev, "I/O Error: " 746 dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
747 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
748 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, 747 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
749 (sdd->state & RXBUSY) ? 'f' : 'p', 748 (sdd->state & RXBUSY) ? 'f' : 'p',
750 (sdd->state & TXBUSY) ? 'f' : 'p', 749 (sdd->state & TXBUSY) ? 'f' : 'p',
@@ -799,7 +798,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
799 798
800 /* Acquire DMA channels */ 799 /* Acquire DMA channels */
801 while (!acquire_dma(sdd)) 800 while (!acquire_dma(sdd))
802 msleep(10); 801 usleep_range(10000, 11000);
803 802
804 pm_runtime_get_sync(&sdd->pdev->dev); 803 pm_runtime_get_sync(&sdd->pdev->dev);
805 804
@@ -841,16 +840,14 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
841 840
842 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 841 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
843 if (!cs) { 842 if (!cs) {
844 dev_err(&spi->dev, "could not allocate memory for controller" 843 dev_err(&spi->dev, "could not allocate memory for controller data\n");
845 " data\n");
846 of_node_put(data_np); 844 of_node_put(data_np);
847 return ERR_PTR(-ENOMEM); 845 return ERR_PTR(-ENOMEM);
848 } 846 }
849 847
850 cs->line = of_get_named_gpio(data_np, "cs-gpio", 0); 848 cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
851 if (!gpio_is_valid(cs->line)) { 849 if (!gpio_is_valid(cs->line)) {
852 dev_err(&spi->dev, "chip select gpio is not specified or " 850 dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
853 "invalid\n");
854 kfree(cs); 851 kfree(cs);
855 of_node_put(data_np); 852 of_node_put(data_np);
856 return ERR_PTR(-EINVAL); 853 return ERR_PTR(-EINVAL);
@@ -957,6 +954,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
957 if (spi->max_speed_hz >= speed) { 954 if (spi->max_speed_hz >= speed) {
958 spi->max_speed_hz = speed; 955 spi->max_speed_hz = speed;
959 } else { 956 } else {
957 dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
958 spi->max_speed_hz);
960 err = -EINVAL; 959 err = -EINVAL;
961 goto setup_exit; 960 goto setup_exit;
962 } 961 }
@@ -1076,8 +1075,8 @@ static int s3c64xx_spi_get_dmares(
1076 if (!sdd->pdev->dev.of_node) { 1075 if (!sdd->pdev->dev.of_node) {
1077 res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1); 1076 res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
1078 if (!res) { 1077 if (!res) {
1079 dev_err(&pdev->dev, "Unable to get SPI-%s dma " 1078 dev_err(&pdev->dev, "Unable to get SPI-%s dma resource\n",
1080 "resource\n", chan_str); 1079 chan_str);
1081 return -ENXIO; 1080 return -ENXIO;
1082 } 1081 }
1083 dma_data->dmach = res->start; 1082 dma_data->dmach = res->start;
@@ -1133,8 +1132,7 @@ static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
1133 gpio_free(sdd->gpios[idx]); 1132 gpio_free(sdd->gpios[idx]);
1134} 1133}
1135 1134
1136static struct s3c64xx_spi_info * s3c64xx_spi_parse_dt( 1135static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1137 struct device *dev)
1138{ 1136{
1139 struct s3c64xx_spi_info *sci; 1137 struct s3c64xx_spi_info *sci;
1140 u32 temp; 1138 u32 temp;
@@ -1146,16 +1144,14 @@ static struct s3c64xx_spi_info * s3c64xx_spi_parse_dt(
1146 } 1144 }
1147 1145
1148 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) { 1146 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1149 dev_warn(dev, "spi bus clock parent not specified, using " 1147 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1150 "clock at index 0 as parent\n");
1151 sci->src_clk_nr = 0; 1148 sci->src_clk_nr = 0;
1152 } else { 1149 } else {
1153 sci->src_clk_nr = temp; 1150 sci->src_clk_nr = temp;
1154 } 1151 }
1155 1152
1156 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) { 1153 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1157 dev_warn(dev, "number of chip select lines not specified, " 1154 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1158 "assuming 1 chip select line\n");
1159 sci->num_cs = 1; 1155 sci->num_cs = 1;
1160 } else { 1156 } else {
1161 sci->num_cs = temp; 1157 sci->num_cs = temp;
@@ -1195,7 +1191,7 @@ static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1195 platform_get_device_id(pdev)->driver_data; 1191 platform_get_device_id(pdev)->driver_data;
1196} 1192}
1197 1193
1198static int __init s3c64xx_spi_probe(struct platform_device *pdev) 1194static int s3c64xx_spi_probe(struct platform_device *pdev)
1199{ 1195{
1200 struct resource *mem_res; 1196 struct resource *mem_res;
1201 struct s3c64xx_spi_driver_data *sdd; 1197 struct s3c64xx_spi_driver_data *sdd;
@@ -1245,8 +1241,8 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1245 if (pdev->dev.of_node) { 1241 if (pdev->dev.of_node) {
1246 ret = of_alias_get_id(pdev->dev.of_node, "spi"); 1242 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1247 if (ret < 0) { 1243 if (ret < 0) {
1248 dev_err(&pdev->dev, "failed to get alias id, " 1244 dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1249 "errno %d\n", ret); 1245 ret);
1250 goto err0; 1246 goto err0;
1251 } 1247 }
1252 sdd->port_id = ret; 1248 sdd->port_id = ret;
@@ -1280,7 +1276,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1280 if (sdd->regs == NULL) { 1276 if (sdd->regs == NULL) {
1281 dev_err(&pdev->dev, "Unable to remap IO\n"); 1277 dev_err(&pdev->dev, "Unable to remap IO\n");
1282 ret = -ENXIO; 1278 ret = -ENXIO;
1283 goto err1; 1279 goto err0;
1284 } 1280 }
1285 1281
1286 if (!sci->cfg_gpio && pdev->dev.of_node) { 1282 if (!sci->cfg_gpio && pdev->dev.of_node) {
@@ -1289,36 +1285,36 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1289 } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) { 1285 } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
1290 dev_err(&pdev->dev, "Unable to config gpio\n"); 1286 dev_err(&pdev->dev, "Unable to config gpio\n");
1291 ret = -EBUSY; 1287 ret = -EBUSY;
1292 goto err2; 1288 goto err0;
1293 } 1289 }
1294 1290
1295 /* Setup clocks */ 1291 /* Setup clocks */
1296 sdd->clk = clk_get(&pdev->dev, "spi"); 1292 sdd->clk = devm_clk_get(&pdev->dev, "spi");
1297 if (IS_ERR(sdd->clk)) { 1293 if (IS_ERR(sdd->clk)) {
1298 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); 1294 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1299 ret = PTR_ERR(sdd->clk); 1295 ret = PTR_ERR(sdd->clk);
1300 goto err3; 1296 goto err1;
1301 } 1297 }
1302 1298
1303 if (clk_prepare_enable(sdd->clk)) { 1299 if (clk_prepare_enable(sdd->clk)) {
1304 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); 1300 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1305 ret = -EBUSY; 1301 ret = -EBUSY;
1306 goto err4; 1302 goto err1;
1307 } 1303 }
1308 1304
1309 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr); 1305 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1310 sdd->src_clk = clk_get(&pdev->dev, clk_name); 1306 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1311 if (IS_ERR(sdd->src_clk)) { 1307 if (IS_ERR(sdd->src_clk)) {
1312 dev_err(&pdev->dev, 1308 dev_err(&pdev->dev,
1313 "Unable to acquire clock '%s'\n", clk_name); 1309 "Unable to acquire clock '%s'\n", clk_name);
1314 ret = PTR_ERR(sdd->src_clk); 1310 ret = PTR_ERR(sdd->src_clk);
1315 goto err5; 1311 goto err2;
1316 } 1312 }
1317 1313
1318 if (clk_prepare_enable(sdd->src_clk)) { 1314 if (clk_prepare_enable(sdd->src_clk)) {
1319 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); 1315 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1320 ret = -EBUSY; 1316 ret = -EBUSY;
1321 goto err6; 1317 goto err2;
1322 } 1318 }
1323 1319
1324 /* Setup Deufult Mode */ 1320 /* Setup Deufult Mode */
@@ -1328,11 +1324,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1328 init_completion(&sdd->xfer_completion); 1324 init_completion(&sdd->xfer_completion);
1329 INIT_LIST_HEAD(&sdd->queue); 1325 INIT_LIST_HEAD(&sdd->queue);
1330 1326
1331 ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd); 1327 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1328 "spi-s3c64xx", sdd);
1332 if (ret != 0) { 1329 if (ret != 0) {
1333 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n", 1330 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1334 irq, ret); 1331 irq, ret);
1335 goto err7; 1332 goto err3;
1336 } 1333 }
1337 1334
1338 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN | 1335 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
@@ -1342,11 +1339,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1342 if (spi_register_master(master)) { 1339 if (spi_register_master(master)) {
1343 dev_err(&pdev->dev, "cannot register SPI master\n"); 1340 dev_err(&pdev->dev, "cannot register SPI master\n");
1344 ret = -EBUSY; 1341 ret = -EBUSY;
1345 goto err8; 1342 goto err3;
1346 } 1343 }
1347 1344
1348 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " 1345 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1349 "with %d Slaves attached\n",
1350 sdd->port_id, master->num_chipselect); 1346 sdd->port_id, master->num_chipselect);
1351 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", 1347 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
1352 mem_res->end, mem_res->start, 1348 mem_res->end, mem_res->start,
@@ -1356,21 +1352,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1356 1352
1357 return 0; 1353 return 0;
1358 1354
1359err8: 1355err3:
1360 free_irq(irq, sdd);
1361err7:
1362 clk_disable_unprepare(sdd->src_clk); 1356 clk_disable_unprepare(sdd->src_clk);
1363err6: 1357err2:
1364 clk_put(sdd->src_clk);
1365err5:
1366 clk_disable_unprepare(sdd->clk); 1358 clk_disable_unprepare(sdd->clk);
1367err4: 1359err1:
1368 clk_put(sdd->clk);
1369err3:
1370 if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node) 1360 if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
1371 s3c64xx_spi_dt_gpio_free(sdd); 1361 s3c64xx_spi_dt_gpio_free(sdd);
1372err2:
1373err1:
1374err0: 1362err0:
1375 platform_set_drvdata(pdev, NULL); 1363 platform_set_drvdata(pdev, NULL);
1376 spi_master_put(master); 1364 spi_master_put(master);
@@ -1389,13 +1377,9 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
1389 1377
1390 writel(0, sdd->regs + S3C64XX_SPI_INT_EN); 1378 writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1391 1379
1392 free_irq(platform_get_irq(pdev, 0), sdd);
1393
1394 clk_disable_unprepare(sdd->src_clk); 1380 clk_disable_unprepare(sdd->src_clk);
1395 clk_put(sdd->src_clk);
1396 1381
1397 clk_disable_unprepare(sdd->clk); 1382 clk_disable_unprepare(sdd->clk);
1398 clk_put(sdd->clk);
1399 1383
1400 if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node) 1384 if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
1401 s3c64xx_spi_dt_gpio_free(sdd); 1385 s3c64xx_spi_dt_gpio_free(sdd);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 96358d0eabb7..8b40d0884f8b 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -20,6 +20,7 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/of.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/pm_runtime.h> 25#include <linux/pm_runtime.h>
25 26
@@ -592,6 +593,37 @@ static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
592 return 0; 593 return 0;
593} 594}
594 595
596#ifdef CONFIG_OF
597static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
598{
599 struct sh_msiof_spi_info *info;
600 struct device_node *np = dev->of_node;
601 u32 num_cs = 0;
602
603 info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
604 if (!info) {
605 dev_err(dev, "failed to allocate setup data\n");
606 return NULL;
607 }
608
609 /* Parse the MSIOF properties */
610 of_property_read_u32(np, "num-cs", &num_cs);
611 of_property_read_u32(np, "renesas,tx-fifo-size",
612 &info->tx_fifo_override);
613 of_property_read_u32(np, "renesas,rx-fifo-size",
614 &info->rx_fifo_override);
615
616 info->num_chipselect = num_cs;
617
618 return info;
619}
620#else
621static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
622{
623 return NULL;
624}
625#endif
626
595static int sh_msiof_spi_probe(struct platform_device *pdev) 627static int sh_msiof_spi_probe(struct platform_device *pdev)
596{ 628{
597 struct resource *r; 629 struct resource *r;
@@ -610,7 +642,17 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
610 p = spi_master_get_devdata(master); 642 p = spi_master_get_devdata(master);
611 643
612 platform_set_drvdata(pdev, p); 644 platform_set_drvdata(pdev, p);
613 p->info = pdev->dev.platform_data; 645 if (pdev->dev.of_node)
646 p->info = sh_msiof_spi_parse_dt(&pdev->dev);
647 else
648 p->info = pdev->dev.platform_data;
649
650 if (!p->info) {
651 dev_err(&pdev->dev, "failed to obtain device info\n");
652 ret = -ENXIO;
653 goto err1;
654 }
655
614 init_completion(&p->done); 656 init_completion(&p->done);
615 657
616 p->clk = clk_get(&pdev->dev, NULL); 658 p->clk = clk_get(&pdev->dev, NULL);
@@ -715,6 +757,17 @@ static int sh_msiof_spi_runtime_nop(struct device *dev)
715 return 0; 757 return 0;
716} 758}
717 759
760#ifdef CONFIG_OF
761static const struct of_device_id sh_msiof_match[] = {
762 { .compatible = "renesas,sh-msiof", },
763 { .compatible = "renesas,sh-mobile-msiof", },
764 {},
765};
766MODULE_DEVICE_TABLE(of, sh_msiof_match);
767#else
768#define sh_msiof_match NULL
769#endif
770
718static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { 771static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
719 .runtime_suspend = sh_msiof_spi_runtime_nop, 772 .runtime_suspend = sh_msiof_spi_runtime_nop,
720 .runtime_resume = sh_msiof_spi_runtime_nop, 773 .runtime_resume = sh_msiof_spi_runtime_nop,
@@ -727,6 +780,7 @@ static struct platform_driver sh_msiof_spi_drv = {
727 .name = "spi_sh_msiof", 780 .name = "spi_sh_msiof",
728 .owner = THIS_MODULE, 781 .owner = THIS_MODULE,
729 .pm = &sh_msiof_spi_dev_pm_ops, 782 .pm = &sh_msiof_spi_dev_pm_ops,
783 .of_match_table = sh_msiof_match,
730 }, 784 },
731}; 785};
732module_platform_driver(sh_msiof_spi_drv); 786module_platform_driver(sh_msiof_spi_drv);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index e0f43a512e84..6a5626d146b7 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -382,8 +382,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
382 382
383 sspi = spi_master_get_devdata(spi->master); 383 sspi = spi_master_get_devdata(spi->master);
384 384
385 bits_per_word = t && t->bits_per_word ? t->bits_per_word : 385 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
386 spi->bits_per_word;
387 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; 386 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
388 387
389 /* Enable IO mode for RX, TX */ 388 /* Enable IO mode for RX, TX */
@@ -570,7 +569,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
570 ret = -EINVAL; 569 ret = -EINVAL;
571 goto free_pin; 570 goto free_pin;
572 } 571 }
573 clk_enable(sspi->clk); 572 clk_prepare_enable(sspi->clk);
574 sspi->ctrl_freq = clk_get_rate(sspi->clk); 573 sspi->ctrl_freq = clk_get_rate(sspi->clk);
575 574
576 init_completion(&sspi->done); 575 init_completion(&sspi->done);
@@ -594,7 +593,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
594 return 0; 593 return 0;
595 594
596free_clk: 595free_clk:
597 clk_disable(sspi->clk); 596 clk_disable_unprepare(sspi->clk);
598 clk_put(sspi->clk); 597 clk_put(sspi->clk);
599free_pin: 598free_pin:
600 pinctrl_put(sspi->p); 599 pinctrl_put(sspi->p);
@@ -618,7 +617,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
618 if (sspi->chipselect[i] > 0) 617 if (sspi->chipselect[i] > 0)
619 gpio_free(sspi->chipselect[i]); 618 gpio_free(sspi->chipselect[i]);
620 } 619 }
621 clk_disable(sspi->clk); 620 clk_disable_unprepare(sspi->clk);
622 clk_put(sspi->clk); 621 clk_put(sspi->clk);
623 pinctrl_put(sspi->p); 622 pinctrl_put(sspi->p);
624 spi_master_put(master); 623 spi_master_put(master);
@@ -659,6 +658,7 @@ static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
659 658
660static const struct of_device_id spi_sirfsoc_of_match[] = { 659static const struct of_device_id spi_sirfsoc_of_match[] = {
661 { .compatible = "sirf,prima2-spi", }, 660 { .compatible = "sirf,prima2-spi", },
661 { .compatible = "sirf,marco-spi", },
662 {} 662 {}
663}; 663};
664MODULE_DEVICE_TABLE(of, sirfsoc_spi_of_match); 664MODULE_DEVICE_TABLE(of, sirfsoc_spi_of_match);
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 448a8cc71df3..9a42c158e245 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -269,9 +269,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
269 u32 speed; 269 u32 speed;
270 unsigned long command; 270 unsigned long command;
271 271
272 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; 272 speed = t->speed_hz;
273 if (!speed)
274 speed = tsd->spi_max_frequency;
275 if (speed != tsd->cur_speed) { 273 if (speed != tsd->cur_speed) {
276 clk_set_rate(tsd->clk, speed); 274 clk_set_rate(tsd->clk, speed);
277 tsd->cur_speed = speed; 275 tsd->cur_speed = speed;
@@ -319,6 +317,15 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
319 return tegra_sflash_start_cpu_based_transfer(tsd, t); 317 return tegra_sflash_start_cpu_based_transfer(tsd, t);
320} 318}
321 319
320static int tegra_sflash_setup(struct spi_device *spi)
321{
322 struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
323
324 /* Set speed to the spi max fequency if spi device has not set */
325 spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
326 return 0;
327}
328
322static int tegra_sflash_transfer_one_message(struct spi_master *master, 329static int tegra_sflash_transfer_one_message(struct spi_master *master,
323 struct spi_message *msg) 330 struct spi_message *msg)
324{ 331{
@@ -492,6 +499,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
492 499
493 /* the spi->mode bits understood by this driver: */ 500 /* the spi->mode bits understood by this driver: */
494 master->mode_bits = SPI_CPOL | SPI_CPHA; 501 master->mode_bits = SPI_CPOL | SPI_CPHA;
502 master->setup = tegra_sflash_setup;
495 master->transfer_one_message = tegra_sflash_transfer_one_message; 503 master->transfer_one_message = tegra_sflash_transfer_one_message;
496 master->num_chipselect = MAX_CHIP_SELECT; 504 master->num_chipselect = MAX_CHIP_SELECT;
497 master->bus_num = -1; 505 master->bus_num = -1;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 651167f2e0af..8d51db8dea6d 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -284,8 +284,7 @@ static unsigned tegra_slink_calculate_curr_xfer_param(
284 unsigned max_len; 284 unsigned max_len;
285 unsigned total_fifo_words; 285 unsigned total_fifo_words;
286 286
287 bits_per_word = t->bits_per_word ? t->bits_per_word : 287 bits_per_word = t->bits_per_word;
288 spi->bits_per_word;
289 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1; 288 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
290 289
291 if (bits_per_word == 8 || bits_per_word == 16) { 290 if (bits_per_word == 8 || bits_per_word == 16) {
@@ -378,8 +377,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
378 } else { 377 } else {
379 unsigned int bits_per_word; 378 unsigned int bits_per_word;
380 379
381 bits_per_word = t->bits_per_word ? t->bits_per_word : 380 bits_per_word = t->bits_per_word;
382 tspi->cur_spi->bits_per_word;
383 for (count = 0; count < rx_full_count; count++) { 381 for (count = 0; count < rx_full_count; count++) {
384 x = tegra_slink_readl(tspi, SLINK_RX_FIFO); 382 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
385 for (i = 0; (i < tspi->bytes_per_word); i++) 383 for (i = 0; (i < tspi->bytes_per_word); i++)
@@ -444,8 +442,7 @@ static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
444 unsigned int x; 442 unsigned int x;
445 unsigned int rx_mask, bits_per_word; 443 unsigned int rx_mask, bits_per_word;
446 444
447 bits_per_word = t->bits_per_word ? t->bits_per_word : 445 bits_per_word = t->bits_per_word;
448 tspi->cur_spi->bits_per_word;
449 rx_mask = (1 << bits_per_word) - 1; 446 rx_mask = (1 << bits_per_word) - 1;
450 for (count = 0; count < tspi->curr_dma_words; count++) { 447 for (count = 0; count < tspi->curr_dma_words; count++) {
451 x = tspi->rx_dma_buf[count]; 448 x = tspi->rx_dma_buf[count];
@@ -728,9 +725,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
728 unsigned long command2; 725 unsigned long command2;
729 726
730 bits_per_word = t->bits_per_word; 727 bits_per_word = t->bits_per_word;
731 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; 728 speed = t->speed_hz;
732 if (!speed)
733 speed = tspi->spi_max_frequency;
734 if (speed != tspi->cur_speed) { 729 if (speed != tspi->cur_speed) {
735 clk_set_rate(tspi->clk, speed * 4); 730 clk_set_rate(tspi->clk, speed * 4);
736 tspi->cur_speed = speed; 731 tspi->cur_speed = speed;
@@ -841,6 +836,8 @@ static int tegra_slink_setup(struct spi_device *spi)
841 836
842 BUG_ON(spi->chip_select >= MAX_CHIP_SELECT); 837 BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
843 838
839 /* Set speed to the spi max fequency if spi device has not set */
840 spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
844 ret = pm_runtime_get_sync(tspi->dev); 841 ret = pm_runtime_get_sync(tspi->dev);
845 if (ret < 0) { 842 if (ret < 0) {
846 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); 843 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index d5a3cbb646cb..adb853047926 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -189,9 +189,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
189 unsigned int len = t->len; 189 unsigned int len = t->len;
190 unsigned int wsize; 190 unsigned int wsize;
191 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; 191 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
192 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; 192 u8 bits_per_word = t->bits_per_word;
193 193
194 bits_per_word = bits_per_word ? : 8;
195 wsize = bits_per_word >> 3; /* in bytes */ 194 wsize = bits_per_word >> 3; /* in bytes */
196 195
197 if (prev_speed_hz != speed_hz 196 if (prev_speed_hz != speed_hz
@@ -316,9 +315,8 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
316 /* check each transfer's parameters */ 315 /* check each transfer's parameters */
317 list_for_each_entry (t, &m->transfers, transfer_list) { 316 list_for_each_entry (t, &m->transfers, transfer_list) {
318 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; 317 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
319 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; 318 u8 bits_per_word = t->bits_per_word;
320 319
321 bits_per_word = bits_per_word ? : 8;
322 if (!t->tx_buf && !t->rx_buf && t->len) 320 if (!t->tx_buf && !t->rx_buf && t->len)
323 return -EINVAL; 321 return -EINVAL;
324 if (bits_per_word != 8 && bits_per_word != 16) 322 if (bits_per_word != 8 && bits_per_word != 16)
@@ -337,7 +335,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
337 return 0; 335 return 0;
338} 336}
339 337
340static int __init txx9spi_probe(struct platform_device *dev) 338static int txx9spi_probe(struct platform_device *dev)
341{ 339{
342 struct spi_master *master; 340 struct spi_master *master;
343 struct txx9spi *c; 341 struct txx9spi *c;
@@ -432,7 +430,7 @@ exit:
432 return ret; 430 return ret;
433} 431}
434 432
435static int __exit txx9spi_remove(struct platform_device *dev) 433static int txx9spi_remove(struct platform_device *dev)
436{ 434{
437 struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); 435 struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
438 struct txx9spi *c = spi_master_get_devdata(master); 436 struct txx9spi *c = spi_master_get_devdata(master);
@@ -450,7 +448,7 @@ static int __exit txx9spi_remove(struct platform_device *dev)
450MODULE_ALIAS("platform:spi_txx9"); 448MODULE_ALIAS("platform:spi_txx9");
451 449
452static struct platform_driver txx9spi_driver = { 450static struct platform_driver txx9spi_driver = {
453 .remove = __exit_p(txx9spi_remove), 451 .remove = txx9spi_remove,
454 .driver = { 452 .driver = {
455 .name = "spi_txx9", 453 .name = "spi_txx9",
456 .owner = THIS_MODULE, 454 .owner = THIS_MODULE,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 19ee901577da..6f193b02a9e8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -33,7 +33,7 @@
33#include <linux/of_gpio.h> 33#include <linux/of_gpio.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <linux/sched.h> 36#include <linux/sched/rt.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/kthread.h> 38#include <linux/kthread.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
@@ -1059,15 +1059,14 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
1059#ifdef CONFIG_OF 1059#ifdef CONFIG_OF
1060static int of_spi_register_master(struct spi_master *master) 1060static int of_spi_register_master(struct spi_master *master)
1061{ 1061{
1062 u16 nb; 1062 int nb, i, *cs;
1063 int i, *cs;
1064 struct device_node *np = master->dev.of_node; 1063 struct device_node *np = master->dev.of_node;
1065 1064
1066 if (!np) 1065 if (!np)
1067 return 0; 1066 return 0;
1068 1067
1069 nb = of_gpio_named_count(np, "cs-gpios"); 1068 nb = of_gpio_named_count(np, "cs-gpios");
1070 master->num_chipselect = max(nb, master->num_chipselect); 1069 master->num_chipselect = max(nb, (int)master->num_chipselect);
1071 1070
1072 if (nb < 1) 1071 if (nb < 1)
1073 return 0; 1072 return 0;
@@ -1080,7 +1079,8 @@ static int of_spi_register_master(struct spi_master *master)
1080 if (!master->cs_gpios) 1079 if (!master->cs_gpios)
1081 return -ENOMEM; 1080 return -ENOMEM;
1082 1081
1083 memset(cs, -EINVAL, master->num_chipselect); 1082 for (i = 0; i < master->num_chipselect; i++)
1083 cs[i] = -EINVAL;
1084 1084
1085 for (i = 0; i < nb; i++) 1085 for (i = 0; i < nb; i++)
1086 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1086 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
@@ -1135,6 +1135,9 @@ int spi_register_master(struct spi_master *master)
1135 if (master->num_chipselect == 0) 1135 if (master->num_chipselect == 0)
1136 return -EINVAL; 1136 return -EINVAL;
1137 1137
1138 if ((master->bus_num < 0) && master->dev.of_node)
1139 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1140
1138 /* convention: dynamically assigned bus IDs count down from the max */ 1141 /* convention: dynamically assigned bus IDs count down from the max */
1139 if (master->bus_num < 0) { 1142 if (master->bus_num < 0) {
1140 /* FIXME switch to an IDR based scheme, something like 1143 /* FIXME switch to an IDR based scheme, something like
@@ -1366,12 +1369,14 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
1366 } 1369 }
1367 1370
1368 /** 1371 /**
1369 * Set transfer bits_per_word as spi device default if it is not 1372 * Set transfer bits_per_word and max speed as spi device default if
1370 * set for this transfer. 1373 * it is not set for this transfer.
1371 */ 1374 */
1372 list_for_each_entry(xfer, &message->transfers, transfer_list) { 1375 list_for_each_entry(xfer, &message->transfers, transfer_list) {
1373 if (!xfer->bits_per_word) 1376 if (!xfer->bits_per_word)
1374 xfer->bits_per_word = spi->bits_per_word; 1377 xfer->bits_per_word = spi->bits_per_word;
1378 if (!xfer->speed_hz)
1379 xfer->speed_hz = spi->max_speed_hz;
1375 } 1380 }
1376 1381
1377 message->spi = spi; 1382 message->spi = spi;
@@ -1656,7 +1661,8 @@ int spi_write_then_read(struct spi_device *spi,
1656 * using the pre-allocated buffer or the transfer is too large. 1661 * using the pre-allocated buffer or the transfer is too large.
1657 */ 1662 */
1658 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 1663 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
1659 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), GFP_KERNEL); 1664 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
1665 GFP_KERNEL | GFP_DMA);
1660 if (!local_buf) 1666 if (!local_buf)
1661 return -ENOMEM; 1667 return -ENOMEM;
1662 } else { 1668 } else {
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 97ac0a38e3d0..eb2753008ef0 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)
174 174
175 return -1; 175 return -1;
176} 176}
177
178int ssb_gpio_unregister(struct ssb_bus *bus)
179{
180 if (ssb_chipco_available(&bus->chipco) ||
181 ssb_extif_available(&bus->extif)) {
182 return gpiochip_remove(&bus->gpio);
183 } else {
184 SSB_WARN_ON(1);
185 }
186
187 return -1;
188}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 772ad9b5c304..24dc331b4701 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
443 443
444void ssb_bus_unregister(struct ssb_bus *bus) 444void ssb_bus_unregister(struct ssb_bus *bus)
445{ 445{
446 int err;
447
448 err = ssb_gpio_unregister(bus);
449 if (err == -EBUSY)
450 ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
451 else if (err)
452 ssb_dprintk(KERN_ERR PFX
453 "Can not unregister GPIO driver: %i\n", err);
454
446 ssb_buses_lock(); 455 ssb_buses_lock();
447 ssb_devices_unregister(bus); 456 ssb_devices_unregister(bus);
448 list_del(&bus->list); 457 list_del(&bus->list);
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 6c10b66c796c..da38305a2d22 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)
252 252
253#ifdef CONFIG_SSB_DRIVER_GPIO 253#ifdef CONFIG_SSB_DRIVER_GPIO
254extern int ssb_gpio_init(struct ssb_bus *bus); 254extern int ssb_gpio_init(struct ssb_bus *bus);
255extern int ssb_gpio_unregister(struct ssb_bus *bus);
255#else /* CONFIG_SSB_DRIVER_GPIO */ 256#else /* CONFIG_SSB_DRIVER_GPIO */
256static inline int ssb_gpio_init(struct ssb_bus *bus) 257static inline int ssb_gpio_init(struct ssb_bus *bus)
257{ 258{
258 return -ENOTSUPP; 259 return -ENOTSUPP;
259} 260}
261static inline int ssb_gpio_unregister(struct ssb_bus *bus)
262{
263 return 0;
264}
260#endif /* CONFIG_SSB_DRIVER_GPIO */ 265#endif /* CONFIG_SSB_DRIVER_GPIO */
261 266
262#endif /* LINUX_SSB_PRIVATE_H_ */ 267#endif /* LINUX_SSB_PRIVATE_H_ */
diff --git a/drivers/staging/csr/bh.c b/drivers/staging/csr/bh.c
index 1a1f5c79822a..7b133597e923 100644
--- a/drivers/staging/csr/bh.c
+++ b/drivers/staging/csr/bh.c
@@ -15,7 +15,7 @@
15 */ 15 */
16#include "csr_wifi_hip_unifi.h" 16#include "csr_wifi_hip_unifi.h"
17#include "unifi_priv.h" 17#include "unifi_priv.h"
18 18#include <linux/sched/rt.h>
19 19
20/* 20/*
21 * --------------------------------------------------------------------------- 21 * ---------------------------------------------------------------------------
diff --git a/drivers/staging/csr/unifi_sme.c b/drivers/staging/csr/unifi_sme.c
index 7c6c4138fc76..49395da34b7f 100644
--- a/drivers/staging/csr/unifi_sme.c
+++ b/drivers/staging/csr/unifi_sme.c
@@ -15,7 +15,7 @@
15#include "unifi_priv.h" 15#include "unifi_priv.h"
16#include "csr_wifi_hip_unifi.h" 16#include "csr_wifi_hip_unifi.h"
17#include "csr_wifi_hip_conversions.h" 17#include "csr_wifi_hip_conversions.h"
18 18#include <linux/sched/rt.h>
19 19
20 20
21 21
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index fb31b457a56a..c5ceb9d90ea8 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
239 struct mxs_lradc *lradc = iio_priv(iio); 239 struct mxs_lradc *lradc = iio_priv(iio);
240 const uint32_t chan_value = LRADC_CH_ACCUMULATE | 240 const uint32_t chan_value = LRADC_CH_ACCUMULATE |
241 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); 241 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
242 int i, j = 0; 242 unsigned int i, j = 0;
243 243
244 for_each_set_bit(i, iio->active_scan_mask, iio->masklength) { 244 for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {
245 lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); 245 lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index 3525a68d6a75..41d7350d030f 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev,
69 ret = spi_read(st->us, st->buf, 2); 69 ret = spi_read(st->us, st->buf, 2);
70 70
71 if (ret == 0) 71 if (ret == 0)
72 *val = ((st->buf[0] & 0xF) << 8) | st->buf[1]; 72 *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11);
73 mutex_unlock(&st->buf_lock); 73 mutex_unlock(&st->buf_lock);
74 74
75 return ret; 75 return ret;
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 7d3207559265..d44d3ad26fa5 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -21,7 +21,6 @@ config IIO_GPIO_TRIGGER
21config IIO_SYSFS_TRIGGER 21config IIO_SYSFS_TRIGGER
22 tristate "SYSFS trigger" 22 tristate "SYSFS trigger"
23 depends on SYSFS 23 depends on SYSFS
24 depends on HAVE_IRQ_WORK
25 select IRQ_WORK 24 select IRQ_WORK
26 help 25 help
27 Provides support for using SYSFS entry as IIO triggers. 26 Provides support for using SYSFS entry as IIO triggers.
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
index b724a4131435..09f65dc3d2c8 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/staging/omapdrm/Kconfig
@@ -3,8 +3,8 @@ config DRM_OMAP
3 tristate "OMAP DRM" 3 tristate "OMAP DRM"
4 depends on DRM && !CONFIG_FB_OMAP2 4 depends on DRM && !CONFIG_FB_OMAP2
5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM 5 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
6 depends on OMAP2_DSS
6 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
7 select OMAP2_DSS
8 select FB_SYS_FILLRECT 8 select FB_SYS_FILLRECT
9 select FB_SYS_COPYAREA 9 select FB_SYS_COPYAREA
10 select FB_SYS_IMAGEBLIT 10 select FB_SYS_IMAGEBLIT
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index cac320738142..adb8da564cf6 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -296,7 +296,7 @@ fail_config:
296 return ret; 296 return ret;
297} 297}
298 298
299static int quickstart_acpi_remove(struct acpi_device *device, int type) 299static int quickstart_acpi_remove(struct acpi_device *device)
300{ 300{
301 acpi_status status; 301 acpi_status status;
302 struct quickstart_acpi *quickstart; 302 struct quickstart_acpi *quickstart;
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 131afd0c460c..9464f3874346 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -3054,7 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
3054 sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16); 3054 sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
3055 } 3055 }
3056 break; 3056 break;
3057#ifdef CONFIG_PARPORT 3057#ifdef CONFIG_PARPORT_PC
3058 case PCI_DEVICE_ID_MP2S1P : 3058 case PCI_DEVICE_ID_MP2S1P :
3059 sbdev->nr_ports = 2; 3059 sbdev->nr_ports = 2;
3060 3060
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index 6b2ec390e775..806cbf72fb59 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {
90} SRSNCapObject, *PSRSNCapObject; 90} SRSNCapObject, *PSRSNCapObject;
91 91
92// BSS info(AP) 92// BSS info(AP)
93#pragma pack(1)
94typedef struct tagKnownBSS { 93typedef struct tagKnownBSS {
95 // BSS info 94 // BSS info
96 BOOL bActive; 95 BOOL bActive;
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 5d8faf9f96ec..e0d2b07ba608 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -34,7 +34,6 @@
34#include "device.h" 34#include "device.h"
35 35
36/*--------------------- Export Definitions -------------------------*/ 36/*--------------------- Export Definitions -------------------------*/
37#pragma pack(1)
38typedef struct tagSINTData { 37typedef struct tagSINTData {
39 BYTE byTSR0; 38 BYTE byTSR0;
40 BYTE byPkt0; 39 BYTE byPkt0;
diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
index 22710cef751d..ae6e2d237b20 100644
--- a/drivers/staging/vt6656/iocmd.h
+++ b/drivers/staging/vt6656/iocmd.h
@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
95// Ioctl interface structure 95// Ioctl interface structure
96// Command structure 96// Command structure
97// 97//
98#pragma pack(1)
99typedef struct tagSCmdRequest { 98typedef struct tagSCmdRequest {
100 u8 name[16]; 99 u8 name[16];
101 void *data; 100 void *data;
102 u16 wResult; 101 u16 wResult;
103 u16 wCmdCode; 102 u16 wCmdCode;
104} SCmdRequest, *PSCmdRequest; 103} __packed SCmdRequest, *PSCmdRequest;
105 104
106// 105//
107// Scan 106// Scan
@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
111 110
112 u8 ssid[SSID_MAXLEN + 2]; 111 u8 ssid[SSID_MAXLEN + 2];
113 112
114} SCmdScan, *PSCmdScan; 113} __packed SCmdScan, *PSCmdScan;
115 114
116// 115//
117// BSS Join 116// BSS Join
@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
126 BOOL bPSEnable; 125 BOOL bPSEnable;
127 BOOL bShareKeyAuth; 126 BOOL bShareKeyAuth;
128 127
129} SCmdBSSJoin, *PSCmdBSSJoin; 128} __packed SCmdBSSJoin, *PSCmdBSSJoin;
130 129
131// 130//
132// Zonetype Setting 131// Zonetype Setting
@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
137 BOOL bWrite; 136 BOOL bWrite;
138 WZONETYPE ZoneType; 137 WZONETYPE ZoneType;
139 138
140} SCmdZoneTypeSet, *PSCmdZoneTypeSet; 139} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
141 140
142typedef struct tagSWPAResult { 141typedef struct tagSWPAResult {
143 char ifname[100]; 142 char ifname[100];
@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
145 u8 key_mgmt; 144 u8 key_mgmt;
146 u8 eap_type; 145 u8 eap_type;
147 BOOL authenticated; 146 BOOL authenticated;
148} SWPAResult, *PSWPAResult; 147} __packed SWPAResult, *PSWPAResult;
149 148
150typedef struct tagSCmdStartAP { 149typedef struct tagSCmdStartAP {
151 150
@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
157 BOOL bShareKeyAuth; 156 BOOL bShareKeyAuth;
158 u8 byBasicRate; 157 u8 byBasicRate;
159 158
160} SCmdStartAP, *PSCmdStartAP; 159} __packed SCmdStartAP, *PSCmdStartAP;
161 160
162typedef struct tagSCmdSetWEP { 161typedef struct tagSCmdSetWEP {
163 162
@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
167 BOOL bWepKeyAvailable[WEP_NKEYS]; 166 BOOL bWepKeyAvailable[WEP_NKEYS];
168 u32 auWepKeyLength[WEP_NKEYS]; 167 u32 auWepKeyLength[WEP_NKEYS];
169 168
170} SCmdSetWEP, *PSCmdSetWEP; 169} __packed SCmdSetWEP, *PSCmdSetWEP;
171 170
172typedef struct tagSBSSIDItem { 171typedef struct tagSBSSIDItem {
173 172
@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
180 BOOL bWEPOn; 179 BOOL bWEPOn;
181 u32 uRSSI; 180 u32 uRSSI;
182 181
183} SBSSIDItem; 182} __packed SBSSIDItem;
184 183
185 184
186typedef struct tagSBSSIDList { 185typedef struct tagSBSSIDList {
187 186
188 u32 uItem; 187 u32 uItem;
189 SBSSIDItem sBSSIDList[0]; 188 SBSSIDItem sBSSIDList[0];
190} SBSSIDList, *PSBSSIDList; 189} __packed SBSSIDList, *PSBSSIDList;
191 190
192 191
193typedef struct tagSNodeItem { 192typedef struct tagSNodeItem {
@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
208 u32 uTxAttempts; 207 u32 uTxAttempts;
209 u16 wFailureRatio; 208 u16 wFailureRatio;
210 209
211} SNodeItem; 210} __packed SNodeItem;
212 211
213 212
214typedef struct tagSNodeList { 213typedef struct tagSNodeList {
@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
216 u32 uItem; 215 u32 uItem;
217 SNodeItem sNodeList[0]; 216 SNodeItem sNodeList[0];
218 217
219} SNodeList, *PSNodeList; 218} __packed SNodeList, *PSNodeList;
220 219
221 220
222typedef struct tagSCmdLinkStatus { 221typedef struct tagSCmdLinkStatus {
@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
229 u32 uChannel; 228 u32 uChannel;
230 u32 uLinkRate; 229 u32 uLinkRate;
231 230
232} SCmdLinkStatus, *PSCmdLinkStatus; 231} __packed SCmdLinkStatus, *PSCmdLinkStatus;
233 232
234// 233//
235// 802.11 counter 234// 802.11 counter
@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
247 u32 ReceivedFragmentCount; 246 u32 ReceivedFragmentCount;
248 u32 MulticastReceivedFrameCount; 247 u32 MulticastReceivedFrameCount;
249 u32 FCSErrorCount; 248 u32 FCSErrorCount;
250} SDot11MIBCount, *PSDot11MIBCount; 249} __packed SDot11MIBCount, *PSDot11MIBCount;
251 250
252 251
253 252
@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
355 u32 ullTxBroadcastBytes[2]; 354 u32 ullTxBroadcastBytes[2];
356 u32 ullTxMulticastBytes[2]; 355 u32 ullTxMulticastBytes[2];
357 u32 ullTxDirectedBytes[2]; 356 u32 ullTxDirectedBytes[2];
358} SStatMIBCount, *PSStatMIBCount; 357} __packed SStatMIBCount, *PSStatMIBCount;
359 358
360typedef struct tagSCmdValue { 359typedef struct tagSCmdValue {
361 360
362 u32 dwValue; 361 u32 dwValue;
363 362
364} SCmdValue, *PSCmdValue; 363} __packed SCmdValue, *PSCmdValue;
365 364
366// 365//
367// hostapd & viawget ioctl related 366// hostapd & viawget ioctl related
@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
431 u8 ssid[32]; 430 u8 ssid[32];
432 } scan_req; 431 } scan_req;
433 } u; 432 } u;
434}; 433} __packed;
435 434
436/*--------------------- Export Classes ----------------------------*/ 435/*--------------------- Export Classes ----------------------------*/
437 436
diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
index 959c8868f6e2..2522ddec718d 100644
--- a/drivers/staging/vt6656/iowpa.h
+++ b/drivers/staging/vt6656/iowpa.h
@@ -67,12 +67,11 @@ enum {
67 67
68 68
69 69
70#pragma pack(1)
71typedef struct viawget_wpa_header { 70typedef struct viawget_wpa_header {
72 u8 type; 71 u8 type;
73 u16 req_ie_len; 72 u16 req_ie_len;
74 u16 resp_ie_len; 73 u16 resp_ie_len;
75} viawget_wpa_header; 74} __packed viawget_wpa_header;
76 75
77struct viawget_wpa_param { 76struct viawget_wpa_param {
78 u32 cmd; 77 u32 cmd;
@@ -113,9 +112,8 @@ struct viawget_wpa_param {
113 u8 *buf; 112 u8 *buf;
114 } scan_results; 113 } scan_results;
115 } u; 114 } u;
116}; 115} __packed;
117 116
118#pragma pack(1)
119struct viawget_scan_result { 117struct viawget_scan_result {
120 u8 bssid[6]; 118 u8 bssid[6];
121 u8 ssid[32]; 119 u8 ssid[32];
@@ -130,7 +128,7 @@ struct viawget_scan_result {
130 int noise; 128 int noise;
131 int level; 129 int level;
132 int maxrate; 130 int maxrate;
133}; 131} __packed;
134 132
135/*--------------------- Export Classes ----------------------------*/ 133/*--------------------- Export Classes ----------------------------*/
136 134
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 4efa9bc0fcf0..89bfd858bb28 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
406 /* SSID */ 406 /* SSID */
407 req->ssid.status = P80211ENUM_msgitem_status_data_ok; 407 req->ssid.status = P80211ENUM_msgitem_status_data_ok;
408 req->ssid.data.len = le16_to_cpu(item->ssid.len); 408 req->ssid.data.len = le16_to_cpu(item->ssid.len);
409 req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); 409 req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
410 memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); 410 memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
411 411
412 /* supported rates */ 412 /* supported rates */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e2695101bb99..f2aa7543d20a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
941 941
942int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 942int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
943{ 943{
944 int block_size = dev->dev_attrib.block_size;
945
944 if (dev->export_count) { 946 if (dev->export_count) {
945 pr_err("dev[%p]: Unable to change SE Device" 947 pr_err("dev[%p]: Unable to change SE Device"
946 " fabric_max_sectors while export_count is %d\n", 948 " fabric_max_sectors while export_count is %d\n",
@@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
978 /* 980 /*
979 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 981 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
980 */ 982 */
983 if (!block_size) {
984 block_size = 512;
985 pr_warn("Defaulting to 512 for zero block_size\n");
986 }
981 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 987 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
982 dev->dev_attrib.block_size); 988 block_size);
983 989
984 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 990 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
985 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 991 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 810263dfa4a1..c57bbbc7a7d1 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -754,6 +754,11 @@ static int target_fabric_port_link(
754 return -EFAULT; 754 return -EFAULT;
755 } 755 }
756 756
757 if (!(dev->dev_flags & DF_CONFIGURED)) {
758 pr_err("se_device not configured yet, cannot port link\n");
759 return -ENODEV;
760 }
761
757 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; 762 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
758 se_tpg = container_of(to_config_group(tpg_ci), 763 se_tpg = container_of(to_config_group(tpg_ci),
759 struct se_portal_group, tpg_group); 764 struct se_portal_group, tpg_group);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 26a6d183ccb1..a664c664a31a 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
58 buf[7] = dev->dev_attrib.block_size & 0xff; 58 buf[7] = dev->dev_attrib.block_size & 0xff;
59 59
60 rbuf = transport_kmap_data_sg(cmd); 60 rbuf = transport_kmap_data_sg(cmd);
61 if (!rbuf) 61 if (rbuf) {
62 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 62 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
63 63 transport_kunmap_data_sg(cmd);
64 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 64 }
65 transport_kunmap_data_sg(cmd);
66 65
67 target_complete_cmd(cmd, GOOD); 66 target_complete_cmd(cmd, GOOD);
68 return 0; 67 return 0;
@@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
97 buf[14] = 0x80; 96 buf[14] = 0x80;
98 97
99 rbuf = transport_kmap_data_sg(cmd); 98 rbuf = transport_kmap_data_sg(cmd);
100 if (!rbuf) 99 if (rbuf) {
101 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 100 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
102 101 transport_kunmap_data_sg(cmd);
103 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 102 }
104 transport_kunmap_data_sg(cmd);
105 103
106 target_complete_cmd(cmd, GOOD); 104 target_complete_cmd(cmd, GOOD);
107 return 0; 105 return 0;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 84f9e96e8ace..2d88f087d961 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
641 641
642out: 642out:
643 rbuf = transport_kmap_data_sg(cmd); 643 rbuf = transport_kmap_data_sg(cmd);
644 if (!rbuf) 644 if (rbuf) {
645 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 645 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
646 646 transport_kunmap_data_sg(cmd);
647 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 647 }
648 transport_kunmap_data_sg(cmd);
649 648
650 if (!ret) 649 if (!ret)
651 target_complete_cmd(cmd, GOOD); 650 target_complete_cmd(cmd, GOOD);
@@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
851{ 850{
852 struct se_device *dev = cmd->se_dev; 851 struct se_device *dev = cmd->se_dev;
853 char *cdb = cmd->t_task_cdb; 852 char *cdb = cmd->t_task_cdb;
854 unsigned char *buf, *map_buf; 853 unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
855 int type = dev->transport->get_device_type(dev); 854 int type = dev->transport->get_device_type(dev);
856 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 855 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
857 bool dbd = !!(cdb[1] & 0x08); 856 bool dbd = !!(cdb[1] & 0x08);
@@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
863 int ret; 862 int ret;
864 int i; 863 int i;
865 864
866 map_buf = transport_kmap_data_sg(cmd); 865 memset(buf, 0, SE_MODE_PAGE_BUF);
867 if (!map_buf) 866
868 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
869 /*
870 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
871 * know we actually allocated a full page. Otherwise, if the
872 * data buffer is too small, allocate a temporary buffer so we
873 * don't have to worry about overruns in all our INQUIRY
874 * emulation handling.
875 */
876 if (cmd->data_length < SE_MODE_PAGE_BUF &&
877 (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
878 buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
879 if (!buf) {
880 transport_kunmap_data_sg(cmd);
881 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
882 }
883 } else {
884 buf = map_buf;
885 }
886 /* 867 /*
887 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for 868 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
888 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 869 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
@@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
934 if (page == 0x3f) { 915 if (page == 0x3f) {
935 if (subpage != 0x00 && subpage != 0xff) { 916 if (subpage != 0x00 && subpage != 0xff) {
936 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); 917 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
937 kfree(buf);
938 transport_kunmap_data_sg(cmd);
939 return TCM_INVALID_CDB_FIELD; 918 return TCM_INVALID_CDB_FIELD;
940 } 919 }
941 920
@@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
972 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 951 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
973 page, subpage); 952 page, subpage);
974 953
975 transport_kunmap_data_sg(cmd);
976 return TCM_UNKNOWN_MODE_PAGE; 954 return TCM_UNKNOWN_MODE_PAGE;
977 955
978set_length: 956set_length:
@@ -981,12 +959,12 @@ set_length:
981 else 959 else
982 buf[0] = length - 1; 960 buf[0] = length - 1;
983 961
984 if (buf != map_buf) { 962 rbuf = transport_kmap_data_sg(cmd);
985 memcpy(map_buf, buf, cmd->data_length); 963 if (rbuf) {
986 kfree(buf); 964 memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
965 transport_kunmap_data_sg(cmd);
987 } 966 }
988 967
989 transport_kunmap_data_sg(cmd);
990 target_complete_cmd(cmd, GOOD); 968 target_complete_cmd(cmd, GOOD);
991 return 0; 969 return 0;
992} 970}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index be6a373601b7..79ff3a5e925d 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
441 return pty_get_pktmode(tty, (int __user *)arg); 441 return pty_get_pktmode(tty, (int __user *)arg);
442 case TIOCSIG: /* Send signal to other side of pty */ 442 case TIOCSIG: /* Send signal to other side of pty */
443 return pty_signal(tty, (int) arg); 443 return pty_signal(tty, (int) arg);
444 case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
445 return -EINVAL;
444 } 446 }
445 return -ENOIOCTLCMD; 447 return -ENOIOCTLCMD;
446} 448}
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index d085e3a8ec06..f9320437a649 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = {
300 UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, 300 UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
301 .flags = UART_CAP_FIFO, 301 .flags = UART_CAP_FIFO,
302 }, 302 },
303 [PORT_BRCM_TRUMANAGE] = {
304 .name = "TruManage",
305 .fifo_size = 1,
306 .tx_loadsz = 1024,
307 .flags = UART_CAP_HFIFO,
308 },
303 [PORT_8250_CIR] = { 309 [PORT_8250_CIR] = {
304 .name = "CIR port" 310 .name = "CIR port"
305 } 311 }
@@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)
1490 port->icount.tx++; 1496 port->icount.tx++;
1491 if (uart_circ_empty(xmit)) 1497 if (uart_circ_empty(xmit))
1492 break; 1498 break;
1499 if (up->capabilities & UART_CAP_HFIFO) {
1500 if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
1501 BOTH_EMPTY)
1502 break;
1503 }
1493 } while (--count > 0); 1504 } while (--count > 0);
1494 1505
1495 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1506 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 3b4ea84898c2..12caa1292b75 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -40,6 +40,7 @@ struct serial8250_config {
40#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ 40#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
41#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ 41#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
42#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ 42#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
43#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
43 44
44#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ 45#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
45#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ 46#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1d0dba2d562d..096d2ef48b32 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
79 } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { 79 } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
80 /* Clear the USR and write the LCR again. */ 80 /* Clear the USR and write the LCR again. */
81 (void)p->serial_in(p, UART_USR); 81 (void)p->serial_in(p, UART_USR);
82 p->serial_out(p, d->last_lcr, UART_LCR); 82 p->serial_out(p, UART_LCR, d->last_lcr);
83 83
84 return 1; 84 return 1;
85 } 85 }
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 26b9dc012ed0..a27a98e1b066 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv,
1085 return setup_port(priv, port, 2, idx * 8, 0); 1085 return setup_port(priv, port, 2, idx * 8, 0);
1086} 1086}
1087 1087
1088static int
1089pci_brcm_trumanage_setup(struct serial_private *priv,
1090 const struct pciserial_board *board,
1091 struct uart_8250_port *port, int idx)
1092{
1093 int ret = pci_default_setup(priv, board, port, idx);
1094
1095 port->port.type = PORT_BRCM_TRUMANAGE;
1096 port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
1097 return ret;
1098}
1099
1088static int skip_tx_en_setup(struct serial_private *priv, 1100static int skip_tx_en_setup(struct serial_private *priv,
1089 const struct pciserial_board *board, 1101 const struct pciserial_board *board,
1090 struct uart_8250_port *port, int idx) 1102 struct uart_8250_port *port, int idx)
@@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv,
1301#define PCI_VENDOR_ID_AGESTAR 0x5372 1313#define PCI_VENDOR_ID_AGESTAR 0x5372
1302#define PCI_DEVICE_ID_AGESTAR_9375 0x6872 1314#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
1303#define PCI_VENDOR_ID_ASIX 0x9710 1315#define PCI_VENDOR_ID_ASIX 0x9710
1304#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019
1305#define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 1316#define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020
1306#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 1317#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
1318#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
1319#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
1307 1320
1308 1321
1309/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 1322/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
@@ -1954,6 +1967,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1954 .setup = pci_xr17v35x_setup, 1967 .setup = pci_xr17v35x_setup,
1955 }, 1968 },
1956 /* 1969 /*
1970 * Broadcom TruManage (NetXtreme)
1971 */
1972 {
1973 .vendor = PCI_VENDOR_ID_BROADCOM,
1974 .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
1975 .subvendor = PCI_ANY_ID,
1976 .subdevice = PCI_ANY_ID,
1977 .setup = pci_brcm_trumanage_setup,
1978 },
1979
1980 /*
1957 * Default "match everything" terminator entry 1981 * Default "match everything" terminator entry
1958 */ 1982 */
1959 { 1983 {
@@ -2148,6 +2172,7 @@ enum pci_board_num_t {
2148 pbn_ce4100_1_115200, 2172 pbn_ce4100_1_115200,
2149 pbn_omegapci, 2173 pbn_omegapci,
2150 pbn_NETMOS9900_2s_115200, 2174 pbn_NETMOS9900_2s_115200,
2175 pbn_brcm_trumanage,
2151}; 2176};
2152 2177
2153/* 2178/*
@@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = {
2246 2271
2247 [pbn_b0_8_1152000_200] = { 2272 [pbn_b0_8_1152000_200] = {
2248 .flags = FL_BASE0, 2273 .flags = FL_BASE0,
2249 .num_ports = 2, 2274 .num_ports = 8,
2250 .base_baud = 1152000, 2275 .base_baud = 1152000,
2251 .uart_offset = 0x200, 2276 .uart_offset = 0x200,
2252 }, 2277 },
@@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = {
2892 .num_ports = 2, 2917 .num_ports = 2,
2893 .base_baud = 115200, 2918 .base_baud = 115200,
2894 }, 2919 },
2920 [pbn_brcm_trumanage] = {
2921 .flags = FL_BASE0,
2922 .num_ports = 1,
2923 .reg_shift = 2,
2924 .base_baud = 115200,
2925 },
2895}; 2926};
2896 2927
2897static const struct pci_device_id blacklist[] = { 2928static const struct pci_device_id blacklist[] = {
@@ -4471,6 +4502,13 @@ static struct pci_device_id serial_pci_tbl[] = {
4471 pbn_omegapci }, 4502 pbn_omegapci },
4472 4503
4473 /* 4504 /*
4505 * Broadcom TruManage
4506 */
4507 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
4508 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
4509 pbn_brcm_trumanage },
4510
4511 /*
4474 * AgeStar as-prs2-009 4512 * AgeStar as-prs2-009
4475 */ 4513 */
4476 { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375, 4514 { PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375,
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 675d94ab0aff..8cb6d8d66a13 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port)
637 637
638 clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags); 638 clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
639 mrdy_set_low(ifx_dev); 639 mrdy_set_low(ifx_dev);
640 del_timer(&ifx_dev->spi_timer);
640 clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); 641 clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
641 tasklet_kill(&ifx_dev->io_work_tasklet); 642 tasklet_kill(&ifx_dev->io_work_tasklet);
642} 643}
@@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data)
810 ifx_dev->spi_xfer.cs_change = 0; 811 ifx_dev->spi_xfer.cs_change = 0;
811 ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; 812 ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
812 /* ifx_dev->spi_xfer.speed_hz = 390625; */ 813 /* ifx_dev->spi_xfer.speed_hz = 390625; */
813 ifx_dev->spi_xfer.bits_per_word = spi_bpw; 814 ifx_dev->spi_xfer.bits_per_word =
815 ifx_dev->spi_dev->bits_per_word;
814 816
815 ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; 817 ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
816 ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; 818 ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 7ce3197087bb..dd6277eb5a38 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -179,8 +179,7 @@ static void max3100_work(struct work_struct *w);
179 179
180static void max3100_dowork(struct max3100_port *s) 180static void max3100_dowork(struct max3100_port *s)
181{ 181{
182 if (!s->force_end_work && !work_pending(&s->work) && 182 if (!s->force_end_work && !freezing(current) && !s->suspending)
183 !freezing(current) && !s->suspending)
184 queue_work(s->workqueue, &s->work); 183 queue_work(s->workqueue, &s->work);
185} 184}
186 185
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6db23b035efe..e55615eb34ad 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
253 struct circ_buf *xmit = &s->port.state->xmit; 253 struct circ_buf *xmit = &s->port.state->xmit;
254 254
255 if (auart_dma_enabled(s)) { 255 if (auart_dma_enabled(s)) {
256 int i = 0; 256 u32 i = 0;
257 int size; 257 int size;
258 void *buffer = s->tx_dma_buf; 258 void *buffer = s->tx_dma_buf;
259 259
@@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
412 412
413 u32 ctrl = readl(u->membase + AUART_CTRL2); 413 u32 ctrl = readl(u->membase + AUART_CTRL2);
414 414
415 ctrl &= ~AUART_CTRL2_RTSEN; 415 ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
416 if (mctrl & TIOCM_RTS) { 416 if (mctrl & TIOCM_RTS) {
417 if (tty_port_cts_enabled(&u->state->port)) 417 if (tty_port_cts_enabled(&u->state->port))
418 ctrl |= AUART_CTRL2_RTSEN; 418 ctrl |= AUART_CTRL2_RTSEN;
419 else
420 ctrl |= AUART_CTRL2_RTS;
419 } 421 }
420 422
421 s->ctrl = mctrl; 423 s->ctrl = mctrl;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 12e5249d053e..e514b3a4dc57 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
1006 1006
1007 ucon &= ucon_mask; 1007 ucon &= ucon_mask;
1008 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); 1008 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
1009 wr_regl(port, S3C2410_ULCON, cfg->ulcon);
1010 1009
1011 /* reset both fifos */ 1010 /* reset both fifos */
1012 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); 1011 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 8fd181436a6b..d5ed9f613005 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
604 vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; 604 vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
605 605
606 vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); 606 vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
607 if (vt8500_port->clk) { 607 if (!IS_ERR(vt8500_port->clk)) {
608 vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); 608 vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);
609 } else { 609 } else {
610 /* use the default of 24Mhz if not specified and warn */ 610 /* use the default of 24Mhz if not specified and warn */
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index b3c4a250ff86..814655ee2d61 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -15,6 +15,7 @@
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 16
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/sched/rt.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
19#include <linux/mm.h> 20#include <linux/mm.h>
20#include <linux/fs.h> 21#include <linux/fs.h>
@@ -41,6 +42,7 @@
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/input.h> 43#include <linux/input.h>
43#include <linux/uaccess.h> 44#include <linux/uaccess.h>
45#include <linux/moduleparam.h>
44 46
45#include <asm/ptrace.h> 47#include <asm/ptrace.h>
46#include <asm/irq_regs.h> 48#include <asm/irq_regs.h>
@@ -577,8 +579,71 @@ struct sysrq_state {
577 bool active; 579 bool active;
578 bool need_reinject; 580 bool need_reinject;
579 bool reinjecting; 581 bool reinjecting;
582
583 /* reset sequence handling */
584 bool reset_canceled;
585 unsigned long reset_keybit[BITS_TO_LONGS(KEY_CNT)];
586 int reset_seq_len;
587 int reset_seq_cnt;
588 int reset_seq_version;
580}; 589};
581 590
591#define SYSRQ_KEY_RESET_MAX 20 /* Should be plenty */
592static unsigned short sysrq_reset_seq[SYSRQ_KEY_RESET_MAX];
593static unsigned int sysrq_reset_seq_len;
594static unsigned int sysrq_reset_seq_version = 1;
595
596static void sysrq_parse_reset_sequence(struct sysrq_state *state)
597{
598 int i;
599 unsigned short key;
600
601 state->reset_seq_cnt = 0;
602
603 for (i = 0; i < sysrq_reset_seq_len; i++) {
604 key = sysrq_reset_seq[i];
605
606 if (key == KEY_RESERVED || key > KEY_MAX)
607 break;
608
609 __set_bit(key, state->reset_keybit);
610 state->reset_seq_len++;
611
612 if (test_bit(key, state->key_down))
613 state->reset_seq_cnt++;
614 }
615
616 /* Disable reset until old keys are not released */
617 state->reset_canceled = state->reset_seq_cnt != 0;
618
619 state->reset_seq_version = sysrq_reset_seq_version;
620}
621
622static bool sysrq_detect_reset_sequence(struct sysrq_state *state,
623 unsigned int code, int value)
624{
625 if (!test_bit(code, state->reset_keybit)) {
626 /*
627 * Pressing any key _not_ in reset sequence cancels
628 * the reset sequence.
629 */
630 if (value && state->reset_seq_cnt)
631 state->reset_canceled = true;
632 } else if (value == 0) {
633 /* key release */
634 if (--state->reset_seq_cnt == 0)
635 state->reset_canceled = false;
636 } else if (value == 1) {
637 /* key press, not autorepeat */
638 if (++state->reset_seq_cnt == state->reset_seq_len &&
639 !state->reset_canceled) {
640 return true;
641 }
642 }
643
644 return false;
645}
646
582static void sysrq_reinject_alt_sysrq(struct work_struct *work) 647static void sysrq_reinject_alt_sysrq(struct work_struct *work)
583{ 648{
584 struct sysrq_state *sysrq = 649 struct sysrq_state *sysrq =
@@ -605,100 +670,121 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
605 } 670 }
606} 671}
607 672
608static bool sysrq_filter(struct input_handle *handle, 673static bool sysrq_handle_keypress(struct sysrq_state *sysrq,
609 unsigned int type, unsigned int code, int value) 674 unsigned int code, int value)
610{ 675{
611 struct sysrq_state *sysrq = handle->private;
612 bool was_active = sysrq->active; 676 bool was_active = sysrq->active;
613 bool suppress; 677 bool suppress;
614 678
615 /* 679 switch (code) {
616 * Do not filter anything if we are in the process of re-injecting
617 * Alt+SysRq combination.
618 */
619 if (sysrq->reinjecting)
620 return false;
621 680
622 switch (type) { 681 case KEY_LEFTALT:
682 case KEY_RIGHTALT:
683 if (!value) {
684 /* One of ALTs is being released */
685 if (sysrq->active && code == sysrq->alt_use)
686 sysrq->active = false;
623 687
624 case EV_SYN: 688 sysrq->alt = KEY_RESERVED;
625 suppress = false; 689
690 } else if (value != 2) {
691 sysrq->alt = code;
692 sysrq->need_reinject = false;
693 }
626 break; 694 break;
627 695
628 case EV_KEY: 696 case KEY_SYSRQ:
629 switch (code) { 697 if (value == 1 && sysrq->alt != KEY_RESERVED) {
698 sysrq->active = true;
699 sysrq->alt_use = sysrq->alt;
700 /*
701 * If nothing else will be pressed we'll need
702 * to re-inject Alt-SysRq keysroke.
703 */
704 sysrq->need_reinject = true;
705 }
630 706
631 case KEY_LEFTALT: 707 /*
632 case KEY_RIGHTALT: 708 * Pretend that sysrq was never pressed at all. This
633 if (!value) { 709 * is needed to properly handle KGDB which will try
634 /* One of ALTs is being released */ 710 * to release all keys after exiting debugger. If we
635 if (sysrq->active && code == sysrq->alt_use) 711 * do not clear key bit it KGDB will end up sending
636 sysrq->active = false; 712 * release events for Alt and SysRq, potentially
713 * triggering print screen function.
714 */
715 if (sysrq->active)
716 clear_bit(KEY_SYSRQ, sysrq->handle.dev->key);
637 717
638 sysrq->alt = KEY_RESERVED; 718 break;
639 719
640 } else if (value != 2) { 720 default:
641 sysrq->alt = code; 721 if (sysrq->active && value && value != 2) {
642 sysrq->need_reinject = false; 722 sysrq->need_reinject = false;
643 } 723 __handle_sysrq(sysrq_xlate[code], true);
644 break; 724 }
725 break;
726 }
645 727
646 case KEY_SYSRQ: 728 suppress = sysrq->active;
647 if (value == 1 && sysrq->alt != KEY_RESERVED) {
648 sysrq->active = true;
649 sysrq->alt_use = sysrq->alt;
650 /*
651 * If nothing else will be pressed we'll need
652 * to re-inject Alt-SysRq keysroke.
653 */
654 sysrq->need_reinject = true;
655 }
656 729
657 /* 730 if (!sysrq->active) {
658 * Pretend that sysrq was never pressed at all. This
659 * is needed to properly handle KGDB which will try
660 * to release all keys after exiting debugger. If we
661 * do not clear key bit it KGDB will end up sending
662 * release events for Alt and SysRq, potentially
663 * triggering print screen function.
664 */
665 if (sysrq->active)
666 clear_bit(KEY_SYSRQ, handle->dev->key);
667 731
668 break; 732 /*
733 * See if reset sequence has changed since the last time.
734 */
735 if (sysrq->reset_seq_version != sysrq_reset_seq_version)
736 sysrq_parse_reset_sequence(sysrq);
669 737
670 default: 738 /*
671 if (sysrq->active && value && value != 2) { 739 * If we are not suppressing key presses keep track of
672 sysrq->need_reinject = false; 740 * keyboard state so we can release keys that have been
673 __handle_sysrq(sysrq_xlate[code], true); 741 * pressed before entering SysRq mode.
674 } 742 */
675 break; 743 if (value)
744 set_bit(code, sysrq->key_down);
745 else
746 clear_bit(code, sysrq->key_down);
747
748 if (was_active)
749 schedule_work(&sysrq->reinject_work);
750
751 if (sysrq_detect_reset_sequence(sysrq, code, value)) {
752 /* Force emergency reboot */
753 __handle_sysrq(sysrq_xlate[KEY_B], false);
676 } 754 }
677 755
678 suppress = sysrq->active; 756 } else if (value == 0 && test_and_clear_bit(code, sysrq->key_down)) {
757 /*
758 * Pass on release events for keys that was pressed before
759 * entering SysRq mode.
760 */
761 suppress = false;
762 }
679 763
680 if (!sysrq->active) { 764 return suppress;
681 /* 765}
682 * If we are not suppressing key presses keep track of
683 * keyboard state so we can release keys that have been
684 * pressed before entering SysRq mode.
685 */
686 if (value)
687 set_bit(code, sysrq->key_down);
688 else
689 clear_bit(code, sysrq->key_down);
690 766
691 if (was_active) 767static bool sysrq_filter(struct input_handle *handle,
692 schedule_work(&sysrq->reinject_work); 768 unsigned int type, unsigned int code, int value)
769{
770 struct sysrq_state *sysrq = handle->private;
771 bool suppress;
693 772
694 } else if (value == 0 && 773 /*
695 test_and_clear_bit(code, sysrq->key_down)) { 774 * Do not filter anything if we are in the process of re-injecting
696 /* 775 * Alt+SysRq combination.
697 * Pass on release events for keys that was pressed before 776 */
698 * entering SysRq mode. 777 if (sysrq->reinjecting)
699 */ 778 return false;
700 suppress = false; 779
701 } 780 switch (type) {
781
782 case EV_SYN:
783 suppress = false;
784 break;
785
786 case EV_KEY:
787 suppress = sysrq_handle_keypress(sysrq, code, value);
702 break; 788 break;
703 789
704 default: 790 default:
@@ -786,7 +872,20 @@ static bool sysrq_handler_registered;
786 872
787static inline void sysrq_register_handler(void) 873static inline void sysrq_register_handler(void)
788{ 874{
875 extern unsigned short platform_sysrq_reset_seq[] __weak;
876 unsigned short key;
789 int error; 877 int error;
878 int i;
879
880 if (platform_sysrq_reset_seq) {
881 for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
882 key = platform_sysrq_reset_seq[i];
883 if (key == KEY_RESERVED || key > KEY_MAX)
884 break;
885
886 sysrq_reset_seq[sysrq_reset_seq_len++] = key;
887 }
888 }
790 889
791 error = input_register_handler(&sysrq_handler); 890 error = input_register_handler(&sysrq_handler);
792 if (error) 891 if (error)
@@ -803,6 +902,36 @@ static inline void sysrq_unregister_handler(void)
803 } 902 }
804} 903}
805 904
905static int sysrq_reset_seq_param_set(const char *buffer,
906 const struct kernel_param *kp)
907{
908 unsigned long val;
909 int error;
910
911 error = strict_strtoul(buffer, 0, &val);
912 if (error < 0)
913 return error;
914
915 if (val > KEY_MAX)
916 return -EINVAL;
917
918 *((unsigned short *)kp->arg) = val;
919 sysrq_reset_seq_version++;
920
921 return 0;
922}
923
924static struct kernel_param_ops param_ops_sysrq_reset_seq = {
925 .get = param_get_ushort,
926 .set = sysrq_reset_seq_param_set,
927};
928
929#define param_check_sysrq_reset_seq(name, p) \
930 __param_check(name, p, unsigned short)
931
932module_param_array_named(reset_seq, sysrq_reset_seq, sysrq_reset_seq,
933 &sysrq_reset_seq_len, 0644);
934
806#else 935#else
807 936
808static inline void sysrq_register_handler(void) 937static inline void sysrq_register_handler(void)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4225d5e72131..8e64adf8e4d5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40#include <linux/platform_device.h> 40#include <linux/platform_device.h>
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/pm_runtime.h>
42 43
43#include <linux/usb.h> 44#include <linux/usb.h>
44#include <linux/usb/hcd.h> 45#include <linux/usb/hcd.h>
@@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd)
1025 return retval; 1026 return retval;
1026} 1027}
1027 1028
1029/*
1030 * usb_hcd_start_port_resume - a root-hub port is sending a resume signal
1031 * @bus: the bus which the root hub belongs to
1032 * @portnum: the port which is being resumed
1033 *
1034 * HCDs should call this function when they know that a resume signal is
1035 * being sent to a root-hub port. The root hub will be prevented from
1036 * going into autosuspend until usb_hcd_end_port_resume() is called.
1037 *
1038 * The bus's private lock must be held by the caller.
1039 */
1040void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
1041{
1042 unsigned bit = 1 << portnum;
1043
1044 if (!(bus->resuming_ports & bit)) {
1045 bus->resuming_ports |= bit;
1046 pm_runtime_get_noresume(&bus->root_hub->dev);
1047 }
1048}
1049EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);
1050
1051/*
1052 * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
1053 * @bus: the bus which the root hub belongs to
1054 * @portnum: the port which is being resumed
1055 *
1056 * HCDs should call this function when they know that a resume signal has
1057 * stopped being sent to a root-hub port. The root hub will be allowed to
1058 * autosuspend again.
1059 *
1060 * The bus's private lock must be held by the caller.
1061 */
1062void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
1063{
1064 unsigned bit = 1 << portnum;
1065
1066 if (bus->resuming_ports & bit) {
1067 bus->resuming_ports &= ~bit;
1068 pm_runtime_put_noidle(&bus->root_hub->dev);
1069 }
1070}
1071EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
1028 1072
1029/*-------------------------------------------------------------------------*/ 1073/*-------------------------------------------------------------------------*/
1030 1074
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 957ed2c41482..cbf7168e3ce7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev)
2838EXPORT_SYMBOL_GPL(usb_enable_ltm); 2838EXPORT_SYMBOL_GPL(usb_enable_ltm);
2839 2839
2840#ifdef CONFIG_USB_SUSPEND 2840#ifdef CONFIG_USB_SUSPEND
2841/*
2842 * usb_disable_function_remotewakeup - disable usb3.0
2843 * device's function remote wakeup
2844 * @udev: target device
2845 *
2846 * Assume there's only one function on the USB 3.0
2847 * device and disable remote wake for the first
2848 * interface. FIXME if the interface association
2849 * descriptor shows there's more than one function.
2850 */
2851static int usb_disable_function_remotewakeup(struct usb_device *udev)
2852{
2853 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
2854 USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
2855 USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
2856 USB_CTRL_SET_TIMEOUT);
2857}
2841 2858
2842/* 2859/*
2843 * usb_port_suspend - suspend a usb device's upstream port 2860 * usb_port_suspend - suspend a usb device's upstream port
@@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2955 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", 2972 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
2956 port1, status); 2973 port1, status);
2957 /* paranoia: "should not happen" */ 2974 /* paranoia: "should not happen" */
2958 if (udev->do_remote_wakeup) 2975 if (udev->do_remote_wakeup) {
2959 (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2976 if (!hub_is_superspeed(hub->hdev)) {
2960 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, 2977 (void) usb_control_msg(udev,
2961 USB_DEVICE_REMOTE_WAKEUP, 0, 2978 usb_sndctrlpipe(udev, 0),
2962 NULL, 0, 2979 USB_REQ_CLEAR_FEATURE,
2963 USB_CTRL_SET_TIMEOUT); 2980 USB_RECIP_DEVICE,
2981 USB_DEVICE_REMOTE_WAKEUP, 0,
2982 NULL, 0,
2983 USB_CTRL_SET_TIMEOUT);
2984 } else
2985 (void) usb_disable_function_remotewakeup(udev);
2986
2987 }
2964 2988
2965 /* Try to enable USB2 hardware LPM again */ 2989 /* Try to enable USB2 hardware LPM again */
2966 if (udev->usb2_hw_lpm_capable == 1) 2990 if (udev->usb2_hw_lpm_capable == 1)
@@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev)
3052 * udev->reset_resume 3076 * udev->reset_resume
3053 */ 3077 */
3054 } else if (udev->actconfig && !udev->reset_resume) { 3078 } else if (udev->actconfig && !udev->reset_resume) {
3055 le16_to_cpus(&devstatus); 3079 if (!hub_is_superspeed(udev->parent)) {
3056 if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { 3080 le16_to_cpus(&devstatus);
3057 status = usb_control_msg(udev, 3081 if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
3058 usb_sndctrlpipe(udev, 0), 3082 status = usb_control_msg(udev,
3059 USB_REQ_CLEAR_FEATURE, 3083 usb_sndctrlpipe(udev, 0),
3084 USB_REQ_CLEAR_FEATURE,
3060 USB_RECIP_DEVICE, 3085 USB_RECIP_DEVICE,
3061 USB_DEVICE_REMOTE_WAKEUP, 0, 3086 USB_DEVICE_REMOTE_WAKEUP, 0,
3062 NULL, 0, 3087 NULL, 0,
3063 USB_CTRL_SET_TIMEOUT); 3088 USB_CTRL_SET_TIMEOUT);
3064 if (status) 3089 } else {
3065 dev_dbg(&udev->dev, 3090 status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
3066 "disable remote wakeup, status %d\n", 3091 &devstatus);
3067 status); 3092 le16_to_cpus(&devstatus);
3093 if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
3094 | USB_INTRF_STAT_FUNC_RW))
3095 status =
3096 usb_disable_function_remotewakeup(udev);
3068 } 3097 }
3098
3099 if (status)
3100 dev_dbg(&udev->dev,
3101 "disable remote wakeup, status %d\n",
3102 status);
3069 status = 0; 3103 status = 0;
3070 } 3104 }
3071 return status; 3105 return status;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2e43b332aae8..2fdd767f8fe8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1605,6 +1605,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1605 1605
1606 if (epnum == 0 || epnum == 1) { 1606 if (epnum == 0 || epnum == 1) {
1607 dep->endpoint.maxpacket = 512; 1607 dep->endpoint.maxpacket = 512;
1608 dep->endpoint.maxburst = 1;
1608 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1609 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1609 if (!epnum) 1610 if (!epnum)
1610 dwc->gadget.ep0 = &dep->endpoint; 1611 dwc->gadget.ep0 = &dep->endpoint;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 4a6961c517f2..8c2f25121149 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1153,15 +1153,15 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1153 pr_err("%s: unmapped value: %lu\n", opts, value); 1153 pr_err("%s: unmapped value: %lu\n", opts, value);
1154 return -EINVAL; 1154 return -EINVAL;
1155 } 1155 }
1156 } 1156 } else if (!memcmp(opts, "gid", 3)) {
1157 else if (!memcmp(opts, "gid", 3))
1158 data->perms.gid = make_kgid(current_user_ns(), value); 1157 data->perms.gid = make_kgid(current_user_ns(), value);
1159 if (!gid_valid(data->perms.gid)) { 1158 if (!gid_valid(data->perms.gid)) {
1160 pr_err("%s: unmapped value: %lu\n", opts, value); 1159 pr_err("%s: unmapped value: %lu\n", opts, value);
1161 return -EINVAL; 1160 return -EINVAL;
1162 } 1161 }
1163 else 1162 } else {
1164 goto invalid; 1163 goto invalid;
1164 }
1165 break; 1165 break;
1166 1166
1167 default: 1167 default:
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 1b0f086426bd..d3bd7b095ba3 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -18,14 +18,13 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h> 19#include <linux/io.h>
20 20
21#include <mach/hardware.h>
22
23static struct clk *mxc_ahb_clk; 21static struct clk *mxc_ahb_clk;
24static struct clk *mxc_per_clk; 22static struct clk *mxc_per_clk;
25static struct clk *mxc_ipg_clk; 23static struct clk *mxc_ipg_clk;
26 24
27/* workaround ENGcm09152 for i.MX35 */ 25/* workaround ENGcm09152 for i.MX35 */
28#define USBPHYCTRL_OTGBASE_OFFSET 0x608 26#define MX35_USBPHYCTRL_OFFSET 0x600
27#define USBPHYCTRL_OTGBASE_OFFSET 0x8
29#define USBPHYCTRL_EVDO (1 << 23) 28#define USBPHYCTRL_EVDO (1 << 23)
30 29
31int fsl_udc_clk_init(struct platform_device *pdev) 30int fsl_udc_clk_init(struct platform_device *pdev)
@@ -59,7 +58,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)
59 clk_prepare_enable(mxc_per_clk); 58 clk_prepare_enable(mxc_per_clk);
60 59
61 /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */ 60 /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
62 if (!cpu_is_mx51()) { 61 if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
63 freq = clk_get_rate(mxc_per_clk); 62 freq = clk_get_rate(mxc_per_clk);
64 if (pdata->phy_mode != FSL_USB2_PHY_ULPI && 63 if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
65 (freq < 59999000 || freq > 60001000)) { 64 (freq < 59999000 || freq > 60001000)) {
@@ -79,27 +78,40 @@ eclkrate:
79 return ret; 78 return ret;
80} 79}
81 80
82void fsl_udc_clk_finalize(struct platform_device *pdev) 81int fsl_udc_clk_finalize(struct platform_device *pdev)
83{ 82{
84 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 83 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
85 if (cpu_is_mx35()) { 84 int ret = 0;
86 unsigned int v;
87 85
88 /* workaround ENGcm09152 for i.MX35 */ 86 /* workaround ENGcm09152 for i.MX35 */
89 if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) { 87 if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
90 v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + 88 unsigned int v;
91 USBPHYCTRL_OTGBASE_OFFSET)); 89 struct resource *res = platform_get_resource
92 writel(v | USBPHYCTRL_EVDO, 90 (pdev, IORESOURCE_MEM, 0);
93 MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + 91 void __iomem *phy_regs = ioremap(res->start +
94 USBPHYCTRL_OTGBASE_OFFSET)); 92 MX35_USBPHYCTRL_OFFSET, 512);
93 if (!phy_regs) {
94 dev_err(&pdev->dev, "ioremap for phy address fails\n");
95 ret = -EINVAL;
96 goto ioremap_err;
95 } 97 }
98
99 v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
100 writel(v | USBPHYCTRL_EVDO,
101 phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
102
103 iounmap(phy_regs);
96 } 104 }
97 105
106
107ioremap_err:
98 /* ULPI transceivers don't need usbpll */ 108 /* ULPI transceivers don't need usbpll */
99 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { 109 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
100 clk_disable_unprepare(mxc_per_clk); 110 clk_disable_unprepare(mxc_per_clk);
101 mxc_per_clk = NULL; 111 mxc_per_clk = NULL;
102 } 112 }
113
114 return ret;
103} 115}
104 116
105void fsl_udc_clk_release(void) 117void fsl_udc_clk_release(void)
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index c19f7f13790b..667275cb7bad 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -41,6 +41,7 @@
41#include <linux/fsl_devices.h> 41#include <linux/fsl_devices.h>
42#include <linux/dmapool.h> 42#include <linux/dmapool.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/of_device.h>
44 45
45#include <asm/byteorder.h> 46#include <asm/byteorder.h>
46#include <asm/io.h> 47#include <asm/io.h>
@@ -2438,11 +2439,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2438 unsigned int i; 2439 unsigned int i;
2439 u32 dccparams; 2440 u32 dccparams;
2440 2441
2441 if (strcmp(pdev->name, driver_name)) {
2442 VDBG("Wrong device");
2443 return -ENODEV;
2444 }
2445
2446 udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL); 2442 udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
2447 if (udc_controller == NULL) { 2443 if (udc_controller == NULL) {
2448 ERR("malloc udc failed\n"); 2444 ERR("malloc udc failed\n");
@@ -2547,7 +2543,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2547 dr_controller_setup(udc_controller); 2543 dr_controller_setup(udc_controller);
2548 } 2544 }
2549 2545
2550 fsl_udc_clk_finalize(pdev); 2546 ret = fsl_udc_clk_finalize(pdev);
2547 if (ret)
2548 goto err_free_irq;
2551 2549
2552 /* Setup gadget structure */ 2550 /* Setup gadget structure */
2553 udc_controller->gadget.ops = &fsl_gadget_ops; 2551 udc_controller->gadget.ops = &fsl_gadget_ops;
@@ -2756,22 +2754,32 @@ static int fsl_udc_otg_resume(struct device *dev)
2756 2754
2757 return fsl_udc_resume(NULL); 2755 return fsl_udc_resume(NULL);
2758} 2756}
2759
2760/*------------------------------------------------------------------------- 2757/*-------------------------------------------------------------------------
2761 Register entry point for the peripheral controller driver 2758 Register entry point for the peripheral controller driver
2762--------------------------------------------------------------------------*/ 2759--------------------------------------------------------------------------*/
2763 2760static const struct platform_device_id fsl_udc_devtype[] = {
2761 {
2762 .name = "imx-udc-mx27",
2763 }, {
2764 .name = "imx-udc-mx51",
2765 }, {
2766 /* sentinel */
2767 }
2768};
2769MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
2764static struct platform_driver udc_driver = { 2770static struct platform_driver udc_driver = {
2765 .remove = __exit_p(fsl_udc_remove), 2771 .remove = __exit_p(fsl_udc_remove),
2772 /* Just for FSL i.mx SoC currently */
2773 .id_table = fsl_udc_devtype,
2766 /* these suspend and resume are not usb suspend and resume */ 2774 /* these suspend and resume are not usb suspend and resume */
2767 .suspend = fsl_udc_suspend, 2775 .suspend = fsl_udc_suspend,
2768 .resume = fsl_udc_resume, 2776 .resume = fsl_udc_resume,
2769 .driver = { 2777 .driver = {
2770 .name = (char *)driver_name, 2778 .name = (char *)driver_name,
2771 .owner = THIS_MODULE, 2779 .owner = THIS_MODULE,
2772 /* udc suspend/resume called from OTG driver */ 2780 /* udc suspend/resume called from OTG driver */
2773 .suspend = fsl_udc_otg_suspend, 2781 .suspend = fsl_udc_otg_suspend,
2774 .resume = fsl_udc_otg_resume, 2782 .resume = fsl_udc_otg_resume,
2775 }, 2783 },
2776}; 2784};
2777 2785
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index f61a967f7082..c6703bb07b23 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -592,15 +592,16 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
592struct platform_device; 592struct platform_device;
593#ifdef CONFIG_ARCH_MXC 593#ifdef CONFIG_ARCH_MXC
594int fsl_udc_clk_init(struct platform_device *pdev); 594int fsl_udc_clk_init(struct platform_device *pdev);
595void fsl_udc_clk_finalize(struct platform_device *pdev); 595int fsl_udc_clk_finalize(struct platform_device *pdev);
596void fsl_udc_clk_release(void); 596void fsl_udc_clk_release(void);
597#else 597#else
598static inline int fsl_udc_clk_init(struct platform_device *pdev) 598static inline int fsl_udc_clk_init(struct platform_device *pdev)
599{ 599{
600 return 0; 600 return 0;
601} 601}
602static inline void fsl_udc_clk_finalize(struct platform_device *pdev) 602static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
603{ 603{
604 return 0;
604} 605}
605static inline void fsl_udc_clk_release(void) 606static inline void fsl_udc_clk_release(void)
606{ 607{
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d6bb128ce21e..3a21c5d683c0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -148,7 +148,7 @@ config USB_EHCI_FSL
148 Variation of ARC USB block used in some Freescale chips. 148 Variation of ARC USB block used in some Freescale chips.
149 149
150config USB_EHCI_MXC 150config USB_EHCI_MXC
151 bool "Support for Freescale i.MX on-chip EHCI USB controller" 151 tristate "Support for Freescale i.MX on-chip EHCI USB controller"
152 depends on USB_EHCI_HCD && ARCH_MXC 152 depends on USB_EHCI_HCD && ARCH_MXC
153 select USB_EHCI_ROOT_HUB_TT 153 select USB_EHCI_ROOT_HUB_TT
154 ---help--- 154 ---help---
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 1eb4c3006e9e..001fbff2fdef 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o
26obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o 26obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
27obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o 27obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
28obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o 28obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
29obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
29 30
30obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o 31obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
31obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o 32obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c97503bb0b0e..b416a3fc9959 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -74,10 +74,6 @@ static const char hcd_name [] = "ehci_hcd";
74#undef VERBOSE_DEBUG 74#undef VERBOSE_DEBUG
75#undef EHCI_URB_TRACE 75#undef EHCI_URB_TRACE
76 76
77#ifdef DEBUG
78#define EHCI_STATS
79#endif
80
81/* magic numbers that can affect system performance */ 77/* magic numbers that can affect system performance */
82#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ 78#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
83#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ 79#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
@@ -801,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
801 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); 797 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
802 set_bit(i, &ehci->resuming_ports); 798 set_bit(i, &ehci->resuming_ports);
803 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); 799 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
800 usb_hcd_start_port_resume(&hcd->self, i);
804 mod_timer(&hcd->rh_timer, ehci->reset_done[i]); 801 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
805 } 802 }
806 } 803 }
@@ -1250,11 +1247,6 @@ MODULE_LICENSE ("GPL");
1250#define PLATFORM_DRIVER ehci_fsl_driver 1247#define PLATFORM_DRIVER ehci_fsl_driver
1251#endif 1248#endif
1252 1249
1253#ifdef CONFIG_USB_EHCI_MXC
1254#include "ehci-mxc.c"
1255#define PLATFORM_DRIVER ehci_mxc_driver
1256#endif
1257
1258#ifdef CONFIG_USB_EHCI_SH 1250#ifdef CONFIG_USB_EHCI_SH
1259#include "ehci-sh.c" 1251#include "ehci-sh.c"
1260#define PLATFORM_DRIVER ehci_hcd_sh_driver 1252#define PLATFORM_DRIVER ehci_hcd_sh_driver
@@ -1352,7 +1344,8 @@ MODULE_LICENSE ("GPL");
1352 1344
1353#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \ 1345#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
1354 !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ 1346 !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
1355 !defined(CONFIG_USB_CHIPIDEA_HOST) && \ 1347 !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
1348 !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
1356 !defined(PLATFORM_DRIVER) && \ 1349 !defined(PLATFORM_DRIVER) && \
1357 !defined(PS3_SYSTEM_BUS_DRIVER) && \ 1350 !defined(PS3_SYSTEM_BUS_DRIVER) && \
1358 !defined(OF_PLATFORM_DRIVER) && \ 1351 !defined(OF_PLATFORM_DRIVER) && \
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4ccb97c0678f..4d3b294f203e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
649 status = STS_PCD; 649 status = STS_PCD;
650 } 650 }
651 } 651 }
652 /* FIXME autosuspend idle root hubs */ 652
653 /* If a resume is in progress, make sure it can finish */
654 if (ehci->resuming_ports)
655 mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
656
653 spin_unlock_irqrestore (&ehci->lock, flags); 657 spin_unlock_irqrestore (&ehci->lock, flags);
654 return status ? retval : 0; 658 return status ? retval : 0;
655} 659}
@@ -851,6 +855,7 @@ static int ehci_hub_control (
851 /* resume signaling for 20 msec */ 855 /* resume signaling for 20 msec */
852 ehci->reset_done[wIndex] = jiffies 856 ehci->reset_done[wIndex] = jiffies
853 + msecs_to_jiffies(20); 857 + msecs_to_jiffies(20);
858 usb_hcd_start_port_resume(&hcd->self, wIndex);
854 /* check the port again */ 859 /* check the port again */
855 mod_timer(&ehci_to_hcd(ehci)->rh_timer, 860 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
856 ehci->reset_done[wIndex]); 861 ehci->reset_done[wIndex]);
@@ -862,6 +867,7 @@ static int ehci_hub_control (
862 clear_bit(wIndex, &ehci->suspended_ports); 867 clear_bit(wIndex, &ehci->suspended_ports);
863 set_bit(wIndex, &ehci->port_c_suspend); 868 set_bit(wIndex, &ehci->port_c_suspend);
864 ehci->reset_done[wIndex] = 0; 869 ehci->reset_done[wIndex] = 0;
870 usb_hcd_end_port_resume(&hcd->self, wIndex);
865 871
866 /* stop resume signaling */ 872 /* stop resume signaling */
867 temp = ehci_readl(ehci, status_reg); 873 temp = ehci_readl(ehci, status_reg);
@@ -950,6 +956,7 @@ static int ehci_hub_control (
950 ehci->reset_done[wIndex] = 0; 956 ehci->reset_done[wIndex] = 0;
951 if (temp & PORT_PE) 957 if (temp & PORT_PE)
952 set_bit(wIndex, &ehci->port_c_suspend); 958 set_bit(wIndex, &ehci->port_c_suspend);
959 usb_hcd_end_port_resume(&hcd->self, wIndex);
953 } 960 }
954 961
955 if (temp & PORT_OC) 962 if (temp & PORT_OC)
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ec7f5d2c90de..dedb80bb8d40 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -17,75 +17,38 @@
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 18 */
19 19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/io.h>
20#include <linux/platform_device.h> 23#include <linux/platform_device.h>
21#include <linux/clk.h> 24#include <linux/clk.h>
22#include <linux/delay.h> 25#include <linux/delay.h>
23#include <linux/usb/otg.h> 26#include <linux/usb/otg.h>
24#include <linux/usb/ulpi.h> 27#include <linux/usb/ulpi.h>
25#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/usb.h>
30#include <linux/usb/hcd.h>
26 31
27#include <linux/platform_data/usb-ehci-mxc.h> 32#include <linux/platform_data/usb-ehci-mxc.h>
28 33
29#include <asm/mach-types.h> 34#include <asm/mach-types.h>
30 35
36#include "ehci.h"
37
38#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
39
40static const char hcd_name[] = "ehci-mxc";
41
31#define ULPI_VIEWPORT_OFFSET 0x170 42#define ULPI_VIEWPORT_OFFSET 0x170
32 43
33struct ehci_mxc_priv { 44struct ehci_mxc_priv {
34 struct clk *usbclk, *ahbclk, *phyclk; 45 struct clk *usbclk, *ahbclk, *phyclk;
35 struct usb_hcd *hcd;
36}; 46};
37 47
38/* called during probe() after chip reset completes */ 48static struct hc_driver __read_mostly ehci_mxc_hc_driver;
39static int ehci_mxc_setup(struct usb_hcd *hcd)
40{
41 hcd->has_tt = 1;
42
43 return ehci_setup(hcd);
44}
45 49
46static const struct hc_driver ehci_mxc_hc_driver = { 50static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = {
47 .description = hcd_name, 51 .extra_priv_size = sizeof(struct ehci_mxc_priv),
48 .product_desc = "Freescale On-Chip EHCI Host Controller",
49 .hcd_priv_size = sizeof(struct ehci_hcd),
50
51 /*
52 * generic hardware linkage
53 */
54 .irq = ehci_irq,
55 .flags = HCD_USB2 | HCD_MEMORY,
56
57 /*
58 * basic lifecycle operations
59 */
60 .reset = ehci_mxc_setup,
61 .start = ehci_run,
62 .stop = ehci_stop,
63 .shutdown = ehci_shutdown,
64
65 /*
66 * managing i/o requests and associated device resources
67 */
68 .urb_enqueue = ehci_urb_enqueue,
69 .urb_dequeue = ehci_urb_dequeue,
70 .endpoint_disable = ehci_endpoint_disable,
71 .endpoint_reset = ehci_endpoint_reset,
72
73 /*
74 * scheduling support
75 */
76 .get_frame_number = ehci_get_frame,
77
78 /*
79 * root hub support
80 */
81 .hub_status_data = ehci_hub_status_data,
82 .hub_control = ehci_hub_control,
83 .bus_suspend = ehci_bus_suspend,
84 .bus_resume = ehci_bus_resume,
85 .relinquish_port = ehci_relinquish_port,
86 .port_handed_over = ehci_port_handed_over,
87
88 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
89}; 52};
90 53
91static int ehci_mxc_drv_probe(struct platform_device *pdev) 54static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -112,12 +75,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
112 if (!hcd) 75 if (!hcd)
113 return -ENOMEM; 76 return -ENOMEM;
114 77
115 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
116 if (!priv) {
117 ret = -ENOMEM;
118 goto err_alloc;
119 }
120
121 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 78 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
122 if (!res) { 79 if (!res) {
123 dev_err(dev, "Found HC with no register addr. Check setup!\n"); 80 dev_err(dev, "Found HC with no register addr. Check setup!\n");
@@ -135,6 +92,10 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
135 goto err_alloc; 92 goto err_alloc;
136 } 93 }
137 94
95 hcd->has_tt = 1;
96 ehci = hcd_to_ehci(hcd);
97 priv = (struct ehci_mxc_priv *) ehci->priv;
98
138 /* enable clocks */ 99 /* enable clocks */
139 priv->usbclk = devm_clk_get(&pdev->dev, "ipg"); 100 priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
140 if (IS_ERR(priv->usbclk)) { 101 if (IS_ERR(priv->usbclk)) {
@@ -169,8 +130,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
169 mdelay(10); 130 mdelay(10);
170 } 131 }
171 132
172 ehci = hcd_to_ehci(hcd);
173
174 /* EHCI registers start at offset 0x100 */ 133 /* EHCI registers start at offset 0x100 */
175 ehci->caps = hcd->regs + 0x100; 134 ehci->caps = hcd->regs + 0x100;
176 ehci->regs = hcd->regs + 0x100 + 135 ehci->regs = hcd->regs + 0x100 +
@@ -198,8 +157,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
198 } 157 }
199 } 158 }
200 159
201 priv->hcd = hcd; 160 platform_set_drvdata(pdev, hcd);
202 platform_set_drvdata(pdev, priv);
203 161
204 ret = usb_add_hcd(hcd, irq, IRQF_SHARED); 162 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
205 if (ret) 163 if (ret)
@@ -244,8 +202,11 @@ err_alloc:
244static int __exit ehci_mxc_drv_remove(struct platform_device *pdev) 202static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
245{ 203{
246 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 204 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
247 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); 205 struct usb_hcd *hcd = platform_get_drvdata(pdev);
248 struct usb_hcd *hcd = priv->hcd; 206 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
207 struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
208
209 usb_remove_hcd(hcd);
249 210
250 if (pdata && pdata->exit) 211 if (pdata && pdata->exit)
251 pdata->exit(pdev); 212 pdata->exit(pdev);
@@ -253,23 +214,20 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
253 if (pdata->otg) 214 if (pdata->otg)
254 usb_phy_shutdown(pdata->otg); 215 usb_phy_shutdown(pdata->otg);
255 216
256 usb_remove_hcd(hcd);
257 usb_put_hcd(hcd);
258 platform_set_drvdata(pdev, NULL);
259
260 clk_disable_unprepare(priv->usbclk); 217 clk_disable_unprepare(priv->usbclk);
261 clk_disable_unprepare(priv->ahbclk); 218 clk_disable_unprepare(priv->ahbclk);
262 219
263 if (priv->phyclk) 220 if (priv->phyclk)
264 clk_disable_unprepare(priv->phyclk); 221 clk_disable_unprepare(priv->phyclk);
265 222
223 usb_put_hcd(hcd);
224 platform_set_drvdata(pdev, NULL);
266 return 0; 225 return 0;
267} 226}
268 227
269static void ehci_mxc_drv_shutdown(struct platform_device *pdev) 228static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
270{ 229{
271 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); 230 struct usb_hcd *hcd = platform_get_drvdata(pdev);
272 struct usb_hcd *hcd = priv->hcd;
273 231
274 if (hcd->driver->shutdown) 232 if (hcd->driver->shutdown)
275 hcd->driver->shutdown(hcd); 233 hcd->driver->shutdown(hcd);
@@ -279,9 +237,31 @@ MODULE_ALIAS("platform:mxc-ehci");
279 237
280static struct platform_driver ehci_mxc_driver = { 238static struct platform_driver ehci_mxc_driver = {
281 .probe = ehci_mxc_drv_probe, 239 .probe = ehci_mxc_drv_probe,
282 .remove = __exit_p(ehci_mxc_drv_remove), 240 .remove = ehci_mxc_drv_remove,
283 .shutdown = ehci_mxc_drv_shutdown, 241 .shutdown = ehci_mxc_drv_shutdown,
284 .driver = { 242 .driver = {
285 .name = "mxc-ehci", 243 .name = "mxc-ehci",
286 }, 244 },
287}; 245};
246
247static int __init ehci_mxc_init(void)
248{
249 if (usb_disabled())
250 return -ENODEV;
251
252 pr_info("%s: " DRIVER_DESC "\n", hcd_name);
253
254 ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
255 return platform_driver_register(&ehci_mxc_driver);
256}
257module_init(ehci_mxc_init);
258
259static void __exit ehci_mxc_cleanup(void)
260{
261 platform_driver_unregister(&ehci_mxc_driver);
262}
263module_exit(ehci_mxc_cleanup);
264
265MODULE_DESCRIPTION(DRIVER_DESC);
266MODULE_AUTHOR("Sascha Hauer");
267MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 3d989028c836..fd252f0cfb3a 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
1197 if (ehci->async_iaa || ehci->async_unlinking) 1197 if (ehci->async_iaa || ehci->async_unlinking)
1198 return; 1198 return;
1199 1199
1200 /* Do all the waiting QHs at once */
1201 ehci->async_iaa = ehci->async_unlink;
1202 ehci->async_unlink = NULL;
1203
1204 /* If the controller isn't running, we don't have to wait for it */ 1200 /* If the controller isn't running, we don't have to wait for it */
1205 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { 1201 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
1202
1203 /* Do all the waiting QHs */
1204 ehci->async_iaa = ehci->async_unlink;
1205 ehci->async_unlink = NULL;
1206
1206 if (!nested) /* Avoid recursion */ 1207 if (!nested) /* Avoid recursion */
1207 end_unlink_async(ehci); 1208 end_unlink_async(ehci);
1208 1209
1209 /* Otherwise start a new IAA cycle */ 1210 /* Otherwise start a new IAA cycle */
1210 } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { 1211 } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
1212 struct ehci_qh *qh;
1213
1214 /* Do only the first waiting QH (nVidia bug?) */
1215 qh = ehci->async_unlink;
1216 ehci->async_iaa = qh;
1217 ehci->async_unlink = qh->unlink_next;
1218 qh->unlink_next = NULL;
1219
1211 /* Make sure the unlinks are all visible to the hardware */ 1220 /* Make sure the unlinks are all visible to the hardware */
1212 wmb(); 1221 wmb();
1213 1222
@@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)
1255 } 1264 }
1256} 1265}
1257 1266
1267static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
1268
1258static void unlink_empty_async(struct ehci_hcd *ehci) 1269static void unlink_empty_async(struct ehci_hcd *ehci)
1259{ 1270{
1260 struct ehci_qh *qh, *next; 1271 struct ehci_qh *qh;
1261 bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); 1272 struct ehci_qh *qh_to_unlink = NULL;
1262 bool check_unlinks_later = false; 1273 bool check_unlinks_later = false;
1274 int count = 0;
1263 1275
1264 /* Unlink all the async QHs that have been empty for a timer cycle */ 1276 /* Find the last async QH which has been empty for a timer cycle */
1265 next = ehci->async->qh_next.qh; 1277 for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
1266 while (next) {
1267 qh = next;
1268 next = qh->qh_next.qh;
1269
1270 if (list_empty(&qh->qtd_list) && 1278 if (list_empty(&qh->qtd_list) &&
1271 qh->qh_state == QH_STATE_LINKED) { 1279 qh->qh_state == QH_STATE_LINKED) {
1272 if (!stopped && qh->unlink_cycle == 1280 ++count;
1273 ehci->async_unlink_cycle) 1281 if (qh->unlink_cycle == ehci->async_unlink_cycle)
1274 check_unlinks_later = true; 1282 check_unlinks_later = true;
1275 else 1283 else
1276 single_unlink_async(ehci, qh); 1284 qh_to_unlink = qh;
1277 } 1285 }
1278 } 1286 }
1279 1287
1280 /* Start a new IAA cycle if any QHs are waiting for it */ 1288 /* If nothing else is being unlinked, unlink the last empty QH */
1281 if (ehci->async_unlink) 1289 if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
1282 start_iaa_cycle(ehci, false); 1290 start_unlink_async(ehci, qh_to_unlink);
1291 --count;
1292 }
1283 1293
1284 /* QHs that haven't been empty for long enough will be handled later */ 1294 /* Other QHs will be handled later */
1285 if (check_unlinks_later) { 1295 if (count > 0) {
1286 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); 1296 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
1287 ++ehci->async_unlink_cycle; 1297 ++ehci->async_unlink_cycle;
1288 } 1298 }
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 69ebee73c0c1..b476daf49f6f 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
213} 213}
214 214
215static const unsigned char 215static const unsigned char
216max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; 216max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
217 217
218/* carryover low/fullspeed bandwidth that crosses uframe boundries */ 218/* carryover low/fullspeed bandwidth that crosses uframe boundries */
219static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) 219static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)
2212 } 2212 }
2213 ehci->now_frame = now_frame; 2213 ehci->now_frame = now_frame;
2214 2214
2215 frame = ehci->last_iso_frame;
2215 for (;;) { 2216 for (;;) {
2216 union ehci_shadow q, *q_p; 2217 union ehci_shadow q, *q_p;
2217 __hc32 type, *hw_p; 2218 __hc32 type, *hw_p;
2218 2219
2219 frame = ehci->last_iso_frame;
2220restart: 2220restart:
2221 /* scan each element in frame's queue for completions */ 2221 /* scan each element in frame's queue for completions */
2222 q_p = &ehci->pshadow [frame]; 2222 q_p = &ehci->pshadow [frame];
@@ -2321,6 +2321,9 @@ restart:
2321 /* Stop when we have reached the current frame */ 2321 /* Stop when we have reached the current frame */
2322 if (frame == now_frame) 2322 if (frame == now_frame)
2323 break; 2323 break;
2324 ehci->last_iso_frame = (frame + 1) & fmask; 2324
2325 /* The last frame may still have active siTDs */
2326 ehci->last_iso_frame = frame;
2327 frame = (frame + 1) & fmask;
2325 } 2328 }
2326} 2329}
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 20dbdcbe9b0f..f904071d70df 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)
113 113
114 if (want != actual) { 114 if (want != actual) {
115 115
116 /* Poll again later, but give up after about 20 ms */ 116 /* Poll again later */
117 if (ehci->ASS_poll_count++ < 20) { 117 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
118 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); 118 ++ehci->ASS_poll_count;
119 return; 119 return;
120 }
121 ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
122 want, actual);
123 } 120 }
121
122 if (ehci->ASS_poll_count > 20)
123 ehci_dbg(ehci, "ASS poll count reached %d\n",
124 ehci->ASS_poll_count);
124 ehci->ASS_poll_count = 0; 125 ehci->ASS_poll_count = 0;
125 126
126 /* The status is up-to-date; restart or stop the schedule as needed */ 127 /* The status is up-to-date; restart or stop the schedule as needed */
@@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)
159 160
160 if (want != actual) { 161 if (want != actual) {
161 162
162 /* Poll again later, but give up after about 20 ms */ 163 /* Poll again later */
163 if (ehci->PSS_poll_count++ < 20) { 164 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
164 ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); 165 return;
165 return;
166 }
167 ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
168 want, actual);
169 } 166 }
167
168 if (ehci->PSS_poll_count > 20)
169 ehci_dbg(ehci, "PSS poll count reached %d\n",
170 ehci->PSS_poll_count);
170 ehci->PSS_poll_count = 0; 171 ehci->PSS_poll_count = 0;
171 172
172 /* The status is up-to-date; restart or stop the schedule as needed */ 173 /* The status is up-to-date; restart or stop the schedule as needed */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 9dadc7118d68..36c3a8210595 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -38,6 +38,10 @@ typedef __u16 __bitwise __hc16;
38#endif 38#endif
39 39
40/* statistics can be kept for tuning/monitoring */ 40/* statistics can be kept for tuning/monitoring */
41#ifdef DEBUG
42#define EHCI_STATS
43#endif
44
41struct ehci_stats { 45struct ehci_stats {
42 /* irq usage */ 46 /* irq usage */
43 unsigned long normal; 47 unsigned long normal;
@@ -221,6 +225,9 @@ struct ehci_hcd { /* one per controller */
221#ifdef DEBUG 225#ifdef DEBUG
222 struct dentry *debug_dir; 226 struct dentry *debug_dir;
223#endif 227#endif
228
229 /* platform-specific data -- must come last */
230 unsigned long priv[0] __aligned(sizeof(s64));
224}; 231};
225 232
226/* convert between an HCD pointer and the corresponding EHCI_HCD */ 233/* convert between an HCD pointer and the corresponding EHCI_HCD */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a3b6d7104ae2..4c338ec03a07 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
780 "defaulting to EHCI.\n"); 780 "defaulting to EHCI.\n");
781 dev_warn(&xhci_pdev->dev, 781 dev_warn(&xhci_pdev->dev,
782 "USB 3.0 devices will work at USB 2.0 speeds.\n"); 782 "USB 3.0 devices will work at USB 2.0 speeds.\n");
783 usb_disable_xhci_ports(xhci_pdev);
783 return; 784 return;
784 } 785 }
785 786
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 4b9e9aba2665..4f64d24eebc8 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
447 return IRQ_NONE; 447 return IRQ_NONE;
448 uhci_writew(uhci, status, USBSTS); /* Clear it */ 448 uhci_writew(uhci, status, USBSTS); /* Clear it */
449 449
450 spin_lock(&uhci->lock);
451 if (unlikely(!uhci->is_initialized)) /* not yet configured */
452 goto done;
453
450 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { 454 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
451 if (status & USBSTS_HSE) 455 if (status & USBSTS_HSE)
452 dev_err(uhci_dev(uhci), "host system error, " 456 dev_err(uhci_dev(uhci), "host system error, "
@@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
455 dev_err(uhci_dev(uhci), "host controller process " 459 dev_err(uhci_dev(uhci), "host controller process "
456 "error, something bad happened!\n"); 460 "error, something bad happened!\n");
457 if (status & USBSTS_HCH) { 461 if (status & USBSTS_HCH) {
458 spin_lock(&uhci->lock);
459 if (uhci->rh_state >= UHCI_RH_RUNNING) { 462 if (uhci->rh_state >= UHCI_RH_RUNNING) {
460 dev_err(uhci_dev(uhci), 463 dev_err(uhci_dev(uhci),
461 "host controller halted, " 464 "host controller halted, "
@@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
473 * pending unlinks */ 476 * pending unlinks */
474 mod_timer(&hcd->rh_timer, jiffies); 477 mod_timer(&hcd->rh_timer, jiffies);
475 } 478 }
476 spin_unlock(&uhci->lock);
477 } 479 }
478 } 480 }
479 481
480 if (status & USBSTS_RD) 482 if (status & USBSTS_RD) {
483 spin_unlock(&uhci->lock);
481 usb_hcd_poll_rh_status(hcd); 484 usb_hcd_poll_rh_status(hcd);
482 else { 485 } else {
483 spin_lock(&uhci->lock);
484 uhci_scan_schedule(uhci); 486 uhci_scan_schedule(uhci);
487 done:
485 spin_unlock(&uhci->lock); 488 spin_unlock(&uhci->lock);
486 } 489 }
487 490
@@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd)
662 */ 665 */
663 mb(); 666 mb();
664 667
668 spin_lock_irq(&uhci->lock);
665 configure_hc(uhci); 669 configure_hc(uhci);
666 uhci->is_initialized = 1; 670 uhci->is_initialized = 1;
667 spin_lock_irq(&uhci->lock);
668 start_rh(uhci); 671 start_rh(uhci);
669 spin_unlock_irq(&uhci->lock); 672 spin_unlock_irq(&uhci->lock);
670 return 0; 673 return 0;
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 768d54295a20..15d13229ddbb 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
116 } 116 }
117 } 117 }
118 clear_bit(port, &uhci->resuming_ports); 118 clear_bit(port, &uhci->resuming_ports);
119 usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
119} 120}
120 121
121/* Wait for the UHCI controller in HP's iLO2 server management chip. 122/* Wait for the UHCI controller in HP's iLO2 server management chip.
@@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
167 set_bit(port, &uhci->resuming_ports); 168 set_bit(port, &uhci->resuming_ports);
168 uhci->ports_timeout = jiffies + 169 uhci->ports_timeout = jiffies +
169 msecs_to_jiffies(25); 170 msecs_to_jiffies(25);
171 usb_hcd_start_port_resume(
172 &uhci_to_hcd(uhci)->self, port);
170 173
171 /* Make sure we see the port again 174 /* Make sure we see the port again
172 * after the resuming period is over. */ 175 * after the resuming period is over. */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 59fb5c677dbe..7f76a49e90d3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1698 faked_port_index + 1); 1698 faked_port_index + 1);
1699 if (slot_id && xhci->devs[slot_id]) 1699 if (slot_id && xhci->devs[slot_id])
1700 xhci_ring_device(xhci, slot_id); 1700 xhci_ring_device(xhci, slot_id);
1701 if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { 1701 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1702 bus_state->port_remote_wakeup &= 1702 bus_state->port_remote_wakeup &=
1703 ~(1 << faked_port_index); 1703 ~(1 << faked_port_index);
1704 xhci_test_and_clear_bit(xhci, port_array, 1704 xhci_test_and_clear_bit(xhci, port_array,
@@ -2589,6 +2589,8 @@ cleanup:
2589 (trb_comp_code != COMP_STALL && 2589 (trb_comp_code != COMP_STALL &&
2590 trb_comp_code != COMP_BABBLE)) 2590 trb_comp_code != COMP_BABBLE))
2591 xhci_urb_free_priv(xhci, urb_priv); 2591 xhci_urb_free_priv(xhci, urb_priv);
2592 else
2593 kfree(urb_priv);
2592 2594
2593 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2595 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2594 if ((urb->actual_length != urb->transfer_buffer_length && 2596 if ((urb->actual_length != urb->transfer_buffer_length &&
@@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3108 * running_total. 3110 * running_total.
3109 */ 3111 */
3110 packets_transferred = (running_total + trb_buff_len) / 3112 packets_transferred = (running_total + trb_buff_len) /
3111 usb_endpoint_maxp(&urb->ep->desc); 3113 GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3112 3114
3113 if ((total_packet_count - packets_transferred) > 31) 3115 if ((total_packet_count - packets_transferred) > 31)
3114 return 31 << 17; 3116 return 31 << 17;
@@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3642 td_len = urb->iso_frame_desc[i].length; 3644 td_len = urb->iso_frame_desc[i].length;
3643 td_remain_len = td_len; 3645 td_remain_len = td_len;
3644 total_packet_count = DIV_ROUND_UP(td_len, 3646 total_packet_count = DIV_ROUND_UP(td_len,
3645 usb_endpoint_maxp(&urb->ep->desc)); 3647 GET_MAX_PACKET(
3648 usb_endpoint_maxp(&urb->ep->desc)));
3646 /* A zero-length transfer still involves at least one packet. */ 3649 /* A zero-length transfer still involves at least one packet. */
3647 if (total_packet_count == 0) 3650 if (total_packet_count == 0)
3648 total_packet_count++; 3651 total_packet_count++;
@@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3664 td = urb_priv->td[i]; 3667 td = urb_priv->td[i];
3665 for (j = 0; j < trbs_per_td; j++) { 3668 for (j = 0; j < trbs_per_td; j++) {
3666 u32 remainder = 0; 3669 u32 remainder = 0;
3667 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3670 field = 0;
3668 3671
3669 if (first_trb) { 3672 if (first_trb) {
3673 field = TRB_TBC(burst_count) |
3674 TRB_TLBPC(residue);
3670 /* Queue the isoc TRB */ 3675 /* Queue the isoc TRB */
3671 field |= TRB_TYPE(TRB_ISOC); 3676 field |= TRB_TYPE(TRB_ISOC);
3672 /* Assume URB_ISO_ASAP is set */ 3677 /* Assume URB_ISO_ASAP is set */
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 0968dd7a859d..f522000e8f06 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -105,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
105 musb_writel(&tx->tx_complete, 0, ptr); 105 musb_writel(&tx->tx_complete, 0, ptr);
106} 106}
107 107
108static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) 108static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
109{ 109{
110 int j; 110 int j;
111 111
@@ -150,7 +150,7 @@ static void cppi_pool_free(struct cppi_channel *c)
150 c->last_processed = NULL; 150 c->last_processed = NULL;
151} 151}
152 152
153static int __init cppi_controller_start(struct dma_controller *c) 153static int cppi_controller_start(struct dma_controller *c)
154{ 154{
155 struct cppi *controller; 155 struct cppi *controller;
156 void __iomem *tibase; 156 void __iomem *tibase;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f14736f647ff..edc0f0dcad83 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
60 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 60 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
61 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ 61 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
62 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ 62 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
63 { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
63 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ 64 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
64 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 65 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
65 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ 66 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ba68835d06a6..90ceef1776c3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {
584 /* 584 /*
585 * ELV devices: 585 * ELV devices:
586 */ 586 */
587 { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
587 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, 588 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
588 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
589 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
@@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {
670 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, 671 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
671 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, 672 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
672 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, 673 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
674 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
673 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, 675 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
674 { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, 676 { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
675 { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, 677 { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index fa5d56038276..9d359e189a64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -147,6 +147,11 @@
147#define XSENS_CONVERTER_6_PID 0xD38E 147#define XSENS_CONVERTER_6_PID 0xD38E
148#define XSENS_CONVERTER_7_PID 0xD38F 148#define XSENS_CONVERTER_7_PID 0xD38F
149 149
150/**
151 * Zolix (www.zolix.com.cb) product ids
152 */
153#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
154
150/* 155/*
151 * NDI (www.ndigital.com) product ids 156 * NDI (www.ndigital.com) product ids
152 */ 157 */
@@ -204,7 +209,7 @@
204 209
205/* 210/*
206 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 211 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
207 * All of these devices use FTDI's vendor ID (0x0403). 212 * Almost all of these devices use FTDI's vendor ID (0x0403).
208 * Further IDs taken from ELV Windows .inf file. 213 * Further IDs taken from ELV Windows .inf file.
209 * 214 *
210 * The previously included PID for the UO 100 module was incorrect. 215 * The previously included PID for the UO 100 module was incorrect.
@@ -212,6 +217,8 @@
212 * 217 *
213 * Armin Laeuger originally sent the PID for the UM 100 module. 218 * Armin Laeuger originally sent the PID for the UM 100 module.
214 */ 219 */
220#define FTDI_ELV_VID 0x1B1F /* ELV AG */
221#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
215#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ 222#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
216#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ 223#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
217#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ 224#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 58184f3de686..82afc4d6a327 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
530 wait_queue_t wait; 530 wait_queue_t wait;
531 unsigned long flags; 531 unsigned long flags;
532 532
533 if (!tty)
534 return;
535
533 if (!timeout) 536 if (!timeout)
534 timeout = (HZ * EDGE_CLOSING_WAIT)/100; 537 timeout = (HZ * EDGE_CLOSING_WAIT)/100;
535 538
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 478adcfcdf26..567bc77d6397 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
242#define TELIT_PRODUCT_CC864_DUAL 0x1005 242#define TELIT_PRODUCT_CC864_DUAL 0x1005
243#define TELIT_PRODUCT_CC864_SINGLE 0x1006 243#define TELIT_PRODUCT_CC864_SINGLE 0x1006
244#define TELIT_PRODUCT_DE910_DUAL 0x1010 244#define TELIT_PRODUCT_DE910_DUAL 0x1010
245#define TELIT_PRODUCT_LE920 0x1200
245 246
246/* ZTE PRODUCTS */ 247/* ZTE PRODUCTS */
247#define ZTE_VENDOR_ID 0x19d2 248#define ZTE_VENDOR_ID 0x19d2
@@ -449,6 +450,14 @@ static void option_instat_callback(struct urb *urb);
449#define PETATEL_VENDOR_ID 0x1ff4 450#define PETATEL_VENDOR_ID 0x1ff4
450#define PETATEL_PRODUCT_NP10T 0x600e 451#define PETATEL_PRODUCT_NP10T 0x600e
451 452
453/* TP-LINK Incorporated products */
454#define TPLINK_VENDOR_ID 0x2357
455#define TPLINK_PRODUCT_MA180 0x0201
456
457/* Changhong products */
458#define CHANGHONG_VENDOR_ID 0x2077
459#define CHANGHONG_PRODUCT_CH690 0x7001
460
452/* some devices interfaces need special handling due to a number of reasons */ 461/* some devices interfaces need special handling due to a number of reasons */
453enum option_blacklist_reason { 462enum option_blacklist_reason {
454 OPTION_BLACKLIST_NONE = 0, 463 OPTION_BLACKLIST_NONE = 0,
@@ -530,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
530 .reserved = BIT(3) | BIT(4), 539 .reserved = BIT(3) | BIT(4),
531}; 540};
532 541
542static const struct option_blacklist_info telit_le920_blacklist = {
543 .sendsetup = BIT(0),
544 .reserved = BIT(1) | BIT(5),
545};
546
533static const struct usb_device_id option_ids[] = { 547static const struct usb_device_id option_ids[] = {
534 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 548 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
535 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 549 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -780,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
780 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, 794 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
781 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 795 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
782 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 796 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
797 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
798 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
783 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 799 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
784 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 800 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
785 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 801 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -930,7 +946,8 @@ static const struct usb_device_id option_ids[] = {
930 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, 946 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
931 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ 947 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
932 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 948 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
933 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, 949 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
950 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
934 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ 951 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
935 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 952 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
936 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, 953 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
@@ -1311,6 +1328,9 @@ static const struct usb_device_id option_ids[] = {
1311 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, 1328 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
1312 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 1329 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1313 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1330 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
1331 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
1332 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1333 { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
1314 { } /* Terminating entry */ 1334 { } /* Terminating entry */
1315}; 1335};
1316MODULE_DEVICE_TABLE(usb, option_ids); 1336MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index aa148c21ea40..24662547dc5b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
53 {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 53 {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
54 {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 54 {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
55 {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ 55 {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
56 {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
56 57
57 /* Gobi 2000 devices */ 58 /* Gobi 2000 devices */
58 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ 59 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 105d900150c1..16b0bf055eeb 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
92 return 0; 92 return 0;
93} 93}
94 94
95/* This places the HUAWEI E220 devices in multi-port mode */ 95/* This places the HUAWEI usb dongles in multi-port mode */
96int usb_stor_huawei_e220_init(struct us_data *us) 96static int usb_stor_huawei_feature_init(struct us_data *us)
97{ 97{
98 int result; 98 int result;
99 99
@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
104 US_DEBUGP("Huawei mode set result is %d\n", result); 104 US_DEBUGP("Huawei mode set result is %d\n", result);
105 return 0; 105 return 0;
106} 106}
107
108/*
109 * It will send a scsi switch command called rewind' to huawei dongle.
110 * When the dongle receives this command at the first time,
111 * it will reboot immediately. After rebooted, it will ignore this command.
112 * So it is unnecessary to read its response.
113 */
114static int usb_stor_huawei_scsi_init(struct us_data *us)
115{
116 int result = 0;
117 int act_len = 0;
118 struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
119 char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
120 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
121
122 bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
123 bcbw->Tag = 0;
124 bcbw->DataTransferLength = 0;
125 bcbw->Flags = bcbw->Lun = 0;
126 bcbw->Length = sizeof(rewind_cmd);
127 memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
128 memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
129
130 result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
131 US_BULK_CB_WRAP_LEN, &act_len);
132 US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
133 return result;
134}
135
136/*
137 * It tries to find the supported Huawei USB dongles.
138 * In Huawei, they assign the following product IDs
139 * for all of their mobile broadband dongles,
140 * including the new dongles in the future.
141 * So if the product ID is not included in this list,
142 * it means it is not Huawei's mobile broadband dongles.
143 */
144static int usb_stor_huawei_dongles_pid(struct us_data *us)
145{
146 struct usb_interface_descriptor *idesc;
147 int idProduct;
148
149 idesc = &us->pusb_intf->cur_altsetting->desc;
150 idProduct = us->pusb_dev->descriptor.idProduct;
151 /* The first port is CDROM,
152 * means the dongle in the single port mode,
153 * and a switch command is required to be sent. */
154 if (idesc && idesc->bInterfaceNumber == 0) {
155 if ((idProduct == 0x1001)
156 || (idProduct == 0x1003)
157 || (idProduct == 0x1004)
158 || (idProduct >= 0x1401 && idProduct <= 0x1500)
159 || (idProduct >= 0x1505 && idProduct <= 0x1600)
160 || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
161 return 1;
162 }
163 }
164 return 0;
165}
166
167int usb_stor_huawei_init(struct us_data *us)
168{
169 int result = 0;
170
171 if (usb_stor_huawei_dongles_pid(us)) {
172 if (us->pusb_dev->descriptor.idProduct >= 0x1446)
173 result = usb_stor_huawei_scsi_init(us);
174 else
175 result = usb_stor_huawei_feature_init(us);
176 }
177 return result;
178}
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 529327fbb06b..5376d4fc76f0 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
46 * flash reader */ 46 * flash reader */
47int usb_stor_ucr61s2b_init(struct us_data *us); 47int usb_stor_ucr61s2b_init(struct us_data *us);
48 48
49/* This places the HUAWEI E220 devices in multi-port mode */ 49/* This places the HUAWEI usb dongles in multi-port mode */
50int usb_stor_huawei_e220_init(struct us_data *us); 50int usb_stor_huawei_init(struct us_data *us);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d305a5aa3a5d..72923b56bbf6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1527,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1527/* Reported by fangxiaozhi <huananhu@huawei.com> 1527/* Reported by fangxiaozhi <huananhu@huawei.com>
1528 * This brings the HUAWEI data card devices into multi-port mode 1528 * This brings the HUAWEI data card devices into multi-port mode
1529 */ 1529 */
1530UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1530UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
1531 "HUAWEI MOBILE", 1531 "HUAWEI MOBILE",
1532 "Mass Storage", 1532 "Mass Storage",
1533 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1533 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
1534 0),
1535UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
1536 "HUAWEI MOBILE",
1537 "Mass Storage",
1538 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1539 0),
1540UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
1541 "HUAWEI MOBILE",
1542 "Mass Storage",
1543 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1544 0),
1545UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
1546 "HUAWEI MOBILE",
1547 "Mass Storage",
1548 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1549 0),
1550UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
1551 "HUAWEI MOBILE",
1552 "Mass Storage",
1553 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1554 0),
1555UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
1556 "HUAWEI MOBILE",
1557 "Mass Storage",
1558 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1559 0),
1560UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
1561 "HUAWEI MOBILE",
1562 "Mass Storage",
1563 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1564 0),
1565UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
1566 "HUAWEI MOBILE",
1567 "Mass Storage",
1568 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1569 0),
1570UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
1571 "HUAWEI MOBILE",
1572 "Mass Storage",
1573 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1574 0),
1575UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
1576 "HUAWEI MOBILE",
1577 "Mass Storage",
1578 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1579 0),
1580UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
1581 "HUAWEI MOBILE",
1582 "Mass Storage",
1583 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1584 0),
1585UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
1586 "HUAWEI MOBILE",
1587 "Mass Storage",
1588 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1589 0),
1590UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
1591 "HUAWEI MOBILE",
1592 "Mass Storage",
1593 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1594 0),
1595UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
1596 "HUAWEI MOBILE",
1597 "Mass Storage",
1598 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1599 0),
1600UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
1601 "HUAWEI MOBILE",
1602 "Mass Storage",
1603 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1604 0),
1605UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
1606 "HUAWEI MOBILE",
1607 "Mass Storage",
1608 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1609 0),
1610UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
1611 "HUAWEI MOBILE",
1612 "Mass Storage",
1613 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1614 0),
1615UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
1616 "HUAWEI MOBILE",
1617 "Mass Storage",
1618 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1619 0),
1620UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
1621 "HUAWEI MOBILE",
1622 "Mass Storage",
1623 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1624 0),
1625UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
1626 "HUAWEI MOBILE",
1627 "Mass Storage",
1628 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1629 0),
1630UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
1631 "HUAWEI MOBILE",
1632 "Mass Storage",
1633 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1634 0),
1635UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
1636 "HUAWEI MOBILE",
1637 "Mass Storage",
1638 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1639 0),
1640UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
1641 "HUAWEI MOBILE",
1642 "Mass Storage",
1643 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1644 0),
1645UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
1646 "HUAWEI MOBILE",
1647 "Mass Storage",
1648 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1649 0),
1650UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
1651 "HUAWEI MOBILE",
1652 "Mass Storage",
1653 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1654 0),
1655UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
1656 "HUAWEI MOBILE",
1657 "Mass Storage",
1658 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1659 0),
1660UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
1661 "HUAWEI MOBILE",
1662 "Mass Storage",
1663 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1664 0),
1665UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
1666 "HUAWEI MOBILE",
1667 "Mass Storage",
1668 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1669 0),
1670UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
1671 "HUAWEI MOBILE",
1672 "Mass Storage",
1673 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1674 0),
1675UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
1676 "HUAWEI MOBILE",
1677 "Mass Storage",
1678 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1679 0),
1680UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
1681 "HUAWEI MOBILE",
1682 "Mass Storage",
1683 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1684 0),
1685UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
1686 "HUAWEI MOBILE",
1687 "Mass Storage",
1688 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1689 0),
1690UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
1691 "HUAWEI MOBILE",
1692 "Mass Storage",
1693 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1694 0),
1695UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
1696 "HUAWEI MOBILE",
1697 "Mass Storage",
1698 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1699 0),
1700UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
1701 "HUAWEI MOBILE",
1702 "Mass Storage",
1703 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1704 0),
1705UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
1706 "HUAWEI MOBILE",
1707 "Mass Storage",
1708 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1709 0),
1710UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
1711 "HUAWEI MOBILE",
1712 "Mass Storage",
1713 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1714 0),
1715UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
1716 "HUAWEI MOBILE",
1717 "Mass Storage",
1718 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1719 0),
1720UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
1721 "HUAWEI MOBILE",
1722 "Mass Storage",
1723 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1724 0),
1725UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
1726 "HUAWEI MOBILE",
1727 "Mass Storage",
1728 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1729 0),
1730UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
1731 "HUAWEI MOBILE",
1732 "Mass Storage",
1733 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1734 0),
1735UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
1736 "HUAWEI MOBILE",
1737 "Mass Storage",
1738 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1739 0),
1740UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
1741 "HUAWEI MOBILE",
1742 "Mass Storage",
1743 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1744 0),
1745UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
1746 "HUAWEI MOBILE",
1747 "Mass Storage",
1748 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1749 0),
1750UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
1751 "HUAWEI MOBILE",
1752 "Mass Storage",
1753 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1754 0),
1755UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
1756 "HUAWEI MOBILE",
1757 "Mass Storage",
1758 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1759 0),
1760UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
1761 "HUAWEI MOBILE",
1762 "Mass Storage",
1763 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1764 0),
1765UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
1766 "HUAWEI MOBILE",
1767 "Mass Storage",
1768 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1769 0),
1770UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
1771 "HUAWEI MOBILE",
1772 "Mass Storage",
1773 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1774 0),
1775UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
1776 "HUAWEI MOBILE",
1777 "Mass Storage",
1778 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1779 0),
1780UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
1781 "HUAWEI MOBILE",
1782 "Mass Storage",
1783 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1784 0),
1785UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
1786 "HUAWEI MOBILE",
1787 "Mass Storage",
1788 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1789 0),
1790UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
1791 "HUAWEI MOBILE",
1792 "Mass Storage",
1793 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1794 0),
1795UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
1796 "HUAWEI MOBILE",
1797 "Mass Storage",
1798 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1799 0),
1800UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
1801 "HUAWEI MOBILE",
1802 "Mass Storage",
1803 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1804 0),
1805UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
1806 "HUAWEI MOBILE",
1807 "Mass Storage",
1808 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1809 0),
1810UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
1811 "HUAWEI MOBILE",
1812 "Mass Storage",
1813 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1814 0),
1815UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
1816 "HUAWEI MOBILE",
1817 "Mass Storage",
1818 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1819 0),
1820UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
1821 "HUAWEI MOBILE",
1822 "Mass Storage",
1823 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1824 0),
1825UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
1826 "HUAWEI MOBILE",
1827 "Mass Storage",
1828 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1829 0),
1830UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
1831 "HUAWEI MOBILE",
1832 "Mass Storage",
1833 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1834 0),
1835UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
1836 "HUAWEI MOBILE",
1837 "Mass Storage",
1838 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1839 0),
1840UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
1841 "HUAWEI MOBILE",
1842 "Mass Storage",
1843 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1844 0),
1845UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
1846 "HUAWEI MOBILE",
1847 "Mass Storage",
1848 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1849 0),
1850UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
1851 "HUAWEI MOBILE",
1852 "Mass Storage",
1853 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1854 0),
1855UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
1856 "HUAWEI MOBILE",
1857 "Mass Storage",
1858 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
1859 0), 1534 0),
1860 1535
1861/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ 1536/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 31b3e1a61bbd..cf09b6ba71ff 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
120 .useTransport = use_transport, \ 120 .useTransport = use_transport, \
121} 121}
122 122
123#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
124 vendor_name, product_name, use_protocol, use_transport, \
125 init_function, Flags) \
126{ \
127 .vendorName = vendor_name, \
128 .productName = product_name, \
129 .useProtocol = use_protocol, \
130 .useTransport = use_transport, \
131 .initFunction = init_function, \
132}
133
123static struct us_unusual_dev us_unusual_dev_list[] = { 134static struct us_unusual_dev us_unusual_dev_list[] = {
124# include "unusual_devs.h" 135# include "unusual_devs.h"
125 { } /* Terminating entry */ 136 { } /* Terminating entry */
@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
131#undef UNUSUAL_DEV 142#undef UNUSUAL_DEV
132#undef COMPLIANT_DEV 143#undef COMPLIANT_DEV
133#undef USUAL_DEV 144#undef USUAL_DEV
145#undef UNUSUAL_VENDOR_INTF
134 146
135#ifdef CONFIG_LOCKDEP 147#ifdef CONFIG_LOCKDEP
136 148
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index b78a526910fb..5ef8ce74aae4 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -41,6 +41,20 @@
41#define USUAL_DEV(useProto, useTrans) \ 41#define USUAL_DEV(useProto, useTrans) \
42{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } 42{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
43 43
44/* Define the device is matched with Vendor ID and interface descriptors */
45#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
46 vendorName, productName, useProtocol, useTransport, \
47 initFunction, flags) \
48{ \
49 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
50 | USB_DEVICE_ID_MATCH_VENDOR, \
51 .idVendor = (id_vendor), \
52 .bInterfaceClass = (cl), \
53 .bInterfaceSubClass = (sc), \
54 .bInterfaceProtocol = (pr), \
55 .driver_info = (flags) \
56}
57
44struct usb_device_id usb_storage_usb_ids[] = { 58struct usb_device_id usb_storage_usb_ids[] = {
45# include "unusual_devs.h" 59# include "unusual_devs.h"
46 { } /* Terminating entry */ 60 { } /* Terminating entry */
@@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
50#undef UNUSUAL_DEV 64#undef UNUSUAL_DEV
51#undef COMPLIANT_DEV 65#undef COMPLIANT_DEV
52#undef USUAL_DEV 66#undef USUAL_DEV
67#undef UNUSUAL_VENDOR_INTF
53 68
54/* 69/*
55 * The table of devices to ignore 70 * The table of devices to ignore
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 4362d9e7baa3..f72323ef618f 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
240 filled = 1; 240 filled = 1;
241 } else { 241 } else {
242 /* Drop writes, fill reads with FF */ 242 /* Drop writes, fill reads with FF */
243 filled = min((size_t)(x_end - pos), count);
243 if (!iswrite) { 244 if (!iswrite) {
244 char val = 0xFF; 245 char val = 0xFF;
245 size_t i; 246 size_t i;
246 247
247 for (i = 0; i < x_end - pos; i++) { 248 for (i = 0; i < filled; i++) {
248 if (put_user(val, buf + i)) 249 if (put_user(val, buf + i))
249 goto out; 250 goto out;
250 } 251 }
251 } 252 }
252 253
253 filled = x_end - pos;
254 } 254 }
255 255
256 count -= filled; 256 count -= filled;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b21b234..959b1cd89e6a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
165} 165}
166 166
167/* Caller must have TX VQ lock */ 167/* Caller must have TX VQ lock */
168static void tx_poll_start(struct vhost_net *net, struct socket *sock) 168static int tx_poll_start(struct vhost_net *net, struct socket *sock)
169{ 169{
170 int ret;
171
170 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) 172 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
171 return; 173 return 0;
172 vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 174 ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
173 net->tx_poll_state = VHOST_NET_POLL_STARTED; 175 if (!ret)
176 net->tx_poll_state = VHOST_NET_POLL_STARTED;
177 return ret;
174} 178}
175 179
176/* In case of DMA done not in order in lower device driver for some reason. 180/* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
642 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); 646 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
643} 647}
644 648
645static void vhost_net_enable_vq(struct vhost_net *n, 649static int vhost_net_enable_vq(struct vhost_net *n,
646 struct vhost_virtqueue *vq) 650 struct vhost_virtqueue *vq)
647{ 651{
648 struct socket *sock; 652 struct socket *sock;
653 int ret;
649 654
650 sock = rcu_dereference_protected(vq->private_data, 655 sock = rcu_dereference_protected(vq->private_data,
651 lockdep_is_held(&vq->mutex)); 656 lockdep_is_held(&vq->mutex));
652 if (!sock) 657 if (!sock)
653 return; 658 return 0;
654 if (vq == n->vqs + VHOST_NET_VQ_TX) { 659 if (vq == n->vqs + VHOST_NET_VQ_TX) {
655 n->tx_poll_state = VHOST_NET_POLL_STOPPED; 660 n->tx_poll_state = VHOST_NET_POLL_STOPPED;
656 tx_poll_start(n, sock); 661 ret = tx_poll_start(n, sock);
657 } else 662 } else
658 vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 663 ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
664
665 return ret;
659} 666}
660 667
661static struct socket *vhost_net_stop_vq(struct vhost_net *n, 668static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
827 r = PTR_ERR(ubufs); 834 r = PTR_ERR(ubufs);
828 goto err_ubufs; 835 goto err_ubufs;
829 } 836 }
830 oldubufs = vq->ubufs; 837
831 vq->ubufs = ubufs;
832 vhost_net_disable_vq(n, vq); 838 vhost_net_disable_vq(n, vq);
833 rcu_assign_pointer(vq->private_data, sock); 839 rcu_assign_pointer(vq->private_data, sock);
834 vhost_net_enable_vq(n, vq);
835
836 r = vhost_init_used(vq); 840 r = vhost_init_used(vq);
837 if (r) 841 if (r)
838 goto err_vq; 842 goto err_used;
843 r = vhost_net_enable_vq(n, vq);
844 if (r)
845 goto err_used;
846
847 oldubufs = vq->ubufs;
848 vq->ubufs = ubufs;
839 849
840 n->tx_packets = 0; 850 n->tx_packets = 0;
841 n->tx_zcopy_err = 0; 851 n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
859 mutex_unlock(&n->dev.mutex); 869 mutex_unlock(&n->dev.mutex);
860 return 0; 870 return 0;
861 871
872err_used:
873 rcu_assign_pointer(vq->private_data, oldsock);
874 vhost_net_enable_vq(n, vq);
875 if (ubufs)
876 vhost_ubuf_put_and_wait(ubufs);
862err_ubufs: 877err_ubufs:
863 fput(sock->file); 878 fput(sock->file);
864err_vq: 879err_vq:
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index b20df5c829f5..22321cf84fbe 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
575 575
576 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ 576 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
577 tv_tpg = vs->vs_tpg; 577 tv_tpg = vs->vs_tpg;
578 if (unlikely(!tv_tpg)) { 578 if (unlikely(!tv_tpg))
579 pr_err("%s endpoint not set\n", __func__);
580 return; 579 return;
581 }
582 580
583 mutex_lock(&vq->mutex); 581 mutex_lock(&vq->mutex);
584 vhost_disable_notify(&vs->dev, vq); 582 vhost_disable_notify(&vs->dev, vq);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 34389f75fe65..9759249e6d90 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
77 init_poll_funcptr(&poll->table, vhost_poll_func); 77 init_poll_funcptr(&poll->table, vhost_poll_func);
78 poll->mask = mask; 78 poll->mask = mask;
79 poll->dev = dev; 79 poll->dev = dev;
80 poll->wqh = NULL;
80 81
81 vhost_work_init(&poll->work, fn); 82 vhost_work_init(&poll->work, fn);
82} 83}
83 84
84/* Start polling a file. We add ourselves to file's wait queue. The caller must 85/* Start polling a file. We add ourselves to file's wait queue. The caller must
85 * keep a reference to a file until after vhost_poll_stop is called. */ 86 * keep a reference to a file until after vhost_poll_stop is called. */
86void vhost_poll_start(struct vhost_poll *poll, struct file *file) 87int vhost_poll_start(struct vhost_poll *poll, struct file *file)
87{ 88{
88 unsigned long mask; 89 unsigned long mask;
90 int ret = 0;
89 91
90 mask = file->f_op->poll(file, &poll->table); 92 mask = file->f_op->poll(file, &poll->table);
91 if (mask) 93 if (mask)
92 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 94 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
95 if (mask & POLLERR) {
96 if (poll->wqh)
97 remove_wait_queue(poll->wqh, &poll->wait);
98 ret = -EINVAL;
99 }
100
101 return ret;
93} 102}
94 103
95/* Stop polling a file. After this function returns, it becomes safe to drop the 104/* Stop polling a file. After this function returns, it becomes safe to drop the
96 * file reference. You must also flush afterwards. */ 105 * file reference. You must also flush afterwards. */
97void vhost_poll_stop(struct vhost_poll *poll) 106void vhost_poll_stop(struct vhost_poll *poll)
98{ 107{
99 remove_wait_queue(poll->wqh, &poll->wait); 108 if (poll->wqh) {
109 remove_wait_queue(poll->wqh, &poll->wait);
110 poll->wqh = NULL;
111 }
100} 112}
101 113
102static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, 114static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
792 fput(filep); 804 fput(filep);
793 805
794 if (pollstart && vq->handle_kick) 806 if (pollstart && vq->handle_kick)
795 vhost_poll_start(&vq->poll, vq->kick); 807 r = vhost_poll_start(&vq->poll, vq->kick);
796 808
797 mutex_unlock(&vq->mutex); 809 mutex_unlock(&vq->mutex);
798 810
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2639c58b23ab..17261e277c02 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
42 42
43void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 43void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
44 unsigned long mask, struct vhost_dev *dev); 44 unsigned long mask, struct vhost_dev *dev);
45void vhost_poll_start(struct vhost_poll *poll, struct file *file); 45int vhost_poll_start(struct vhost_poll *poll, struct file *file);
46void vhost_poll_stop(struct vhost_poll *poll); 46void vhost_poll_stop(struct vhost_poll *poll);
47void vhost_poll_flush(struct vhost_poll *poll); 47void vhost_poll_flush(struct vhost_poll *poll);
48void vhost_poll_queue(struct vhost_poll *poll); 48void vhost_poll_queue(struct vhost_poll *poll);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index f088d4c07381..d84329676689 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -196,7 +196,7 @@ static int apple_bl_add(struct acpi_device *dev)
196 return 0; 196 return 0;
197} 197}
198 198
199static int apple_bl_remove(struct acpi_device *dev, int type) 199static int apple_bl_remove(struct acpi_device *dev)
200{ 200{
201 backlight_device_unregister(apple_backlight_device); 201 backlight_device_unregister(apple_backlight_device);
202 202
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 4ef18e2e90cc..2d0d144add1b 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -1121,8 +1121,7 @@ static int exynos_dp_remove(struct platform_device *pdev)
1121 1121
1122 disable_irq(dp->irq); 1122 disable_irq(dp->irq);
1123 1123
1124 if (work_pending(&dp->hotplug_work)) 1124 flush_work(&dp->hotplug_work);
1125 flush_work(&dp->hotplug_work);
1126 1125
1127 if (pdev->dev.of_node) { 1126 if (pdev->dev.of_node) {
1128 if (dp->phy_addr) 1127 if (dp->phy_addr)
@@ -1144,8 +1143,7 @@ static int exynos_dp_suspend(struct device *dev)
1144 struct exynos_dp_platdata *pdata = dev->platform_data; 1143 struct exynos_dp_platdata *pdata = dev->platform_data;
1145 struct exynos_dp_device *dp = dev_get_drvdata(dev); 1144 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1146 1145
1147 if (work_pending(&dp->hotplug_work)) 1146 flush_work(&dp->hotplug_work);
1148 flush_work(&dp->hotplug_work);
1149 1147
1150 if (dev->of_node) { 1148 if (dev->of_node) {
1151 if (dp->phy_addr) 1149 if (dp->phy_addr)
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 12526787a7c7..0abf2bf20836 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -139,6 +139,7 @@ struct imxfb_info {
139 struct clk *clk_ahb; 139 struct clk *clk_ahb;
140 struct clk *clk_per; 140 struct clk *clk_per;
141 enum imxfb_type devtype; 141 enum imxfb_type devtype;
142 bool enabled;
142 143
143 /* 144 /*
144 * These are the addresses we mapped 145 * These are the addresses we mapped
@@ -536,6 +537,10 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)
536 537
537static void imxfb_enable_controller(struct imxfb_info *fbi) 538static void imxfb_enable_controller(struct imxfb_info *fbi)
538{ 539{
540
541 if (fbi->enabled)
542 return;
543
539 pr_debug("Enabling LCD controller\n"); 544 pr_debug("Enabling LCD controller\n");
540 545
541 writel(fbi->screen_dma, fbi->regs + LCDC_SSA); 546 writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
@@ -556,6 +561,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
556 clk_prepare_enable(fbi->clk_ipg); 561 clk_prepare_enable(fbi->clk_ipg);
557 clk_prepare_enable(fbi->clk_ahb); 562 clk_prepare_enable(fbi->clk_ahb);
558 clk_prepare_enable(fbi->clk_per); 563 clk_prepare_enable(fbi->clk_per);
564 fbi->enabled = true;
559 565
560 if (fbi->backlight_power) 566 if (fbi->backlight_power)
561 fbi->backlight_power(1); 567 fbi->backlight_power(1);
@@ -565,6 +571,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
565 571
566static void imxfb_disable_controller(struct imxfb_info *fbi) 572static void imxfb_disable_controller(struct imxfb_info *fbi)
567{ 573{
574 if (!fbi->enabled)
575 return;
576
568 pr_debug("Disabling LCD controller\n"); 577 pr_debug("Disabling LCD controller\n");
569 578
570 if (fbi->backlight_power) 579 if (fbi->backlight_power)
@@ -575,6 +584,7 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
575 clk_disable_unprepare(fbi->clk_per); 584 clk_disable_unprepare(fbi->clk_per);
576 clk_disable_unprepare(fbi->clk_ipg); 585 clk_disable_unprepare(fbi->clk_ipg);
577 clk_disable_unprepare(fbi->clk_ahb); 586 clk_disable_unprepare(fbi->clk_ahb);
587 fbi->enabled = false;
578 588
579 writel(0, fbi->regs + LCDC_RMCR); 589 writel(0, fbi->regs + LCDC_RMCR);
580} 590}
@@ -729,6 +739,8 @@ static int __init imxfb_init_fbinfo(struct platform_device *pdev)
729 739
730 memset(fbi, 0, sizeof(struct imxfb_info)); 740 memset(fbi, 0, sizeof(struct imxfb_info));
731 741
742 fbi->devtype = pdev->id_entry->driver_data;
743
732 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id)); 744 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
733 745
734 info->fix.type = FB_TYPE_PACKED_PIXELS; 746 info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -789,7 +801,6 @@ static int __init imxfb_probe(struct platform_device *pdev)
789 return -ENOMEM; 801 return -ENOMEM;
790 802
791 fbi = info->par; 803 fbi = info->par;
792 fbi->devtype = pdev->id_entry->driver_data;
793 804
794 if (!fb_mode) 805 if (!fb_mode)
795 fb_mode = pdata->mode[0].mode.name; 806 fb_mode = pdata->mode[0].mode.name;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 18688c12e30d..d7d66ef5cb58 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -538,6 +538,7 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {
538 FEAT_ALPHA_FIXED_ZORDER, 538 FEAT_ALPHA_FIXED_ZORDER,
539 FEAT_FIFO_MERGE, 539 FEAT_FIFO_MERGE,
540 FEAT_OMAP3_DSI_FIFO_BUG, 540 FEAT_OMAP3_DSI_FIFO_BUG,
541 FEAT_DPI_USES_VDDS_DSI,
541}; 542};
542 543
543static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = { 544static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 4dcfced107f5..084041d42c9a 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)
25static int vcpu_online(unsigned int cpu) 25static int vcpu_online(unsigned int cpu)
26{ 26{
27 int err; 27 int err;
28 char dir[32], state[32]; 28 char dir[16], state[16];
29 29
30 sprintf(dir, "cpu/%u", cpu); 30 sprintf(dir, "cpu/%u", cpu);
31 err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); 31 err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
32 if (err != 1) { 32 if (err != 1) {
33 if (!xen_initial_domain()) 33 if (!xen_initial_domain())
34 printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); 34 printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0be4df39e953..22f77c5f6012 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
840 840
841 if (irq == -1) { 841 if (irq == -1) {
842 irq = xen_allocate_irq_dynamic(); 842 irq = xen_allocate_irq_dynamic();
843 if (irq == -1) 843 if (irq < 0)
844 goto out; 844 goto out;
845 845
846 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, 846 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
@@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
944 944
945 if (irq == -1) { 945 if (irq == -1) {
946 irq = xen_allocate_irq_dynamic(); 946 irq = xen_allocate_irq_dynamic();
947 if (irq == -1) 947 if (irq < 0)
948 goto out; 948 goto out;
949 949
950 irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 950 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
@@ -1787,7 +1787,7 @@ void xen_callback_vector(void)
1787 int rc; 1787 int rc;
1788 uint64_t callback_via; 1788 uint64_t callback_via;
1789 if (xen_have_vector_callback) { 1789 if (xen_have_vector_callback) {
1790 callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK); 1790 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
1791 rc = xen_set_callback_via(callback_via); 1791 rc = xen_set_callback_via(callback_via);
1792 if (rc) { 1792 if (rc) {
1793 printk(KERN_ERR "Request for Xen HVM callback vector" 1793 printk(KERN_ERR "Request for Xen HVM callback vector"
@@ -1798,8 +1798,9 @@ void xen_callback_vector(void)
1798 printk(KERN_INFO "Xen HVM callback vector for event delivery is " 1798 printk(KERN_INFO "Xen HVM callback vector for event delivery is "
1799 "enabled\n"); 1799 "enabled\n");
1800 /* in the restore case the vector has already been allocated */ 1800 /* in the restore case the vector has already been allocated */
1801 if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors)) 1801 if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
1802 alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector); 1802 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1803 xen_hvm_callback_vector);
1803 } 1804 }
1804} 1805}
1805#else 1806#else
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2e22df2f7a3f..3c8803feba26 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
56static atomic_t pages_mapped = ATOMIC_INIT(0); 56static atomic_t pages_mapped = ATOMIC_INIT(0);
57 57
58static int use_ptemod; 58static int use_ptemod;
59#define populate_freeable_maps use_ptemod
59 60
60struct gntdev_priv { 61struct gntdev_priv {
62 /* maps with visible offsets in the file descriptor */
61 struct list_head maps; 63 struct list_head maps;
62 /* lock protects maps from concurrent changes */ 64 /* maps that are not visible; will be freed on munmap.
65 * Only populated if populate_freeable_maps == 1 */
66 struct list_head freeable_maps;
67 /* lock protects maps and freeable_maps */
63 spinlock_t lock; 68 spinlock_t lock;
64 struct mm_struct *mm; 69 struct mm_struct *mm;
65 struct mmu_notifier mn; 70 struct mmu_notifier mn;
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
193 return NULL; 198 return NULL;
194} 199}
195 200
196static void gntdev_put_map(struct grant_map *map) 201static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
197{ 202{
198 if (!map) 203 if (!map)
199 return; 204 return;
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)
208 evtchn_put(map->notify.event); 213 evtchn_put(map->notify.event);
209 } 214 }
210 215
216 if (populate_freeable_maps && priv) {
217 spin_lock(&priv->lock);
218 list_del(&map->next);
219 spin_unlock(&priv->lock);
220 }
221
211 if (map->pages && !use_ptemod) 222 if (map->pages && !use_ptemod)
212 unmap_grant_pages(map, 0, map->count); 223 unmap_grant_pages(map, 0, map->count);
213 gntdev_free_map(map); 224 gntdev_free_map(map);
@@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
301 312
302 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 313 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
303 int pgno = (map->notify.addr >> PAGE_SHIFT); 314 int pgno = (map->notify.addr >> PAGE_SHIFT);
304 if (pgno >= offset && pgno < offset + pages && use_ptemod) { 315 if (pgno >= offset && pgno < offset + pages) {
305 void __user *tmp = (void __user *) 316 /* No need for kmap, pages are in lowmem */
306 map->vma->vm_start + map->notify.addr; 317 uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
307 err = copy_to_user(tmp, &err, 1);
308 if (err)
309 return -EFAULT;
310 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
311 } else if (pgno >= offset && pgno < offset + pages) {
312 uint8_t *tmp = kmap(map->pages[pgno]);
313 tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; 318 tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
314 kunmap(map->pages[pgno]);
315 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; 319 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
316 } 320 }
317 } 321 }
@@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
376static void gntdev_vma_close(struct vm_area_struct *vma) 380static void gntdev_vma_close(struct vm_area_struct *vma)
377{ 381{
378 struct grant_map *map = vma->vm_private_data; 382 struct grant_map *map = vma->vm_private_data;
383 struct file *file = vma->vm_file;
384 struct gntdev_priv *priv = file->private_data;
379 385
380 pr_debug("gntdev_vma_close %p\n", vma); 386 pr_debug("gntdev_vma_close %p\n", vma);
381 map->vma = NULL; 387 if (use_ptemod) {
388 /* It is possible that an mmu notifier could be running
389 * concurrently, so take priv->lock to ensure that the vma won't
390 * vanishing during the unmap_grant_pages call, since we will
391 * spin here until that completes. Such a concurrent call will
392 * not do any unmapping, since that has been done prior to
393 * closing the vma, but it may still iterate the unmap_ops list.
394 */
395 spin_lock(&priv->lock);
396 map->vma = NULL;
397 spin_unlock(&priv->lock);
398 }
382 vma->vm_private_data = NULL; 399 vma->vm_private_data = NULL;
383 gntdev_put_map(map); 400 gntdev_put_map(priv, map);
384} 401}
385 402
386static struct vm_operations_struct gntdev_vmops = { 403static struct vm_operations_struct gntdev_vmops = {
@@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = {
390 407
391/* ------------------------------------------------------------------ */ 408/* ------------------------------------------------------------------ */
392 409
410static void unmap_if_in_range(struct grant_map *map,
411 unsigned long start, unsigned long end)
412{
413 unsigned long mstart, mend;
414 int err;
415
416 if (!map->vma)
417 return;
418 if (map->vma->vm_start >= end)
419 return;
420 if (map->vma->vm_end <= start)
421 return;
422 mstart = max(start, map->vma->vm_start);
423 mend = min(end, map->vma->vm_end);
424 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
425 map->index, map->count,
426 map->vma->vm_start, map->vma->vm_end,
427 start, end, mstart, mend);
428 err = unmap_grant_pages(map,
429 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
430 (mend - mstart) >> PAGE_SHIFT);
431 WARN_ON(err);
432}
433
393static void mn_invl_range_start(struct mmu_notifier *mn, 434static void mn_invl_range_start(struct mmu_notifier *mn,
394 struct mm_struct *mm, 435 struct mm_struct *mm,
395 unsigned long start, unsigned long end) 436 unsigned long start, unsigned long end)
396{ 437{
397 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); 438 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
398 struct grant_map *map; 439 struct grant_map *map;
399 unsigned long mstart, mend;
400 int err;
401 440
402 spin_lock(&priv->lock); 441 spin_lock(&priv->lock);
403 list_for_each_entry(map, &priv->maps, next) { 442 list_for_each_entry(map, &priv->maps, next) {
404 if (!map->vma) 443 unmap_if_in_range(map, start, end);
405 continue; 444 }
406 if (map->vma->vm_start >= end) 445 list_for_each_entry(map, &priv->freeable_maps, next) {
407 continue; 446 unmap_if_in_range(map, start, end);
408 if (map->vma->vm_end <= start)
409 continue;
410 mstart = max(start, map->vma->vm_start);
411 mend = min(end, map->vma->vm_end);
412 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
413 map->index, map->count,
414 map->vma->vm_start, map->vma->vm_end,
415 start, end, mstart, mend);
416 err = unmap_grant_pages(map,
417 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
418 (mend - mstart) >> PAGE_SHIFT);
419 WARN_ON(err);
420 } 447 }
421 spin_unlock(&priv->lock); 448 spin_unlock(&priv->lock);
422} 449}
@@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn,
445 err = unmap_grant_pages(map, /* offset */ 0, map->count); 472 err = unmap_grant_pages(map, /* offset */ 0, map->count);
446 WARN_ON(err); 473 WARN_ON(err);
447 } 474 }
475 list_for_each_entry(map, &priv->freeable_maps, next) {
476 if (!map->vma)
477 continue;
478 pr_debug("map %d+%d (%lx %lx)\n",
479 map->index, map->count,
480 map->vma->vm_start, map->vma->vm_end);
481 err = unmap_grant_pages(map, /* offset */ 0, map->count);
482 WARN_ON(err);
483 }
448 spin_unlock(&priv->lock); 484 spin_unlock(&priv->lock);
449} 485}
450 486
@@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
466 return -ENOMEM; 502 return -ENOMEM;
467 503
468 INIT_LIST_HEAD(&priv->maps); 504 INIT_LIST_HEAD(&priv->maps);
505 INIT_LIST_HEAD(&priv->freeable_maps);
469 spin_lock_init(&priv->lock); 506 spin_lock_init(&priv->lock);
470 507
471 if (use_ptemod) { 508 if (use_ptemod) {
@@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
500 while (!list_empty(&priv->maps)) { 537 while (!list_empty(&priv->maps)) {
501 map = list_entry(priv->maps.next, struct grant_map, next); 538 map = list_entry(priv->maps.next, struct grant_map, next);
502 list_del(&map->next); 539 list_del(&map->next);
503 gntdev_put_map(map); 540 gntdev_put_map(NULL /* already removed */, map);
504 } 541 }
542 WARN_ON(!list_empty(&priv->freeable_maps));
505 543
506 if (use_ptemod) 544 if (use_ptemod)
507 mmu_notifier_unregister(&priv->mn, priv->mm); 545 mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
529 567
530 if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { 568 if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
531 pr_debug("can't map: over limit\n"); 569 pr_debug("can't map: over limit\n");
532 gntdev_put_map(map); 570 gntdev_put_map(NULL, map);
533 return err; 571 return err;
534 } 572 }
535 573
536 if (copy_from_user(map->grants, &u->refs, 574 if (copy_from_user(map->grants, &u->refs,
537 sizeof(map->grants[0]) * op.count) != 0) { 575 sizeof(map->grants[0]) * op.count) != 0) {
538 gntdev_put_map(map); 576 gntdev_put_map(NULL, map);
539 return err; 577 return -EFAULT;
540 } 578 }
541 579
542 spin_lock(&priv->lock); 580 spin_lock(&priv->lock);
@@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
565 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); 603 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
566 if (map) { 604 if (map) {
567 list_del(&map->next); 605 list_del(&map->next);
606 if (populate_freeable_maps)
607 list_add_tail(&map->next, &priv->freeable_maps);
568 err = 0; 608 err = 0;
569 } 609 }
570 spin_unlock(&priv->lock); 610 spin_unlock(&priv->lock);
571 if (map) 611 if (map)
572 gntdev_put_map(map); 612 gntdev_put_map(priv, map);
573 return err; 613 return err;
574} 614}
575 615
@@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
579 struct ioctl_gntdev_get_offset_for_vaddr op; 619 struct ioctl_gntdev_get_offset_for_vaddr op;
580 struct vm_area_struct *vma; 620 struct vm_area_struct *vma;
581 struct grant_map *map; 621 struct grant_map *map;
622 int rv = -EINVAL;
582 623
583 if (copy_from_user(&op, u, sizeof(op)) != 0) 624 if (copy_from_user(&op, u, sizeof(op)) != 0)
584 return -EFAULT; 625 return -EFAULT;
585 pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); 626 pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
586 627
628 down_read(&current->mm->mmap_sem);
587 vma = find_vma(current->mm, op.vaddr); 629 vma = find_vma(current->mm, op.vaddr);
588 if (!vma || vma->vm_ops != &gntdev_vmops) 630 if (!vma || vma->vm_ops != &gntdev_vmops)
589 return -EINVAL; 631 goto out_unlock;
590 632
591 map = vma->vm_private_data; 633 map = vma->vm_private_data;
592 if (!map) 634 if (!map)
593 return -EINVAL; 635 goto out_unlock;
594 636
595 op.offset = map->index << PAGE_SHIFT; 637 op.offset = map->index << PAGE_SHIFT;
596 op.count = map->count; 638 op.count = map->count;
639 rv = 0;
597 640
598 if (copy_to_user(u, &op, sizeof(op)) != 0) 641 out_unlock:
642 up_read(&current->mm->mmap_sem);
643
644 if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
599 return -EFAULT; 645 return -EFAULT;
600 return 0; 646 return rv;
601} 647}
602 648
603static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) 649static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
@@ -778,7 +824,7 @@ out_unlock_put:
778out_put_map: 824out_put_map:
779 if (use_ptemod) 825 if (use_ptemod)
780 map->vma = NULL; 826 map->vma = NULL;
781 gntdev_put_map(map); 827 gntdev_put_map(priv, map);
782 return err; 828 return err;
783} 829}
784 830
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7038de53652b..157c0ccda3ef 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -56,10 +56,6 @@
56/* External tools reserve first few grant table entries. */ 56/* External tools reserve first few grant table entries. */
57#define NR_RESERVED_ENTRIES 8 57#define NR_RESERVED_ENTRIES 8
58#define GNTTAB_LIST_END 0xffffffff 58#define GNTTAB_LIST_END 0xffffffff
59#define GREFS_PER_GRANT_FRAME \
60(grant_table_version == 1 ? \
61(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
62(PAGE_SIZE / sizeof(union grant_entry_v2)))
63 59
64static grant_ref_t **gnttab_list; 60static grant_ref_t **gnttab_list;
65static unsigned int nr_grant_frames; 61static unsigned int nr_grant_frames;
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
154static grant_status_t *grstatus; 150static grant_status_t *grstatus;
155 151
156static int grant_table_version; 152static int grant_table_version;
153static int grefs_per_grant_frame;
157 154
158static struct gnttab_free_callback *gnttab_free_callback_list; 155static struct gnttab_free_callback *gnttab_free_callback_list;
159 156
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
767 unsigned int new_nr_grant_frames, extra_entries, i; 764 unsigned int new_nr_grant_frames, extra_entries, i;
768 unsigned int nr_glist_frames, new_nr_glist_frames; 765 unsigned int nr_glist_frames, new_nr_glist_frames;
769 766
767 BUG_ON(grefs_per_grant_frame == 0);
768
770 new_nr_grant_frames = nr_grant_frames + more_frames; 769 new_nr_grant_frames = nr_grant_frames + more_frames;
771 extra_entries = more_frames * GREFS_PER_GRANT_FRAME; 770 extra_entries = more_frames * grefs_per_grant_frame;
772 771
773 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 772 nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
774 new_nr_glist_frames = 773 new_nr_glist_frames =
775 (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 774 (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
776 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { 775 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
777 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); 776 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
778 if (!gnttab_list[i]) 777 if (!gnttab_list[i])
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
780 } 779 }
781 780
782 781
783 for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; 782 for (i = grefs_per_grant_frame * nr_grant_frames;
784 i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) 783 i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
785 gnttab_entry(i) = i + 1; 784 gnttab_entry(i) = i + 1;
786 785
787 gnttab_entry(i) = gnttab_free_head; 786 gnttab_entry(i) = gnttab_free_head;
788 gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; 787 gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
789 gnttab_free_count += extra_entries; 788 gnttab_free_count += extra_entries;
790 789
791 nr_grant_frames = new_nr_grant_frames; 790 nr_grant_frames = new_nr_grant_frames;
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
957 956
958static unsigned nr_status_frames(unsigned nr_grant_frames) 957static unsigned nr_status_frames(unsigned nr_grant_frames)
959{ 958{
960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; 959 BUG_ON(grefs_per_grant_frame == 0);
960 return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
961} 961}
962 962
963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
1115 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); 1115 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1116 if (rc == 0 && gsv.version == 2) { 1116 if (rc == 0 && gsv.version == 2) {
1117 grant_table_version = 2; 1117 grant_table_version = 2;
1118 grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
1118 gnttab_interface = &gnttab_v2_ops; 1119 gnttab_interface = &gnttab_v2_ops;
1119 } else if (grant_table_version == 2) { 1120 } else if (grant_table_version == 2) {
1120 /* 1121 /*
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
1127 panic("we need grant tables version 2, but only version 1 is available"); 1128 panic("we need grant tables version 2, but only version 1 is available");
1128 } else { 1129 } else {
1129 grant_table_version = 1; 1130 grant_table_version = 1;
1131 grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
1130 gnttab_interface = &gnttab_v1_ops; 1132 gnttab_interface = &gnttab_v1_ops;
1131 } 1133 }
1132 printk(KERN_INFO "Grant tables using version %d layout.\n", 1134 printk(KERN_INFO "Grant tables using version %d layout.\n",
1133 grant_table_version); 1135 grant_table_version);
1134} 1136}
1135 1137
1136int gnttab_resume(void) 1138static int gnttab_setup(void)
1137{ 1139{
1138 unsigned int max_nr_gframes; 1140 unsigned int max_nr_gframes;
1139 1141
1140 gnttab_request_version();
1141 max_nr_gframes = gnttab_max_grant_frames(); 1142 max_nr_gframes = gnttab_max_grant_frames();
1142 if (max_nr_gframes < nr_grant_frames) 1143 if (max_nr_gframes < nr_grant_frames)
1143 return -ENOSYS; 1144 return -ENOSYS;
@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
1160 return 0; 1161 return 0;
1161} 1162}
1162 1163
1164int gnttab_resume(void)
1165{
1166 gnttab_request_version();
1167 return gnttab_setup();
1168}
1169
1163int gnttab_suspend(void) 1170int gnttab_suspend(void)
1164{ 1171{
1165 gnttab_interface->unmap_frames(); 1172 gnttab_interface->unmap_frames();
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
1171 int rc; 1178 int rc;
1172 unsigned int cur, extra; 1179 unsigned int cur, extra;
1173 1180
1181 BUG_ON(grefs_per_grant_frame == 0);
1174 cur = nr_grant_frames; 1182 cur = nr_grant_frames;
1175 extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / 1183 extra = ((req_entries + (grefs_per_grant_frame-1)) /
1176 GREFS_PER_GRANT_FRAME); 1184 grefs_per_grant_frame);
1177 if (cur + extra > gnttab_max_grant_frames()) 1185 if (cur + extra > gnttab_max_grant_frames())
1178 return -ENOSPC; 1186 return -ENOSPC;
1179 1187
@@ -1191,21 +1199,23 @@ int gnttab_init(void)
1191 unsigned int nr_init_grefs; 1199 unsigned int nr_init_grefs;
1192 int ret; 1200 int ret;
1193 1201
1202 gnttab_request_version();
1194 nr_grant_frames = 1; 1203 nr_grant_frames = 1;
1195 boot_max_nr_grant_frames = __max_nr_grant_frames(); 1204 boot_max_nr_grant_frames = __max_nr_grant_frames();
1196 1205
1197 /* Determine the maximum number of frames required for the 1206 /* Determine the maximum number of frames required for the
1198 * grant reference free list on the current hypervisor. 1207 * grant reference free list on the current hypervisor.
1199 */ 1208 */
1209 BUG_ON(grefs_per_grant_frame == 0);
1200 max_nr_glist_frames = (boot_max_nr_grant_frames * 1210 max_nr_glist_frames = (boot_max_nr_grant_frames *
1201 GREFS_PER_GRANT_FRAME / RPP); 1211 grefs_per_grant_frame / RPP);
1202 1212
1203 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), 1213 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
1204 GFP_KERNEL); 1214 GFP_KERNEL);
1205 if (gnttab_list == NULL) 1215 if (gnttab_list == NULL)
1206 return -ENOMEM; 1216 return -ENOMEM;
1207 1217
1208 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 1218 nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
1209 for (i = 0; i < nr_glist_frames; i++) { 1219 for (i = 0; i < nr_glist_frames; i++) {
1210 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); 1220 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1211 if (gnttab_list[i] == NULL) { 1221 if (gnttab_list[i] == NULL) {
@@ -1214,12 +1224,12 @@ int gnttab_init(void)
1214 } 1224 }
1215 } 1225 }
1216 1226
1217 if (gnttab_resume() < 0) { 1227 if (gnttab_setup() < 0) {
1218 ret = -ENODEV; 1228 ret = -ENODEV;
1219 goto ini_nomem; 1229 goto ini_nomem;
1220 } 1230 }
1221 1231
1222 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; 1232 nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
1223 1233
1224 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) 1234 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1225 gnttab_entry(i) = i + 1; 1235 gnttab_entry(i) = i + 1;
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 067fcfa1723e..5a27a4599a4a 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -278,8 +278,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
278 * Only those at cpu present map has its sys interface. 278 * Only those at cpu present map has its sys interface.
279 */ 279 */
280 if (info->flags & XEN_PCPU_FLAGS_INVALID) { 280 if (info->flags & XEN_PCPU_FLAGS_INVALID) {
281 if (pcpu) 281 unregister_and_remove_pcpu(pcpu);
282 unregister_and_remove_pcpu(pcpu);
283 return 0; 282 return 0;
284 } 283 }
285 284
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 0bbbccbb1f12..ca2b00e9d558 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata)
199 LIST_HEAD(pagelist); 199 LIST_HEAD(pagelist);
200 struct mmap_mfn_state state; 200 struct mmap_mfn_state state;
201 201
202 if (!xen_initial_domain())
203 return -EPERM;
204
205 /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 202 /* We only support privcmd_ioctl_mmap_batch for auto translated. */
206 if (xen_feature(XENFEAT_auto_translated_physmap)) 203 if (xen_feature(XENFEAT_auto_translated_physmap))
207 return -ENOSYS; 204 return -ENOSYS;
@@ -261,11 +258,12 @@ struct mmap_batch_state {
261 * -ENOENT if at least 1 -ENOENT has happened. 258 * -ENOENT if at least 1 -ENOENT has happened.
262 */ 259 */
263 int global_error; 260 int global_error;
264 /* An array for individual errors */ 261 int version;
265 int *err;
266 262
267 /* User-space mfn array to store errors in the second pass for V1. */ 263 /* User-space mfn array to store errors in the second pass for V1. */
268 xen_pfn_t __user *user_mfn; 264 xen_pfn_t __user *user_mfn;
265 /* User-space int array to store errors in the second pass for V2. */
266 int __user *user_err;
269}; 267};
270 268
271/* auto translated dom0 note: if domU being created is PV, then mfn is 269/* auto translated dom0 note: if domU being created is PV, then mfn is
@@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
288 &cur_page); 286 &cur_page);
289 287
290 /* Store error code for second pass. */ 288 /* Store error code for second pass. */
291 *(st->err++) = ret; 289 if (st->version == 1) {
290 if (ret < 0) {
291 /*
292 * V1 encodes the error codes in the 32bit top nibble of the
293 * mfn (with its known limitations vis-a-vis 64 bit callers).
294 */
295 *mfnp |= (ret == -ENOENT) ?
296 PRIVCMD_MMAPBATCH_PAGED_ERROR :
297 PRIVCMD_MMAPBATCH_MFN_ERROR;
298 }
299 } else { /* st->version == 2 */
300 *((int *) mfnp) = ret;
301 }
292 302
293 /* And see if it affects the global_error. */ 303 /* And see if it affects the global_error. */
294 if (ret < 0) { 304 if (ret < 0) {
@@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
305 return 0; 315 return 0;
306} 316}
307 317
308static int mmap_return_errors_v1(void *data, void *state) 318static int mmap_return_errors(void *data, void *state)
309{ 319{
310 xen_pfn_t *mfnp = data;
311 struct mmap_batch_state *st = state; 320 struct mmap_batch_state *st = state;
312 int err = *(st->err++);
313 321
314 /* 322 if (st->version == 1) {
315 * V1 encodes the error codes in the 32bit top nibble of the 323 xen_pfn_t mfnp = *((xen_pfn_t *) data);
316 * mfn (with its known limitations vis-a-vis 64 bit callers). 324 if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
317 */ 325 return __put_user(mfnp, st->user_mfn++);
318 *mfnp |= (err == -ENOENT) ? 326 else
319 PRIVCMD_MMAPBATCH_PAGED_ERROR : 327 st->user_mfn++;
320 PRIVCMD_MMAPBATCH_MFN_ERROR; 328 } else { /* st->version == 2 */
321 return __put_user(*mfnp, st->user_mfn++); 329 int err = *((int *) data);
330 if (err)
331 return __put_user(err, st->user_err++);
332 else
333 st->user_err++;
334 }
335
336 return 0;
322} 337}
323 338
324/* Allocate pfns that are then mapped with gmfns from foreign domid. Update 339/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
@@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
357 struct vm_area_struct *vma; 372 struct vm_area_struct *vma;
358 unsigned long nr_pages; 373 unsigned long nr_pages;
359 LIST_HEAD(pagelist); 374 LIST_HEAD(pagelist);
360 int *err_array = NULL;
361 struct mmap_batch_state state; 375 struct mmap_batch_state state;
362 376
363 if (!xen_initial_domain())
364 return -EPERM;
365
366 switch (version) { 377 switch (version) {
367 case 1: 378 case 1:
368 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) 379 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
@@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
396 goto out; 407 goto out;
397 } 408 }
398 409
399 err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); 410 if (version == 2) {
400 if (err_array == NULL) { 411 /* Zero error array now to only copy back actual errors. */
401 ret = -ENOMEM; 412 if (clear_user(m.err, sizeof(int) * m.num)) {
402 goto out; 413 ret = -EFAULT;
414 goto out;
415 }
403 } 416 }
404 417
405 down_write(&mm->mmap_sem); 418 down_write(&mm->mmap_sem);
@@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
427 state.va = m.addr; 440 state.va = m.addr;
428 state.index = 0; 441 state.index = 0;
429 state.global_error = 0; 442 state.global_error = 0;
430 state.err = err_array; 443 state.version = version;
431 444
432 /* mmap_batch_fn guarantees ret == 0 */ 445 /* mmap_batch_fn guarantees ret == 0 */
433 BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), 446 BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
@@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
435 448
436 up_write(&mm->mmap_sem); 449 up_write(&mm->mmap_sem);
437 450
438 if (version == 1) { 451 if (state.global_error) {
439 if (state.global_error) { 452 /* Write back errors in second pass. */
440 /* Write back errors in second pass. */ 453 state.user_mfn = (xen_pfn_t *)m.arr;
441 state.user_mfn = (xen_pfn_t *)m.arr; 454 state.user_err = m.err;
442 state.err = err_array; 455 ret = traverse_pages(m.num, sizeof(xen_pfn_t),
443 ret = traverse_pages(m.num, sizeof(xen_pfn_t), 456 &pagelist, mmap_return_errors, &state);
444 &pagelist, mmap_return_errors_v1, &state); 457 } else
445 } else 458 ret = 0;
446 ret = 0;
447
448 } else if (version == 2) {
449 ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
450 if (ret)
451 ret = -EFAULT;
452 }
453 459
454 /* If we have not had any EFAULT-like global errors then set the global 460 /* If we have not had any EFAULT-like global errors then set the global
455 * error to -ENOENT if necessary. */ 461 * error to -ENOENT if necessary. */
@@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
457 ret = -ENOENT; 463 ret = -ENOENT;
458 464
459out: 465out:
460 kfree(err_array);
461 free_page_list(&pagelist); 466 free_page_list(&pagelist);
462 467
463 return ret; 468 return ret;
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index da39191e7278..c763479ed85e 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -140,8 +140,7 @@ static int acpi_pad_add(struct acpi_device *device)
140 return 0; 140 return 0;
141} 141}
142 142
143static int acpi_pad_remove(struct acpi_device *device, 143static int acpi_pad_remove(struct acpi_device *device)
144 int type)
145{ 144{
146 mutex_lock(&xen_cpu_lock); 145 mutex_lock(&xen_cpu_lock);
147 xen_acpi_pad_idle_cpus(0); 146 xen_acpi_pad_idle_cpus(0);
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a7def010eba3..f72af87640e0 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
124static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, 124static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
125 struct pci_dev *dev) 125 struct pci_dev *dev)
126{ 126{
127 if (xen_pcibk_backend && xen_pcibk_backend->free) 127 if (xen_pcibk_backend && xen_pcibk_backend->release)
128 return xen_pcibk_backend->release(pdev, dev); 128 return xen_pcibk_backend->release(pdev, dev);
129} 129}
130 130
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 97f5d264c31e..37c1f825f513 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
135 struct pci_dev *dev, struct xen_pci_op *op) 135 struct pci_dev *dev, struct xen_pci_op *op)
136{ 136{
137 struct xen_pcibk_dev_data *dev_data; 137 struct xen_pcibk_dev_data *dev_data;
138 int otherend = pdev->xdev->otherend_id;
139 int status; 138 int status;
140 139
141 if (unlikely(verbose_request)) 140 if (unlikely(verbose_request))
@@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
144 status = pci_enable_msi(dev); 143 status = pci_enable_msi(dev);
145 144
146 if (status) { 145 if (status) {
147 printk(KERN_ERR "error enable msi for guest %x status %x\n", 146 pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
148 otherend, status); 147 pci_name(dev), pdev->xdev->otherend_id,
148 status);
149 op->value = 0; 149 op->value = 0;
150 return XEN_PCI_ERR_op_failed; 150 return XEN_PCI_ERR_op_failed;
151 } 151 }
@@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
223 pci_name(dev), i, 223 pci_name(dev), i,
224 op->msix_entries[i].vector); 224 op->msix_entries[i].vector);
225 } 225 }
226 } else { 226 } else
227 printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", 227 pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
228 pci_name(dev), result); 228 pci_name(dev), pdev->xdev->otherend_id,
229 } 229 result);
230 kfree(entries); 230 kfree(entries);
231 231
232 op->value = result; 232 op->value = result;
diff --git a/fs/Kconfig b/fs/Kconfig
index cfe512fd1caf..780725a463b1 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -68,16 +68,6 @@ source "fs/quota/Kconfig"
68source "fs/autofs4/Kconfig" 68source "fs/autofs4/Kconfig"
69source "fs/fuse/Kconfig" 69source "fs/fuse/Kconfig"
70 70
71config CUSE
72 tristate "Character device in Userspace support"
73 depends on FUSE_FS
74 help
75 This FUSE extension allows character devices to be
76 implemented in userspace.
77
78 If you want to develop or use userspace character device
79 based on CUSE, answer Y or M.
80
81config GENERIC_ACL 71config GENERIC_ACL
82 bool 72 bool
83 select FS_POSIX_ACL 73 select FS_POSIX_ACL
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0c42cdbabecf..49d0b43458b7 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -33,6 +33,7 @@
33#include <linux/elf.h> 33#include <linux/elf.h>
34#include <linux/utsname.h> 34#include <linux/utsname.h>
35#include <linux/coredump.h> 35#include <linux/coredump.h>
36#include <linux/sched.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/param.h> 38#include <asm/param.h>
38#include <asm/page.h> 39#include <asm/page.h>
@@ -1320,8 +1321,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1320 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1321 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1321 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1322 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1322 } else { 1323 } else {
1323 cputime_to_timeval(p->utime, &prstatus->pr_utime); 1324 cputime_t utime, stime;
1324 cputime_to_timeval(p->stime, &prstatus->pr_stime); 1325
1326 task_cputime(p, &utime, &stime);
1327 cputime_to_timeval(utime, &prstatus->pr_utime);
1328 cputime_to_timeval(stime, &prstatus->pr_stime);
1325 } 1329 }
1326 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1330 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1327 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); 1331 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index dc84732e554f..cb240dd3b402 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1375,8 +1375,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1375 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1375 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1376 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1376 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1377 } else { 1377 } else {
1378 cputime_to_timeval(p->utime, &prstatus->pr_utime); 1378 cputime_t utime, stime;
1379 cputime_to_timeval(p->stime, &prstatus->pr_stime); 1379
1380 task_cputime(p, &utime, &stime);
1381 cputime_to_timeval(utime, &prstatus->pr_utime);
1382 cputime_to_timeval(stime, &prstatus->pr_stime);
1380 } 1383 }
1381 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1384 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1382 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); 1385 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 521e9d4424f6..5a3327b8f90d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3997,7 +3997,7 @@ again:
3997 * We make the other tasks wait for the flush only when we can flush 3997 * We make the other tasks wait for the flush only when we can flush
3998 * all things. 3998 * all things.
3999 */ 3999 */
4000 if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) { 4000 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4001 flushing = true; 4001 flushing = true;
4002 space_info->flush = 1; 4002 space_info->flush = 1;
4003 } 4003 }
@@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4534 unsigned nr_extents = 0; 4534 unsigned nr_extents = 0;
4535 int extra_reserve = 0; 4535 int extra_reserve = 0;
4536 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; 4536 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4537 int ret; 4537 int ret = 0;
4538 bool delalloc_lock = true; 4538 bool delalloc_lock = true;
4539 4539
4540 /* If we are a free space inode we need to not flush since we will be in 4540 /* If we are a free space inode we need to not flush since we will be in
@@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4579 csum_bytes = BTRFS_I(inode)->csum_bytes; 4579 csum_bytes = BTRFS_I(inode)->csum_bytes;
4580 spin_unlock(&BTRFS_I(inode)->lock); 4580 spin_unlock(&BTRFS_I(inode)->lock);
4581 4581
4582 if (root->fs_info->quota_enabled) { 4582 if (root->fs_info->quota_enabled)
4583 ret = btrfs_qgroup_reserve(root, num_bytes + 4583 ret = btrfs_qgroup_reserve(root, num_bytes +
4584 nr_extents * root->leafsize); 4584 nr_extents * root->leafsize);
4585 if (ret) {
4586 spin_lock(&BTRFS_I(inode)->lock);
4587 calc_csum_metadata_size(inode, num_bytes, 0);
4588 spin_unlock(&BTRFS_I(inode)->lock);
4589 if (delalloc_lock)
4590 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4591 return ret;
4592 }
4593 }
4594 4585
4595 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4586 /*
4587 * ret != 0 here means the qgroup reservation failed, we go straight to
4588 * the shared error handling then.
4589 */
4590 if (ret == 0)
4591 ret = reserve_metadata_bytes(root, block_rsv,
4592 to_reserve, flush);
4593
4596 if (ret) { 4594 if (ret) {
4597 u64 to_free = 0; 4595 u64 to_free = 0;
4598 unsigned dropped; 4596 unsigned dropped;
@@ -5560,7 +5558,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5560 int empty_cluster = 2 * 1024 * 1024; 5558 int empty_cluster = 2 * 1024 * 1024;
5561 struct btrfs_space_info *space_info; 5559 struct btrfs_space_info *space_info;
5562 int loop = 0; 5560 int loop = 0;
5563 int index = 0; 5561 int index = __get_raid_index(data);
5564 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? 5562 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5565 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC; 5563 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5566 bool found_uncached_bg = false; 5564 bool found_uncached_bg = false;
@@ -6788,11 +6786,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6788 &wc->flags[level]); 6786 &wc->flags[level]);
6789 if (ret < 0) { 6787 if (ret < 0) {
6790 btrfs_tree_unlock_rw(eb, path->locks[level]); 6788 btrfs_tree_unlock_rw(eb, path->locks[level]);
6789 path->locks[level] = 0;
6791 return ret; 6790 return ret;
6792 } 6791 }
6793 BUG_ON(wc->refs[level] == 0); 6792 BUG_ON(wc->refs[level] == 0);
6794 if (wc->refs[level] == 1) { 6793 if (wc->refs[level] == 1) {
6795 btrfs_tree_unlock_rw(eb, path->locks[level]); 6794 btrfs_tree_unlock_rw(eb, path->locks[level]);
6795 path->locks[level] = 0;
6796 return 1; 6796 return 1;
6797 } 6797 }
6798 } 6798 }
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index f169d6b11d7f..fdb7a8db3b57 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -171,6 +171,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
171 if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) 171 if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
172 return 0; 172 return 0;
173 173
174 if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
175 test_bit(EXTENT_FLAG_LOGGING, &next->flags))
176 return 0;
177
174 if (extent_map_end(prev) == next->start && 178 if (extent_map_end(prev) == next->start &&
175 prev->flags == next->flags && 179 prev->flags == next->flags &&
176 prev->bdev == next->bdev && 180 prev->bdev == next->bdev &&
@@ -255,7 +259,8 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
255 if (!em) 259 if (!em)
256 goto out; 260 goto out;
257 261
258 list_move(&em->list, &tree->modified_extents); 262 if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
263 list_move(&em->list, &tree->modified_extents);
259 em->generation = gen; 264 em->generation = gen;
260 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 265 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
261 em->mod_start = em->start; 266 em->mod_start = em->start;
@@ -280,6 +285,13 @@ out:
280 285
281} 286}
282 287
288void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
289{
290 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
291 if (em->in_tree)
292 try_merge_map(tree, em);
293}
294
283/** 295/**
284 * add_extent_mapping - add new extent map to the extent tree 296 * add_extent_mapping - add new extent map to the extent tree
285 * @tree: tree to insert new map in 297 * @tree: tree to insert new map in
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 922943ce29e8..c6598c89cff8 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -69,6 +69,7 @@ void free_extent_map(struct extent_map *em);
69int __init extent_map_init(void); 69int __init extent_map_init(void);
70void extent_map_exit(void); 70void extent_map_exit(void);
71int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); 71int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
72void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
72struct extent_map *search_extent_mapping(struct extent_map_tree *tree, 73struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
73 u64 start, u64 len); 74 u64 start, u64 len);
74#endif 75#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index bd38cef42358..94aa53b38721 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -460,8 +460,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
460 if (!contig) 460 if (!contig)
461 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 461 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
462 462
463 if (!contig && (offset >= ordered->file_offset + ordered->len || 463 if (offset >= ordered->file_offset + ordered->len ||
464 offset < ordered->file_offset)) { 464 offset < ordered->file_offset) {
465 unsigned long bytes_left; 465 unsigned long bytes_left;
466 sums->len = this_sum_bytes; 466 sums->len = this_sum_bytes;
467 this_sum_bytes = 0; 467 this_sum_bytes = 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 77061bf43edb..aeb84469d2c4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
293 struct btrfs_key key; 293 struct btrfs_key key;
294 struct btrfs_ioctl_defrag_range_args range; 294 struct btrfs_ioctl_defrag_range_args range;
295 int num_defrag; 295 int num_defrag;
296 int index;
297 int ret;
296 298
297 /* get the inode */ 299 /* get the inode */
298 key.objectid = defrag->root; 300 key.objectid = defrag->root;
299 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 301 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
300 key.offset = (u64)-1; 302 key.offset = (u64)-1;
303
304 index = srcu_read_lock(&fs_info->subvol_srcu);
305
301 inode_root = btrfs_read_fs_root_no_name(fs_info, &key); 306 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
302 if (IS_ERR(inode_root)) { 307 if (IS_ERR(inode_root)) {
303 kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 308 ret = PTR_ERR(inode_root);
304 return PTR_ERR(inode_root); 309 goto cleanup;
310 }
311 if (btrfs_root_refs(&inode_root->root_item) == 0) {
312 ret = -ENOENT;
313 goto cleanup;
305 } 314 }
306 315
307 key.objectid = defrag->ino; 316 key.objectid = defrag->ino;
@@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
309 key.offset = 0; 318 key.offset = 0;
310 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); 319 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
311 if (IS_ERR(inode)) { 320 if (IS_ERR(inode)) {
312 kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 321 ret = PTR_ERR(inode);
313 return PTR_ERR(inode); 322 goto cleanup;
314 } 323 }
324 srcu_read_unlock(&fs_info->subvol_srcu, index);
315 325
316 /* do a chunk of defrag */ 326 /* do a chunk of defrag */
317 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); 327 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
@@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
346 356
347 iput(inode); 357 iput(inode);
348 return 0; 358 return 0;
359cleanup:
360 srcu_read_unlock(&fs_info->subvol_srcu, index);
361 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
362 return ret;
349} 363}
350 364
351/* 365/*
@@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1594 if (err < 0 && num_written > 0) 1608 if (err < 0 && num_written > 0)
1595 num_written = err; 1609 num_written = err;
1596 } 1610 }
1597out: 1611
1598 if (sync) 1612 if (sync)
1599 atomic_dec(&BTRFS_I(inode)->sync_writers); 1613 atomic_dec(&BTRFS_I(inode)->sync_writers);
1614out:
1600 sb_end_write(inode->i_sb); 1615 sb_end_write(inode->i_sb);
1601 current->backing_dev_info = NULL; 1616 current->backing_dev_info = NULL;
1602 return num_written ? num_written : err; 1617 return num_written ? num_written : err;
@@ -2241,6 +2256,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2241 if (lockend <= lockstart) 2256 if (lockend <= lockstart)
2242 lockend = lockstart + root->sectorsize; 2257 lockend = lockstart + root->sectorsize;
2243 2258
2259 lockend--;
2244 len = lockend - lockstart + 1; 2260 len = lockend - lockstart + 1;
2245 2261
2246 len = max_t(u64, len, root->sectorsize); 2262 len = max_t(u64, len, root->sectorsize);
@@ -2307,9 +2323,12 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2307 } 2323 }
2308 } 2324 }
2309 2325
2310 *offset = start; 2326 if (!test_bit(EXTENT_FLAG_PREALLOC,
2311 free_extent_map(em); 2327 &em->flags)) {
2312 break; 2328 *offset = start;
2329 free_extent_map(em);
2330 break;
2331 }
2313 } 2332 }
2314 } 2333 }
2315 2334
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 59ea2e4349c9..0be7a8742a43 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1862,11 +1862,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1862{ 1862{
1863 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1863 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1864 struct btrfs_free_space *info; 1864 struct btrfs_free_space *info;
1865 int ret = 0; 1865 int ret;
1866 bool re_search = false;
1866 1867
1867 spin_lock(&ctl->tree_lock); 1868 spin_lock(&ctl->tree_lock);
1868 1869
1869again: 1870again:
1871 ret = 0;
1870 if (!bytes) 1872 if (!bytes)
1871 goto out_lock; 1873 goto out_lock;
1872 1874
@@ -1879,17 +1881,17 @@ again:
1879 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1881 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1880 1, 0); 1882 1, 0);
1881 if (!info) { 1883 if (!info) {
1882 /* the tree logging code might be calling us before we 1884 /*
1883 * have fully loaded the free space rbtree for this 1885 * If we found a partial bit of our free space in a
1884 * block group. So it is possible the entry won't 1886 * bitmap but then couldn't find the other part this may
1885 * be in the rbtree yet at all. The caching code 1887 * be a problem, so WARN about it.
1886 * will make sure not to put it in the rbtree if
1887 * the logging code has pinned it.
1888 */ 1888 */
1889 WARN_ON(re_search);
1889 goto out_lock; 1890 goto out_lock;
1890 } 1891 }
1891 } 1892 }
1892 1893
1894 re_search = false;
1893 if (!info->bitmap) { 1895 if (!info->bitmap) {
1894 unlink_free_space(ctl, info); 1896 unlink_free_space(ctl, info);
1895 if (offset == info->offset) { 1897 if (offset == info->offset) {
@@ -1935,8 +1937,10 @@ again:
1935 } 1937 }
1936 1938
1937 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1939 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1938 if (ret == -EAGAIN) 1940 if (ret == -EAGAIN) {
1941 re_search = true;
1939 goto again; 1942 goto again;
1943 }
1940 BUG_ON(ret); /* logic error */ 1944 BUG_ON(ret); /* logic error */
1941out_lock: 1945out_lock:
1942 spin_unlock(&ctl->tree_lock); 1946 spin_unlock(&ctl->tree_lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 16d9e8e191e6..cc93b23ca352 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -88,7 +88,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
88 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 88 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
89}; 89};
90 90
91static int btrfs_setsize(struct inode *inode, loff_t newsize); 91static int btrfs_setsize(struct inode *inode, struct iattr *attr);
92static int btrfs_truncate(struct inode *inode); 92static int btrfs_truncate(struct inode *inode);
93static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); 93static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
94static noinline int cow_file_range(struct inode *inode, 94static noinline int cow_file_range(struct inode *inode,
@@ -2478,6 +2478,18 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2478 continue; 2478 continue;
2479 } 2479 }
2480 nr_truncate++; 2480 nr_truncate++;
2481
2482 /* 1 for the orphan item deletion. */
2483 trans = btrfs_start_transaction(root, 1);
2484 if (IS_ERR(trans)) {
2485 ret = PTR_ERR(trans);
2486 goto out;
2487 }
2488 ret = btrfs_orphan_add(trans, inode);
2489 btrfs_end_transaction(trans, root);
2490 if (ret)
2491 goto out;
2492
2481 ret = btrfs_truncate(inode); 2493 ret = btrfs_truncate(inode);
2482 } else { 2494 } else {
2483 nr_unlink++; 2495 nr_unlink++;
@@ -3665,6 +3677,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3665 block_end - cur_offset, 0); 3677 block_end - cur_offset, 0);
3666 if (IS_ERR(em)) { 3678 if (IS_ERR(em)) {
3667 err = PTR_ERR(em); 3679 err = PTR_ERR(em);
3680 em = NULL;
3668 break; 3681 break;
3669 } 3682 }
3670 last_byte = min(extent_map_end(em), block_end); 3683 last_byte = min(extent_map_end(em), block_end);
@@ -3748,16 +3761,27 @@ next:
3748 return err; 3761 return err;
3749} 3762}
3750 3763
3751static int btrfs_setsize(struct inode *inode, loff_t newsize) 3764static int btrfs_setsize(struct inode *inode, struct iattr *attr)
3752{ 3765{
3753 struct btrfs_root *root = BTRFS_I(inode)->root; 3766 struct btrfs_root *root = BTRFS_I(inode)->root;
3754 struct btrfs_trans_handle *trans; 3767 struct btrfs_trans_handle *trans;
3755 loff_t oldsize = i_size_read(inode); 3768 loff_t oldsize = i_size_read(inode);
3769 loff_t newsize = attr->ia_size;
3770 int mask = attr->ia_valid;
3756 int ret; 3771 int ret;
3757 3772
3758 if (newsize == oldsize) 3773 if (newsize == oldsize)
3759 return 0; 3774 return 0;
3760 3775
3776 /*
3777 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
3778 * special case where we need to update the times despite not having
3779 * these flags set. For all other operations the VFS set these flags
3780 * explicitly if it wants a timestamp update.
3781 */
3782 if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
3783 inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
3784
3761 if (newsize > oldsize) { 3785 if (newsize > oldsize) {
3762 truncate_pagecache(inode, oldsize, newsize); 3786 truncate_pagecache(inode, oldsize, newsize);
3763 ret = btrfs_cont_expand(inode, oldsize, newsize); 3787 ret = btrfs_cont_expand(inode, oldsize, newsize);
@@ -3783,9 +3807,34 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
3783 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 3807 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3784 &BTRFS_I(inode)->runtime_flags); 3808 &BTRFS_I(inode)->runtime_flags);
3785 3809
3810 /*
3811 * 1 for the orphan item we're going to add
3812 * 1 for the orphan item deletion.
3813 */
3814 trans = btrfs_start_transaction(root, 2);
3815 if (IS_ERR(trans))
3816 return PTR_ERR(trans);
3817
3818 /*
3819 * We need to do this in case we fail at _any_ point during the
3820 * actual truncate. Once we do the truncate_setsize we could
3821 * invalidate pages which forces any outstanding ordered io to
3822 * be instantly completed which will give us extents that need
3823 * to be truncated. If we fail to get an orphan inode down we
3824 * could have left over extents that were never meant to live,
3825 * so we need to garuntee from this point on that everything
3826 * will be consistent.
3827 */
3828 ret = btrfs_orphan_add(trans, inode);
3829 btrfs_end_transaction(trans, root);
3830 if (ret)
3831 return ret;
3832
3786 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3833 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3787 truncate_setsize(inode, newsize); 3834 truncate_setsize(inode, newsize);
3788 ret = btrfs_truncate(inode); 3835 ret = btrfs_truncate(inode);
3836 if (ret && inode->i_nlink)
3837 btrfs_orphan_del(NULL, inode);
3789 } 3838 }
3790 3839
3791 return ret; 3840 return ret;
@@ -3805,7 +3854,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3805 return err; 3854 return err;
3806 3855
3807 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3856 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3808 err = btrfs_setsize(inode, attr->ia_size); 3857 err = btrfs_setsize(inode, attr);
3809 if (err) 3858 if (err)
3810 return err; 3859 return err;
3811 } 3860 }
@@ -5572,10 +5621,13 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
5572 return em; 5621 return em;
5573 if (em) { 5622 if (em) {
5574 /* 5623 /*
5575 * if our em maps to a hole, there might 5624 * if our em maps to
5576 * actually be delalloc bytes behind it 5625 * - a hole or
5626 * - a pre-alloc extent,
5627 * there might actually be delalloc bytes behind it.
5577 */ 5628 */
5578 if (em->block_start != EXTENT_MAP_HOLE) 5629 if (em->block_start != EXTENT_MAP_HOLE &&
5630 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5579 return em; 5631 return em;
5580 else 5632 else
5581 hole_em = em; 5633 hole_em = em;
@@ -5657,6 +5709,8 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
5657 */ 5709 */
5658 em->block_start = hole_em->block_start; 5710 em->block_start = hole_em->block_start;
5659 em->block_len = hole_len; 5711 em->block_len = hole_len;
5712 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
5713 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5660 } else { 5714 } else {
5661 em->start = range_start; 5715 em->start = range_start;
5662 em->len = found; 5716 em->len = found;
@@ -6915,11 +6969,9 @@ static int btrfs_truncate(struct inode *inode)
6915 6969
6916 /* 6970 /*
6917 * 1 for the truncate slack space 6971 * 1 for the truncate slack space
6918 * 1 for the orphan item we're going to add
6919 * 1 for the orphan item deletion
6920 * 1 for updating the inode. 6972 * 1 for updating the inode.
6921 */ 6973 */
6922 trans = btrfs_start_transaction(root, 4); 6974 trans = btrfs_start_transaction(root, 2);
6923 if (IS_ERR(trans)) { 6975 if (IS_ERR(trans)) {
6924 err = PTR_ERR(trans); 6976 err = PTR_ERR(trans);
6925 goto out; 6977 goto out;
@@ -6930,12 +6982,6 @@ static int btrfs_truncate(struct inode *inode)
6930 min_size); 6982 min_size);
6931 BUG_ON(ret); 6983 BUG_ON(ret);
6932 6984
6933 ret = btrfs_orphan_add(trans, inode);
6934 if (ret) {
6935 btrfs_end_transaction(trans, root);
6936 goto out;
6937 }
6938
6939 /* 6985 /*
6940 * setattr is responsible for setting the ordered_data_close flag, 6986 * setattr is responsible for setting the ordered_data_close flag,
6941 * but that is only tested during the last file release. That 6987 * but that is only tested during the last file release. That
@@ -7004,12 +7050,6 @@ static int btrfs_truncate(struct inode *inode)
7004 ret = btrfs_orphan_del(trans, inode); 7050 ret = btrfs_orphan_del(trans, inode);
7005 if (ret) 7051 if (ret)
7006 err = ret; 7052 err = ret;
7007 } else if (ret && inode->i_nlink > 0) {
7008 /*
7009 * Failed to do the truncate, remove us from the in memory
7010 * orphan list.
7011 */
7012 ret = btrfs_orphan_del(NULL, inode);
7013 } 7053 }
7014 7054
7015 if (trans) { 7055 if (trans) {
@@ -7531,41 +7571,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
7531 */ 7571 */
7532int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7572int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7533{ 7573{
7534 struct list_head *head = &root->fs_info->delalloc_inodes;
7535 struct btrfs_inode *binode; 7574 struct btrfs_inode *binode;
7536 struct inode *inode; 7575 struct inode *inode;
7537 struct btrfs_delalloc_work *work, *next; 7576 struct btrfs_delalloc_work *work, *next;
7538 struct list_head works; 7577 struct list_head works;
7578 struct list_head splice;
7539 int ret = 0; 7579 int ret = 0;
7540 7580
7541 if (root->fs_info->sb->s_flags & MS_RDONLY) 7581 if (root->fs_info->sb->s_flags & MS_RDONLY)
7542 return -EROFS; 7582 return -EROFS;
7543 7583
7544 INIT_LIST_HEAD(&works); 7584 INIT_LIST_HEAD(&works);
7545 7585 INIT_LIST_HEAD(&splice);
7586again:
7546 spin_lock(&root->fs_info->delalloc_lock); 7587 spin_lock(&root->fs_info->delalloc_lock);
7547 while (!list_empty(head)) { 7588 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
7548 binode = list_entry(head->next, struct btrfs_inode, 7589 while (!list_empty(&splice)) {
7590 binode = list_entry(splice.next, struct btrfs_inode,
7549 delalloc_inodes); 7591 delalloc_inodes);
7592
7593 list_del_init(&binode->delalloc_inodes);
7594
7550 inode = igrab(&binode->vfs_inode); 7595 inode = igrab(&binode->vfs_inode);
7551 if (!inode) 7596 if (!inode)
7552 list_del_init(&binode->delalloc_inodes); 7597 continue;
7598
7599 list_add_tail(&binode->delalloc_inodes,
7600 &root->fs_info->delalloc_inodes);
7553 spin_unlock(&root->fs_info->delalloc_lock); 7601 spin_unlock(&root->fs_info->delalloc_lock);
7554 if (inode) { 7602
7555 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7603 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
7556 if (!work) { 7604 if (unlikely(!work)) {
7557 ret = -ENOMEM; 7605 ret = -ENOMEM;
7558 goto out; 7606 goto out;
7559 }
7560 list_add_tail(&work->list, &works);
7561 btrfs_queue_worker(&root->fs_info->flush_workers,
7562 &work->work);
7563 } 7607 }
7608 list_add_tail(&work->list, &works);
7609 btrfs_queue_worker(&root->fs_info->flush_workers,
7610 &work->work);
7611
7564 cond_resched(); 7612 cond_resched();
7565 spin_lock(&root->fs_info->delalloc_lock); 7613 spin_lock(&root->fs_info->delalloc_lock);
7566 } 7614 }
7567 spin_unlock(&root->fs_info->delalloc_lock); 7615 spin_unlock(&root->fs_info->delalloc_lock);
7568 7616
7617 list_for_each_entry_safe(work, next, &works, list) {
7618 list_del_init(&work->list);
7619 btrfs_wait_and_free_delalloc_work(work);
7620 }
7621
7622 spin_lock(&root->fs_info->delalloc_lock);
7623 if (!list_empty(&root->fs_info->delalloc_inodes)) {
7624 spin_unlock(&root->fs_info->delalloc_lock);
7625 goto again;
7626 }
7627 spin_unlock(&root->fs_info->delalloc_lock);
7628
7569 /* the filemap_flush will queue IO into the worker threads, but 7629 /* the filemap_flush will queue IO into the worker threads, but
7570 * we have to make sure the IO is actually started and that 7630 * we have to make sure the IO is actually started and that
7571 * ordered extents get created before we return 7631 * ordered extents get created before we return
@@ -7578,11 +7638,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7578 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7638 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7579 } 7639 }
7580 atomic_dec(&root->fs_info->async_submit_draining); 7640 atomic_dec(&root->fs_info->async_submit_draining);
7641 return 0;
7581out: 7642out:
7582 list_for_each_entry_safe(work, next, &works, list) { 7643 list_for_each_entry_safe(work, next, &works, list) {
7583 list_del_init(&work->list); 7644 list_del_init(&work->list);
7584 btrfs_wait_and_free_delalloc_work(work); 7645 btrfs_wait_and_free_delalloc_work(work);
7585 } 7646 }
7647
7648 if (!list_empty_careful(&splice)) {
7649 spin_lock(&root->fs_info->delalloc_lock);
7650 list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
7651 spin_unlock(&root->fs_info->delalloc_lock);
7652 }
7586 return ret; 7653 return ret;
7587} 7654}
7588 7655
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4b4516770f05..338f2597bf7f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root,
515 515
516 BUG_ON(ret); 516 BUG_ON(ret);
517 517
518 d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
519fail: 518fail:
520 if (async_transid) { 519 if (async_transid) {
521 *async_transid = trans->transid; 520 *async_transid = trans->transid;
@@ -525,6 +524,10 @@ fail:
525 } 524 }
526 if (err && !ret) 525 if (err && !ret)
527 ret = err; 526 ret = err;
527
528 if (!ret)
529 d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
530
528 return ret; 531 return ret;
529} 532}
530 533
@@ -1339,7 +1342,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
1339 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 1342 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1340 1)) { 1343 1)) {
1341 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 1344 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
1342 return -EINPROGRESS; 1345 mnt_drop_write_file(file);
1346 return -EINVAL;
1343 } 1347 }
1344 1348
1345 mutex_lock(&root->fs_info->volume_mutex); 1349 mutex_lock(&root->fs_info->volume_mutex);
@@ -1362,6 +1366,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
1362 printk(KERN_INFO "btrfs: resizing devid %llu\n", 1366 printk(KERN_INFO "btrfs: resizing devid %llu\n",
1363 (unsigned long long)devid); 1367 (unsigned long long)devid);
1364 } 1368 }
1369
1365 device = btrfs_find_device(root->fs_info, devid, NULL, NULL); 1370 device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
1366 if (!device) { 1371 if (!device) {
1367 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", 1372 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
@@ -1369,9 +1374,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
1369 ret = -EINVAL; 1374 ret = -EINVAL;
1370 goto out_free; 1375 goto out_free;
1371 } 1376 }
1372 if (device->fs_devices && device->fs_devices->seeding) { 1377
1378 if (!device->writeable) {
1373 printk(KERN_INFO "btrfs: resizer unable to apply on " 1379 printk(KERN_INFO "btrfs: resizer unable to apply on "
1374 "seeding device %llu\n", 1380 "readonly device %llu\n",
1375 (unsigned long long)devid); 1381 (unsigned long long)devid);
1376 ret = -EINVAL; 1382 ret = -EINVAL;
1377 goto out_free; 1383 goto out_free;
@@ -1443,8 +1449,8 @@ out_free:
1443 kfree(vol_args); 1449 kfree(vol_args);
1444out: 1450out:
1445 mutex_unlock(&root->fs_info->volume_mutex); 1451 mutex_unlock(&root->fs_info->volume_mutex);
1446 mnt_drop_write_file(file);
1447 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 1452 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
1453 mnt_drop_write_file(file);
1448 return ret; 1454 return ret;
1449} 1455}
1450 1456
@@ -2095,13 +2101,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2095 err = inode_permission(inode, MAY_WRITE | MAY_EXEC); 2101 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2096 if (err) 2102 if (err)
2097 goto out_dput; 2103 goto out_dput;
2098
2099 /* check if subvolume may be deleted by a non-root user */
2100 err = btrfs_may_delete(dir, dentry, 1);
2101 if (err)
2102 goto out_dput;
2103 } 2104 }
2104 2105
2106 /* check if subvolume may be deleted by a user */
2107 err = btrfs_may_delete(dir, dentry, 1);
2108 if (err)
2109 goto out_dput;
2110
2105 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { 2111 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
2106 err = -EINVAL; 2112 err = -EINVAL;
2107 goto out_dput; 2113 goto out_dput;
@@ -2183,19 +2189,20 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2183 struct btrfs_ioctl_defrag_range_args *range; 2189 struct btrfs_ioctl_defrag_range_args *range;
2184 int ret; 2190 int ret;
2185 2191
2186 if (btrfs_root_readonly(root)) 2192 ret = mnt_want_write_file(file);
2187 return -EROFS; 2193 if (ret)
2194 return ret;
2188 2195
2189 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2196 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2190 1)) { 2197 1)) {
2191 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2198 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
2192 return -EINPROGRESS; 2199 mnt_drop_write_file(file);
2200 return -EINVAL;
2193 } 2201 }
2194 ret = mnt_want_write_file(file); 2202
2195 if (ret) { 2203 if (btrfs_root_readonly(root)) {
2196 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 2204 ret = -EROFS;
2197 0); 2205 goto out;
2198 return ret;
2199 } 2206 }
2200 2207
2201 switch (inode->i_mode & S_IFMT) { 2208 switch (inode->i_mode & S_IFMT) {
@@ -2247,8 +2254,8 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2247 ret = -EINVAL; 2254 ret = -EINVAL;
2248 } 2255 }
2249out: 2256out:
2250 mnt_drop_write_file(file);
2251 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2257 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
2258 mnt_drop_write_file(file);
2252 return ret; 2259 return ret;
2253} 2260}
2254 2261
@@ -2263,7 +2270,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
2263 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2270 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
2264 1)) { 2271 1)) {
2265 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2272 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
2266 return -EINPROGRESS; 2273 return -EINVAL;
2267 } 2274 }
2268 2275
2269 mutex_lock(&root->fs_info->volume_mutex); 2276 mutex_lock(&root->fs_info->volume_mutex);
@@ -2300,7 +2307,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
2300 1)) { 2307 1)) {
2301 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2308 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
2302 mnt_drop_write_file(file); 2309 mnt_drop_write_file(file);
2303 return -EINPROGRESS; 2310 return -EINVAL;
2304 } 2311 }
2305 2312
2306 mutex_lock(&root->fs_info->volume_mutex); 2313 mutex_lock(&root->fs_info->volume_mutex);
@@ -2316,8 +2323,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
2316 kfree(vol_args); 2323 kfree(vol_args);
2317out: 2324out:
2318 mutex_unlock(&root->fs_info->volume_mutex); 2325 mutex_unlock(&root->fs_info->volume_mutex);
2319 mnt_drop_write_file(file);
2320 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2326 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
2327 mnt_drop_write_file(file);
2321 return ret; 2328 return ret;
2322} 2329}
2323 2330
@@ -3437,8 +3444,8 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
3437 struct btrfs_fs_info *fs_info = root->fs_info; 3444 struct btrfs_fs_info *fs_info = root->fs_info;
3438 struct btrfs_ioctl_balance_args *bargs; 3445 struct btrfs_ioctl_balance_args *bargs;
3439 struct btrfs_balance_control *bctl; 3446 struct btrfs_balance_control *bctl;
3447 bool need_unlock; /* for mut. excl. ops lock */
3440 int ret; 3448 int ret;
3441 int need_to_clear_lock = 0;
3442 3449
3443 if (!capable(CAP_SYS_ADMIN)) 3450 if (!capable(CAP_SYS_ADMIN))
3444 return -EPERM; 3451 return -EPERM;
@@ -3447,14 +3454,61 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
3447 if (ret) 3454 if (ret)
3448 return ret; 3455 return ret;
3449 3456
3450 mutex_lock(&fs_info->volume_mutex); 3457again:
3458 if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
3459 mutex_lock(&fs_info->volume_mutex);
3460 mutex_lock(&fs_info->balance_mutex);
3461 need_unlock = true;
3462 goto locked;
3463 }
3464
3465 /*
3466 * mut. excl. ops lock is locked. Three possibilites:
3467 * (1) some other op is running
3468 * (2) balance is running
3469 * (3) balance is paused -- special case (think resume)
3470 */
3451 mutex_lock(&fs_info->balance_mutex); 3471 mutex_lock(&fs_info->balance_mutex);
3472 if (fs_info->balance_ctl) {
3473 /* this is either (2) or (3) */
3474 if (!atomic_read(&fs_info->balance_running)) {
3475 mutex_unlock(&fs_info->balance_mutex);
3476 if (!mutex_trylock(&fs_info->volume_mutex))
3477 goto again;
3478 mutex_lock(&fs_info->balance_mutex);
3479
3480 if (fs_info->balance_ctl &&
3481 !atomic_read(&fs_info->balance_running)) {
3482 /* this is (3) */
3483 need_unlock = false;
3484 goto locked;
3485 }
3486
3487 mutex_unlock(&fs_info->balance_mutex);
3488 mutex_unlock(&fs_info->volume_mutex);
3489 goto again;
3490 } else {
3491 /* this is (2) */
3492 mutex_unlock(&fs_info->balance_mutex);
3493 ret = -EINPROGRESS;
3494 goto out;
3495 }
3496 } else {
3497 /* this is (1) */
3498 mutex_unlock(&fs_info->balance_mutex);
3499 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
3500 ret = -EINVAL;
3501 goto out;
3502 }
3503
3504locked:
3505 BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
3452 3506
3453 if (arg) { 3507 if (arg) {
3454 bargs = memdup_user(arg, sizeof(*bargs)); 3508 bargs = memdup_user(arg, sizeof(*bargs));
3455 if (IS_ERR(bargs)) { 3509 if (IS_ERR(bargs)) {
3456 ret = PTR_ERR(bargs); 3510 ret = PTR_ERR(bargs);
3457 goto out; 3511 goto out_unlock;
3458 } 3512 }
3459 3513
3460 if (bargs->flags & BTRFS_BALANCE_RESUME) { 3514 if (bargs->flags & BTRFS_BALANCE_RESUME) {
@@ -3474,13 +3528,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
3474 bargs = NULL; 3528 bargs = NULL;
3475 } 3529 }
3476 3530
3477 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 3531 if (fs_info->balance_ctl) {
3478 1)) {
3479 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
3480 ret = -EINPROGRESS; 3532 ret = -EINPROGRESS;
3481 goto out_bargs; 3533 goto out_bargs;
3482 } 3534 }
3483 need_to_clear_lock = 1;
3484 3535
3485 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3536 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3486 if (!bctl) { 3537 if (!bctl) {
@@ -3501,11 +3552,17 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
3501 } 3552 }
3502 3553
3503do_balance: 3554do_balance:
3504 ret = btrfs_balance(bctl, bargs);
3505 /* 3555 /*
3506 * bctl is freed in __cancel_balance or in free_fs_info if 3556 * Ownership of bctl and mutually_exclusive_operation_running
3507 * restriper was paused all the way until unmount 3557 * goes to to btrfs_balance. bctl is freed in __cancel_balance,
3558 * or, if restriper was paused all the way until unmount, in
3559 * free_fs_info. mutually_exclusive_operation_running is
3560 * cleared in __cancel_balance.
3508 */ 3561 */
3562 need_unlock = false;
3563
3564 ret = btrfs_balance(bctl, bargs);
3565
3509 if (arg) { 3566 if (arg) {
3510 if (copy_to_user(arg, bargs, sizeof(*bargs))) 3567 if (copy_to_user(arg, bargs, sizeof(*bargs)))
3511 ret = -EFAULT; 3568 ret = -EFAULT;
@@ -3513,12 +3570,12 @@ do_balance:
3513 3570
3514out_bargs: 3571out_bargs:
3515 kfree(bargs); 3572 kfree(bargs);
3516out: 3573out_unlock:
3517 if (need_to_clear_lock)
3518 atomic_set(&root->fs_info->mutually_exclusive_operation_running,
3519 0);
3520 mutex_unlock(&fs_info->balance_mutex); 3574 mutex_unlock(&fs_info->balance_mutex);
3521 mutex_unlock(&fs_info->volume_mutex); 3575 mutex_unlock(&fs_info->volume_mutex);
3576 if (need_unlock)
3577 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3578out:
3522 mnt_drop_write_file(file); 3579 mnt_drop_write_file(file);
3523 return ret; 3580 return ret;
3524} 3581}
@@ -3698,6 +3755,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
3698 goto drop_write; 3755 goto drop_write;
3699 } 3756 }
3700 3757
3758 if (!sa->qgroupid) {
3759 ret = -EINVAL;
3760 goto out;
3761 }
3762
3701 trans = btrfs_join_transaction(root); 3763 trans = btrfs_join_transaction(root);
3702 if (IS_ERR(trans)) { 3764 if (IS_ERR(trans)) {
3703 ret = PTR_ERR(trans); 3765 ret = PTR_ERR(trans);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index f10731297040..e5ed56729607 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
836 * if the disk i_size is already at the inode->i_size, or 836 * if the disk i_size is already at the inode->i_size, or
837 * this ordered extent is inside the disk i_size, we're done 837 * this ordered extent is inside the disk i_size, we're done
838 */ 838 */
839 if (disk_i_size == i_size || offset <= disk_i_size) { 839 if (disk_i_size == i_size)
840 goto out;
841
842 /*
843 * We still need to update disk_i_size if outstanding_isize is greater
844 * than disk_i_size.
845 */
846 if (offset <= disk_i_size &&
847 (!ordered || ordered->outstanding_isize <= disk_i_size))
840 goto out; 848 goto out;
841 }
842 849
843 /* 850 /*
844 * walk backward from this ordered extent to disk_i_size. 851 * walk backward from this ordered extent to disk_i_size.
@@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
870 break; 877 break;
871 if (test->file_offset >= i_size) 878 if (test->file_offset >= i_size)
872 break; 879 break;
873 if (test->file_offset >= disk_i_size) { 880 if (entry_end(test) > disk_i_size) {
874 /* 881 /*
875 * we don't update disk_i_size now, so record this 882 * we don't update disk_i_size now, so record this
876 * undealt i_size. Or we will not know the real 883 * undealt i_size. Or we will not know the real
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fe9d02c45f8e..a5c856234323 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -379,6 +379,13 @@ next1:
379 379
380 ret = add_relation_rb(fs_info, found_key.objectid, 380 ret = add_relation_rb(fs_info, found_key.objectid,
381 found_key.offset); 381 found_key.offset);
382 if (ret == -ENOENT) {
383 printk(KERN_WARNING
384 "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
385 (unsigned long long)found_key.objectid,
386 (unsigned long long)found_key.offset);
387 ret = 0; /* ignore the error */
388 }
382 if (ret) 389 if (ret)
383 goto out; 390 goto out;
384next2: 391next2:
@@ -956,17 +963,28 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
956 struct btrfs_fs_info *fs_info, u64 qgroupid) 963 struct btrfs_fs_info *fs_info, u64 qgroupid)
957{ 964{
958 struct btrfs_root *quota_root; 965 struct btrfs_root *quota_root;
966 struct btrfs_qgroup *qgroup;
959 int ret = 0; 967 int ret = 0;
960 968
961 quota_root = fs_info->quota_root; 969 quota_root = fs_info->quota_root;
962 if (!quota_root) 970 if (!quota_root)
963 return -EINVAL; 971 return -EINVAL;
964 972
973 /* check if there are no relations to this qgroup */
974 spin_lock(&fs_info->qgroup_lock);
975 qgroup = find_qgroup_rb(fs_info, qgroupid);
976 if (qgroup) {
977 if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
978 spin_unlock(&fs_info->qgroup_lock);
979 return -EBUSY;
980 }
981 }
982 spin_unlock(&fs_info->qgroup_lock);
983
965 ret = del_qgroup_item(trans, quota_root, qgroupid); 984 ret = del_qgroup_item(trans, quota_root, qgroupid);
966 985
967 spin_lock(&fs_info->qgroup_lock); 986 spin_lock(&fs_info->qgroup_lock);
968 del_qgroup_rb(quota_root->fs_info, qgroupid); 987 del_qgroup_rb(quota_root->fs_info, qgroupid);
969
970 spin_unlock(&fs_info->qgroup_lock); 988 spin_unlock(&fs_info->qgroup_lock);
971 989
972 return ret; 990 return ret;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index bdbb94f245c9..67783e03d121 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
580 int corrected = 0; 580 int corrected = 0;
581 struct btrfs_key key; 581 struct btrfs_key key;
582 struct inode *inode = NULL; 582 struct inode *inode = NULL;
583 struct btrfs_fs_info *fs_info;
583 u64 end = offset + PAGE_SIZE - 1; 584 u64 end = offset + PAGE_SIZE - 1;
584 struct btrfs_root *local_root; 585 struct btrfs_root *local_root;
586 int srcu_index;
585 587
586 key.objectid = root; 588 key.objectid = root;
587 key.type = BTRFS_ROOT_ITEM_KEY; 589 key.type = BTRFS_ROOT_ITEM_KEY;
588 key.offset = (u64)-1; 590 key.offset = (u64)-1;
589 local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); 591
590 if (IS_ERR(local_root)) 592 fs_info = fixup->root->fs_info;
593 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
594
595 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
596 if (IS_ERR(local_root)) {
597 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
591 return PTR_ERR(local_root); 598 return PTR_ERR(local_root);
599 }
592 600
593 key.type = BTRFS_INODE_ITEM_KEY; 601 key.type = BTRFS_INODE_ITEM_KEY;
594 key.objectid = inum; 602 key.objectid = inum;
595 key.offset = 0; 603 key.offset = 0;
596 inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); 604 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
605 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
597 if (IS_ERR(inode)) 606 if (IS_ERR(inode))
598 return PTR_ERR(inode); 607 return PTR_ERR(inode);
599 608
@@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
606 } 615 }
607 616
608 if (PageUptodate(page)) { 617 if (PageUptodate(page)) {
609 struct btrfs_fs_info *fs_info;
610 if (PageDirty(page)) { 618 if (PageDirty(page)) {
611 /* 619 /*
612 * we need to write the data to the defect sector. the 620 * we need to write the data to the defect sector. the
@@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
3180 u64 physical_for_dev_replace; 3188 u64 physical_for_dev_replace;
3181 u64 len; 3189 u64 len;
3182 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; 3190 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3191 int srcu_index;
3183 3192
3184 key.objectid = root; 3193 key.objectid = root;
3185 key.type = BTRFS_ROOT_ITEM_KEY; 3194 key.type = BTRFS_ROOT_ITEM_KEY;
3186 key.offset = (u64)-1; 3195 key.offset = (u64)-1;
3196
3197 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
3198
3187 local_root = btrfs_read_fs_root_no_name(fs_info, &key); 3199 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
3188 if (IS_ERR(local_root)) 3200 if (IS_ERR(local_root)) {
3201 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3189 return PTR_ERR(local_root); 3202 return PTR_ERR(local_root);
3203 }
3190 3204
3191 key.type = BTRFS_INODE_ITEM_KEY; 3205 key.type = BTRFS_INODE_ITEM_KEY;
3192 key.objectid = inum; 3206 key.objectid = inum;
3193 key.offset = 0; 3207 key.offset = 0;
3194 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); 3208 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
3209 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
3195 if (IS_ERR(inode)) 3210 if (IS_ERR(inode))
3196 return PTR_ERR(inode); 3211 return PTR_ERR(inode);
3197 3212
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 54454542ad40..321b7fb4e441 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1814,8 +1814,10 @@ static int name_cache_insert(struct send_ctx *sctx,
1814 (unsigned long)nce->ino); 1814 (unsigned long)nce->ino);
1815 if (!nce_head) { 1815 if (!nce_head) {
1816 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); 1816 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
1817 if (!nce_head) 1817 if (!nce_head) {
1818 kfree(nce);
1818 return -ENOMEM; 1819 return -ENOMEM;
1820 }
1819 INIT_LIST_HEAD(nce_head); 1821 INIT_LIST_HEAD(nce_head);
1820 1822
1821 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); 1823 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 99545df1b86c..d8982e9601d3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
267 function, line, errstr); 267 function, line, errstr);
268 return; 268 return;
269 } 269 }
270 trans->transaction->aborted = errno; 270 ACCESS_ONCE(trans->transaction->aborted) = errno;
271 __btrfs_std_error(root->fs_info, function, line, errno, NULL); 271 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
272} 272}
273/* 273/*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 87fac9a21ea5..fc03aa60b684 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
333 &root->fs_info->trans_block_rsv, 333 &root->fs_info->trans_block_rsv,
334 num_bytes, flush); 334 num_bytes, flush);
335 if (ret) 335 if (ret)
336 return ERR_PTR(ret); 336 goto reserve_fail;
337 } 337 }
338again: 338again:
339 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 339 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
340 if (!h) 340 if (!h) {
341 return ERR_PTR(-ENOMEM); 341 ret = -ENOMEM;
342 goto alloc_fail;
343 }
342 344
343 /* 345 /*
344 * If we are JOIN_NOLOCK we're already committing a transaction and 346 * If we are JOIN_NOLOCK we're already committing a transaction and
@@ -365,11 +367,7 @@ again:
365 if (ret < 0) { 367 if (ret < 0) {
366 /* We must get the transaction if we are JOIN_NOLOCK. */ 368 /* We must get the transaction if we are JOIN_NOLOCK. */
367 BUG_ON(type == TRANS_JOIN_NOLOCK); 369 BUG_ON(type == TRANS_JOIN_NOLOCK);
368 370 goto join_fail;
369 if (type < TRANS_JOIN_NOLOCK)
370 sb_end_intwrite(root->fs_info->sb);
371 kmem_cache_free(btrfs_trans_handle_cachep, h);
372 return ERR_PTR(ret);
373 } 371 }
374 372
375 cur_trans = root->fs_info->running_transaction; 373 cur_trans = root->fs_info->running_transaction;
@@ -410,6 +408,19 @@ got_it:
410 if (!current->journal_info && type != TRANS_USERSPACE) 408 if (!current->journal_info && type != TRANS_USERSPACE)
411 current->journal_info = h; 409 current->journal_info = h;
412 return h; 410 return h;
411
412join_fail:
413 if (type < TRANS_JOIN_NOLOCK)
414 sb_end_intwrite(root->fs_info->sb);
415 kmem_cache_free(btrfs_trans_handle_cachep, h);
416alloc_fail:
417 if (num_bytes)
418 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
419 num_bytes);
420reserve_fail:
421 if (qgroup_reserved)
422 btrfs_qgroup_free(root, qgroup_reserved);
423 return ERR_PTR(ret);
413} 424}
414 425
415struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 426struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
@@ -1468,7 +1479,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1468 goto cleanup_transaction; 1479 goto cleanup_transaction;
1469 } 1480 }
1470 1481
1471 if (cur_trans->aborted) { 1482 /* Stop the commit early if ->aborted is set */
1483 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1472 ret = cur_trans->aborted; 1484 ret = cur_trans->aborted;
1473 goto cleanup_transaction; 1485 goto cleanup_transaction;
1474 } 1486 }
@@ -1574,6 +1586,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1574 wait_event(cur_trans->writer_wait, 1586 wait_event(cur_trans->writer_wait,
1575 atomic_read(&cur_trans->num_writers) == 1); 1587 atomic_read(&cur_trans->num_writers) == 1);
1576 1588
1589 /* ->aborted might be set after the previous check, so check it */
1590 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1591 ret = cur_trans->aborted;
1592 goto cleanup_transaction;
1593 }
1577 /* 1594 /*
1578 * the reloc mutex makes sure that we stop 1595 * the reloc mutex makes sure that we stop
1579 * the balancing code from coming in and moving 1596 * the balancing code from coming in and moving
@@ -1657,6 +1674,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1657 goto cleanup_transaction; 1674 goto cleanup_transaction;
1658 } 1675 }
1659 1676
1677 /*
1678 * The tasks which save the space cache and inode cache may also
1679 * update ->aborted, check it.
1680 */
1681 if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1682 ret = cur_trans->aborted;
1683 mutex_unlock(&root->fs_info->tree_log_mutex);
1684 mutex_unlock(&root->fs_info->reloc_mutex);
1685 goto cleanup_transaction;
1686 }
1687
1660 btrfs_prepare_extent_commit(trans, root); 1688 btrfs_prepare_extent_commit(trans, root);
1661 1689
1662 cur_trans = root->fs_info->running_transaction; 1690 cur_trans = root->fs_info->running_transaction;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 83186c7e45d4..9027bb1e7466 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3357,6 +3357,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
3357 if (skip_csum) 3357 if (skip_csum)
3358 return 0; 3358 return 0;
3359 3359
3360 if (em->compress_type) {
3361 csum_offset = 0;
3362 csum_len = block_len;
3363 }
3364
3360 /* block start is already adjusted for the file extent offset. */ 3365 /* block start is already adjusted for the file extent offset. */
3361 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 3366 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3362 em->block_start + csum_offset, 3367 em->block_start + csum_offset,
@@ -3410,13 +3415,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3410 em = list_entry(extents.next, struct extent_map, list); 3415 em = list_entry(extents.next, struct extent_map, list);
3411 3416
3412 list_del_init(&em->list); 3417 list_del_init(&em->list);
3413 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
3414 3418
3415 /* 3419 /*
3416 * If we had an error we just need to delete everybody from our 3420 * If we had an error we just need to delete everybody from our
3417 * private list. 3421 * private list.
3418 */ 3422 */
3419 if (ret) { 3423 if (ret) {
3424 clear_em_logging(tree, em);
3420 free_extent_map(em); 3425 free_extent_map(em);
3421 continue; 3426 continue;
3422 } 3427 }
@@ -3424,8 +3429,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3424 write_unlock(&tree->lock); 3429 write_unlock(&tree->lock);
3425 3430
3426 ret = log_one_extent(trans, inode, root, em, path); 3431 ret = log_one_extent(trans, inode, root, em, path);
3427 free_extent_map(em);
3428 write_lock(&tree->lock); 3432 write_lock(&tree->lock);
3433 clear_em_logging(tree, em);
3434 free_extent_map(em);
3429 } 3435 }
3430 WARN_ON(!list_empty(&extents)); 3436 WARN_ON(!list_empty(&extents));
3431 write_unlock(&tree->lock); 3437 write_unlock(&tree->lock);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5cce6aa74012..5cbb7f4b1672 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1431,7 +1431,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1431 } 1431 }
1432 } else { 1432 } else {
1433 ret = btrfs_get_bdev_and_sb(device_path, 1433 ret = btrfs_get_bdev_and_sb(device_path,
1434 FMODE_READ | FMODE_EXCL, 1434 FMODE_WRITE | FMODE_EXCL,
1435 root->fs_info->bdev_holder, 0, 1435 root->fs_info->bdev_holder, 0,
1436 &bdev, &bh); 1436 &bdev, &bh);
1437 if (ret) 1437 if (ret)
@@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1556 ret = 0; 1556 ret = 0;
1557 1557
1558 /* Notify udev that device has changed */ 1558 /* Notify udev that device has changed */
1559 btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1559 if (bdev)
1560 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1560 1561
1561error_brelse: 1562error_brelse:
1562 brelse(bh); 1563 brelse(bh);
@@ -2614,7 +2615,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2614 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2615 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2615 chunk_used = btrfs_block_group_used(&cache->item); 2616 chunk_used = btrfs_block_group_used(&cache->item);
2616 2617
2617 user_thresh = div_factor_fine(cache->key.offset, bargs->usage); 2618 if (bargs->usage == 0)
2619 user_thresh = 0;
2620 else if (bargs->usage > 100)
2621 user_thresh = cache->key.offset;
2622 else
2623 user_thresh = div_factor_fine(cache->key.offset,
2624 bargs->usage);
2625
2618 if (chunk_used < user_thresh) 2626 if (chunk_used < user_thresh)
2619 ret = 0; 2627 ret = 0;
2620 2628
@@ -2959,6 +2967,8 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
2959 unset_balance_control(fs_info); 2967 unset_balance_control(fs_info);
2960 ret = del_balance_item(fs_info->tree_root); 2968 ret = del_balance_item(fs_info->tree_root);
2961 BUG_ON(ret); 2969 BUG_ON(ret);
2970
2971 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2962} 2972}
2963 2973
2964void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, 2974void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
@@ -3138,8 +3148,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3138out: 3148out:
3139 if (bctl->flags & BTRFS_BALANCE_RESUME) 3149 if (bctl->flags & BTRFS_BALANCE_RESUME)
3140 __cancel_balance(fs_info); 3150 __cancel_balance(fs_info);
3141 else 3151 else {
3142 kfree(bctl); 3152 kfree(bctl);
3153 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3154 }
3143 return ret; 3155 return ret;
3144} 3156}
3145 3157
@@ -3156,7 +3168,6 @@ static int balance_kthread(void *data)
3156 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3168 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3157 } 3169 }
3158 3170
3159 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3160 mutex_unlock(&fs_info->balance_mutex); 3171 mutex_unlock(&fs_info->balance_mutex);
3161 mutex_unlock(&fs_info->volume_mutex); 3172 mutex_unlock(&fs_info->volume_mutex);
3162 3173
@@ -3179,7 +3190,6 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3179 return 0; 3190 return 0;
3180 } 3191 }
3181 3192
3182 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3183 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3193 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3184 if (IS_ERR(tsk)) 3194 if (IS_ERR(tsk))
3185 return PTR_ERR(tsk); 3195 return PTR_ERR(tsk);
@@ -3233,6 +3243,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3233 btrfs_balance_sys(leaf, item, &disk_bargs); 3243 btrfs_balance_sys(leaf, item, &disk_bargs);
3234 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3244 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3235 3245
3246 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3247
3236 mutex_lock(&fs_info->volume_mutex); 3248 mutex_lock(&fs_info->volume_mutex);
3237 mutex_lock(&fs_info->balance_mutex); 3249 mutex_lock(&fs_info->balance_mutex);
3238 3250
@@ -3496,7 +3508,7 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3496 { 1, 1, 2, 2, 2, 2 /* raid1 */ }, 3508 { 1, 1, 2, 2, 2, 2 /* raid1 */ },
3497 { 1, 2, 1, 1, 1, 2 /* dup */ }, 3509 { 1, 2, 1, 1, 1, 2 /* dup */ },
3498 { 1, 1, 0, 2, 1, 1 /* raid0 */ }, 3510 { 1, 1, 0, 2, 1, 1 /* raid0 */ },
3499 { 1, 1, 0, 1, 1, 1 /* single */ }, 3511 { 1, 1, 1, 1, 1, 1 /* single */ },
3500}; 3512};
3501 3513
3502static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, 3514static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index ce5cbd717bfc..210fce2df308 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -226,6 +226,8 @@ compose_mount_options_out:
226compose_mount_options_err: 226compose_mount_options_err:
227 kfree(mountdata); 227 kfree(mountdata);
228 mountdata = ERR_PTR(rc); 228 mountdata = ERR_PTR(rc);
229 kfree(*devname);
230 *devname = NULL;
229 goto compose_mount_options_out; 231 goto compose_mount_options_out;
230} 232}
231 233
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 17c3643e5950..12b3da39733b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1917,7 +1917,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
1917 } 1917 }
1918 case AF_INET6: { 1918 case AF_INET6: {
1919 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; 1919 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1920 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs; 1920 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1921 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); 1921 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
1922 } 1922 }
1923 default: 1923 default:
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 7ff49852b0cb..911649a47dd5 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf,
503#endif 503#endif
504 return -EINVAL; 504 return -EINVAL;
505 505
506#ifdef CONFIG_COMPAT 506 /*
507 if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) 507 * can't compare against COMPAT/dlm_write_request32 because
508#else 508 * we don't yet know if is64bit is zero
509 */
509 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) 510 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
510#endif
511 return -EINVAL; 511 return -EINVAL;
512 512
513 kbuf = kzalloc(count + 1, GFP_NOFS); 513 kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index e95b94945d5f..137af4255da6 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
191 retval = f2fs_getxattr(inode, name_index, "", value, retval); 191 retval = f2fs_getxattr(inode, name_index, "", value, retval);
192 } 192 }
193 193
194 if (retval < 0) { 194 if (retval > 0)
195 if (retval == -ENODATA)
196 acl = NULL;
197 else
198 acl = ERR_PTR(retval);
199 } else {
200 acl = f2fs_acl_from_disk(value, retval); 195 acl = f2fs_acl_from_disk(value, retval);
201 } 196 else if (retval == -ENODATA)
197 acl = NULL;
198 else
199 acl = ERR_PTR(retval);
202 kfree(value); 200 kfree(value);
201
203 if (!IS_ERR(acl)) 202 if (!IS_ERR(acl))
204 set_cached_acl(inode, type, acl); 203 set_cached_acl(inode, type, acl);
205 204
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6ef36c37e2be..ff3c8439af87 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -214,7 +214,6 @@ retry:
214 goto retry; 214 goto retry;
215 } 215 }
216 new->ino = ino; 216 new->ino = ino;
217 INIT_LIST_HEAD(&new->list);
218 217
219 /* add new_oentry into list which is sorted by inode number */ 218 /* add new_oentry into list which is sorted by inode number */
220 if (orphan) { 219 if (orphan) {
@@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
772 sbi->n_orphans = 0; 771 sbi->n_orphans = 0;
773} 772}
774 773
775int create_checkpoint_caches(void) 774int __init create_checkpoint_caches(void)
776{ 775{
777 orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", 776 orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
778 sizeof(struct orphan_inode_entry), NULL); 777 sizeof(struct orphan_inode_entry), NULL);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 3aa5ce7cab83..7bd22a201125 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -547,6 +547,15 @@ redirty_out:
547 547
548#define MAX_DESIRED_PAGES_WP 4096 548#define MAX_DESIRED_PAGES_WP 4096
549 549
550static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
551 void *data)
552{
553 struct address_space *mapping = data;
554 int ret = mapping->a_ops->writepage(page, wbc);
555 mapping_set_error(mapping, ret);
556 return ret;
557}
558
550static int f2fs_write_data_pages(struct address_space *mapping, 559static int f2fs_write_data_pages(struct address_space *mapping,
551 struct writeback_control *wbc) 560 struct writeback_control *wbc)
552{ 561{
@@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
563 572
564 if (!S_ISDIR(inode->i_mode)) 573 if (!S_ISDIR(inode->i_mode))
565 mutex_lock(&sbi->writepages); 574 mutex_lock(&sbi->writepages);
566 ret = generic_writepages(mapping, wbc); 575 ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
567 if (!S_ISDIR(inode->i_mode)) 576 if (!S_ISDIR(inode->i_mode))
568 mutex_unlock(&sbi->writepages); 577 mutex_unlock(&sbi->writepages);
569 f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); 578 f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
@@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page)
689 return 0; 698 return 0;
690} 699}
691 700
701static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
702{
703 return generic_block_bmap(mapping, block, get_data_block_ro);
704}
705
692const struct address_space_operations f2fs_dblock_aops = { 706const struct address_space_operations f2fs_dblock_aops = {
693 .readpage = f2fs_read_data_page, 707 .readpage = f2fs_read_data_page,
694 .readpages = f2fs_read_data_pages, 708 .readpages = f2fs_read_data_pages,
@@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = {
700 .invalidatepage = f2fs_invalidate_data_page, 714 .invalidatepage = f2fs_invalidate_data_page,
701 .releasepage = f2fs_release_data_page, 715 .releasepage = f2fs_release_data_page,
702 .direct_IO = f2fs_direct_IO, 716 .direct_IO = f2fs_direct_IO,
717 .bmap = f2fs_bmap,
703}; 718};
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 0e0380a588ad..c8c37307b326 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -26,6 +26,7 @@
26 26
27static LIST_HEAD(f2fs_stat_list); 27static LIST_HEAD(f2fs_stat_list);
28static struct dentry *debugfs_root; 28static struct dentry *debugfs_root;
29static DEFINE_MUTEX(f2fs_stat_mutex);
29 30
30static void update_general_status(struct f2fs_sb_info *sbi) 31static void update_general_status(struct f2fs_sb_info *sbi)
31{ 32{
@@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v)
180 int i = 0; 181 int i = 0;
181 int j; 182 int j;
182 183
184 mutex_lock(&f2fs_stat_mutex);
183 list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { 185 list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
184 186
185 mutex_lock(&si->stat_lock);
186 if (!si->sbi) {
187 mutex_unlock(&si->stat_lock);
188 continue;
189 }
190 update_general_status(si->sbi); 187 update_general_status(si->sbi);
191 188
192 seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++); 189 seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
193 seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ", 190 seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
194 si->nat_area_segs, si->sit_area_segs); 191 si->sit_area_segs, si->nat_area_segs);
195 seq_printf(s, "[SSA: %d] [MAIN: %d", 192 seq_printf(s, "[SSA: %d] [MAIN: %d",
196 si->ssa_area_segs, si->main_area_segs); 193 si->ssa_area_segs, si->main_area_segs);
197 seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", 194 seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
@@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v)
286 seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", 283 seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
287 (si->base_mem + si->cache_mem) >> 10, 284 (si->base_mem + si->cache_mem) >> 10,
288 si->base_mem >> 10, si->cache_mem >> 10); 285 si->base_mem >> 10, si->cache_mem >> 10);
289 mutex_unlock(&si->stat_lock);
290 } 286 }
287 mutex_unlock(&f2fs_stat_mutex);
291 return 0; 288 return 0;
292} 289}
293 290
@@ -303,7 +300,7 @@ static const struct file_operations stat_fops = {
303 .release = single_release, 300 .release = single_release,
304}; 301};
305 302
306static int init_stats(struct f2fs_sb_info *sbi) 303int f2fs_build_stats(struct f2fs_sb_info *sbi)
307{ 304{
308 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 305 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
309 struct f2fs_stat_info *si; 306 struct f2fs_stat_info *si;
@@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi)
313 return -ENOMEM; 310 return -ENOMEM;
314 311
315 si = sbi->stat_info; 312 si = sbi->stat_info;
316 mutex_init(&si->stat_lock);
317 list_add_tail(&si->stat_list, &f2fs_stat_list);
318
319 si->all_area_segs = le32_to_cpu(raw_super->segment_count); 313 si->all_area_segs = le32_to_cpu(raw_super->segment_count);
320 si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); 314 si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
321 si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); 315 si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi)
325 si->main_area_zones = si->main_area_sections / 319 si->main_area_zones = si->main_area_sections /
326 le32_to_cpu(raw_super->secs_per_zone); 320 le32_to_cpu(raw_super->secs_per_zone);
327 si->sbi = sbi; 321 si->sbi = sbi;
328 return 0;
329}
330 322
331int f2fs_build_stats(struct f2fs_sb_info *sbi) 323 mutex_lock(&f2fs_stat_mutex);
332{ 324 list_add_tail(&si->stat_list, &f2fs_stat_list);
333 int retval; 325 mutex_unlock(&f2fs_stat_mutex);
334
335 retval = init_stats(sbi);
336 if (retval)
337 return retval;
338
339 if (!debugfs_root)
340 debugfs_root = debugfs_create_dir("f2fs", NULL);
341 326
342 debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);
343 return 0; 327 return 0;
344} 328}
345 329
@@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
347{ 331{
348 struct f2fs_stat_info *si = sbi->stat_info; 332 struct f2fs_stat_info *si = sbi->stat_info;
349 333
334 mutex_lock(&f2fs_stat_mutex);
350 list_del(&si->stat_list); 335 list_del(&si->stat_list);
351 mutex_lock(&si->stat_lock); 336 mutex_unlock(&f2fs_stat_mutex);
352 si->sbi = NULL; 337
353 mutex_unlock(&si->stat_lock);
354 kfree(sbi->stat_info); 338 kfree(sbi->stat_info);
355} 339}
356 340
357void destroy_root_stats(void) 341void __init f2fs_create_root_stats(void)
342{
343 debugfs_root = debugfs_create_dir("f2fs", NULL);
344 if (debugfs_root)
345 debugfs_create_file("status", S_IRUGO, debugfs_root,
346 NULL, &stat_fops);
347}
348
349void f2fs_destroy_root_stats(void)
358{ 350{
359 debugfs_remove_recursive(debugfs_root); 351 debugfs_remove_recursive(debugfs_root);
360 debugfs_root = NULL; 352 debugfs_root = NULL;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 951ed52748f6..989980e16d0b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
503 } 503 }
504 504
505 if (inode) { 505 if (inode) {
506 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 506 inode->i_ctime = CURRENT_TIME;
507 drop_nlink(inode); 507 drop_nlink(inode);
508 if (S_ISDIR(inode->i_mode)) { 508 if (S_ISDIR(inode->i_mode)) {
509 drop_nlink(inode); 509 drop_nlink(inode);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 13c6dfbb7183..c8e2d751ef9c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -211,11 +211,11 @@ struct dnode_of_data {
211static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 211static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
212 struct page *ipage, struct page *npage, nid_t nid) 212 struct page *ipage, struct page *npage, nid_t nid)
213{ 213{
214 memset(dn, 0, sizeof(*dn));
214 dn->inode = inode; 215 dn->inode = inode;
215 dn->inode_page = ipage; 216 dn->inode_page = ipage;
216 dn->node_page = npage; 217 dn->node_page = npage;
217 dn->nid = nid; 218 dn->nid = nid;
218 dn->inode_page_locked = 0;
219} 219}
220 220
221/* 221/*
@@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *);
877 * super.c 877 * super.c
878 */ 878 */
879int f2fs_sync_fs(struct super_block *, int); 879int f2fs_sync_fs(struct super_block *, int);
880extern __printf(3, 4)
881void f2fs_msg(struct super_block *, const char *, const char *, ...);
880 882
881/* 883/*
882 * hash.c 884 * hash.c
@@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int,
912void flush_nat_entries(struct f2fs_sb_info *); 914void flush_nat_entries(struct f2fs_sb_info *);
913int build_node_manager(struct f2fs_sb_info *); 915int build_node_manager(struct f2fs_sb_info *);
914void destroy_node_manager(struct f2fs_sb_info *); 916void destroy_node_manager(struct f2fs_sb_info *);
915int create_node_manager_caches(void); 917int __init create_node_manager_caches(void);
916void destroy_node_manager_caches(void); 918void destroy_node_manager_caches(void);
917 919
918/* 920/*
@@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *);
964void block_operations(struct f2fs_sb_info *); 966void block_operations(struct f2fs_sb_info *);
965void write_checkpoint(struct f2fs_sb_info *, bool, bool); 967void write_checkpoint(struct f2fs_sb_info *, bool, bool);
966void init_orphan_info(struct f2fs_sb_info *); 968void init_orphan_info(struct f2fs_sb_info *);
967int create_checkpoint_caches(void); 969int __init create_checkpoint_caches(void);
968void destroy_checkpoint_caches(void); 970void destroy_checkpoint_caches(void);
969 971
970/* 972/*
@@ -984,9 +986,9 @@ int do_write_data_page(struct page *);
984int start_gc_thread(struct f2fs_sb_info *); 986int start_gc_thread(struct f2fs_sb_info *);
985void stop_gc_thread(struct f2fs_sb_info *); 987void stop_gc_thread(struct f2fs_sb_info *);
986block_t start_bidx_of_node(unsigned int); 988block_t start_bidx_of_node(unsigned int);
987int f2fs_gc(struct f2fs_sb_info *, int); 989int f2fs_gc(struct f2fs_sb_info *);
988void build_gc_manager(struct f2fs_sb_info *); 990void build_gc_manager(struct f2fs_sb_info *);
989int create_gc_caches(void); 991int __init create_gc_caches(void);
990void destroy_gc_caches(void); 992void destroy_gc_caches(void);
991 993
992/* 994/*
@@ -1058,7 +1060,8 @@ struct f2fs_stat_info {
1058 1060
1059int f2fs_build_stats(struct f2fs_sb_info *); 1061int f2fs_build_stats(struct f2fs_sb_info *);
1060void f2fs_destroy_stats(struct f2fs_sb_info *); 1062void f2fs_destroy_stats(struct f2fs_sb_info *);
1061void destroy_root_stats(void); 1063void __init f2fs_create_root_stats(void);
1064void f2fs_destroy_root_stats(void);
1062#else 1065#else
1063#define stat_inc_call_count(si) 1066#define stat_inc_call_count(si)
1064#define stat_inc_seg_count(si, type) 1067#define stat_inc_seg_count(si, type)
@@ -1068,7 +1071,8 @@ void destroy_root_stats(void);
1068 1071
1069static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 1072static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
1070static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 1073static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
1071static inline void destroy_root_stats(void) { } 1074static inline void __init f2fs_create_root_stats(void) { }
1075static inline void f2fs_destroy_root_stats(void) { }
1072#endif 1076#endif
1073 1077
1074extern const struct file_operations f2fs_dir_operations; 1078extern const struct file_operations f2fs_dir_operations;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 7f9ea9271ebe..3191b52aafb0 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -96,8 +96,9 @@ out:
96} 96}
97 97
98static const struct vm_operations_struct f2fs_file_vm_ops = { 98static const struct vm_operations_struct f2fs_file_vm_ops = {
99 .fault = filemap_fault, 99 .fault = filemap_fault,
100 .page_mkwrite = f2fs_vm_page_mkwrite, 100 .page_mkwrite = f2fs_vm_page_mkwrite,
101 .remap_pages = generic_file_remap_pages,
101}; 102};
102 103
103static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) 104static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
@@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
137 if (ret) 138 if (ret)
138 return ret; 139 return ret;
139 140
141 /* guarantee free sections for fsync */
142 f2fs_balance_fs(sbi);
143
140 mutex_lock(&inode->i_mutex); 144 mutex_lock(&inode->i_mutex);
141 145
142 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 146 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
@@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
407 struct dnode_of_data dn; 411 struct dnode_of_data dn;
408 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 412 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
409 413
414 f2fs_balance_fs(sbi);
415
410 mutex_lock_op(sbi, DATA_TRUNC); 416 mutex_lock_op(sbi, DATA_TRUNC);
411 set_new_dnode(&dn, inode, NULL, NULL, 0); 417 set_new_dnode(&dn, inode, NULL, NULL, 0);
412 err = get_dnode_of_data(&dn, index, RDONLY_NODE); 418 err = get_dnode_of_data(&dn, index, RDONLY_NODE);
@@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode,
534 loff_t offset, loff_t len) 540 loff_t offset, loff_t len)
535{ 541{
536 struct inode *inode = file->f_path.dentry->d_inode; 542 struct inode *inode = file->f_path.dentry->d_inode;
537 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
538 long ret; 543 long ret;
539 544
540 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 545 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode,
545 else 550 else
546 ret = expand_inode_data(inode, offset, len, mode); 551 ret = expand_inode_data(inode, offset, len, mode);
547 552
548 f2fs_balance_fs(sbi); 553 if (!ret) {
554 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
555 mark_inode_dirty(inode);
556 }
549 return ret; 557 return ret;
550} 558}
551 559
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b0ec721e984a..c386910dacc5 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,7 +78,7 @@ static int gc_thread_func(void *data)
78 78
79 sbi->bg_gc++; 79 sbi->bg_gc++;
80 80
81 if (f2fs_gc(sbi, 1) == GC_NONE) 81 if (f2fs_gc(sbi) == GC_NONE)
82 wait_ms = GC_THREAD_NOGC_SLEEP_TIME; 82 wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
83 else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) 83 else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
84 wait_ms = GC_THREAD_MAX_SLEEP_TIME; 84 wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -424,7 +424,11 @@ next_step:
424} 424}
425 425
426/* 426/*
427 * Calculate start block index that this node page contains 427 * Calculate start block index indicating the given node offset.
428 * Be careful, caller should give this node offset only indicating direct node
429 * blocks. If any node offsets, which point the other types of node blocks such
430 * as indirect or double indirect node blocks, are given, it must be a caller's
431 * bug.
428 */ 432 */
429block_t start_bidx_of_node(unsigned int node_ofs) 433block_t start_bidx_of_node(unsigned int node_ofs)
430{ 434{
@@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
651 return ret; 655 return ret;
652} 656}
653 657
654int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) 658int f2fs_gc(struct f2fs_sb_info *sbi)
655{ 659{
656 unsigned int segno;
657 int old_free_secs, cur_free_secs;
658 int gc_status, nfree;
659 struct list_head ilist; 660 struct list_head ilist;
661 unsigned int segno, i;
660 int gc_type = BG_GC; 662 int gc_type = BG_GC;
663 int gc_status = GC_NONE;
661 664
662 INIT_LIST_HEAD(&ilist); 665 INIT_LIST_HEAD(&ilist);
663gc_more: 666gc_more:
664 nfree = 0; 667 if (!(sbi->sb->s_flags & MS_ACTIVE))
665 gc_status = GC_NONE; 668 goto stop;
666 669
667 if (has_not_enough_free_secs(sbi)) 670 if (has_not_enough_free_secs(sbi))
668 old_free_secs = reserved_sections(sbi); 671 gc_type = FG_GC;
669 else
670 old_free_secs = free_sections(sbi);
671
672 while (sbi->sb->s_flags & MS_ACTIVE) {
673 int i;
674 if (has_not_enough_free_secs(sbi))
675 gc_type = FG_GC;
676 672
677 cur_free_secs = free_sections(sbi) + nfree; 673 if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
674 goto stop;
678 675
679 /* We got free space successfully. */ 676 for (i = 0; i < sbi->segs_per_sec; i++) {
680 if (nGC < cur_free_secs - old_free_secs) 677 /*
681 break; 678 * do_garbage_collect will give us three gc_status:
682 679 * GC_ERROR, GC_DONE, and GC_BLOCKED.
683 if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) 680 * If GC is finished uncleanly, we have to return
681 * the victim to dirty segment list.
682 */
683 gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
684 if (gc_status != GC_DONE)
684 break; 685 break;
685
686 for (i = 0; i < sbi->segs_per_sec; i++) {
687 /*
688 * do_garbage_collect will give us three gc_status:
689 * GC_ERROR, GC_DONE, and GC_BLOCKED.
690 * If GC is finished uncleanly, we have to return
691 * the victim to dirty segment list.
692 */
693 gc_status = do_garbage_collect(sbi, segno + i,
694 &ilist, gc_type);
695 if (gc_status != GC_DONE)
696 goto stop;
697 nfree++;
698 }
699 } 686 }
700stop: 687 if (has_not_enough_free_secs(sbi)) {
701 if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
702 write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); 688 write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
703 if (nfree) 689 if (has_not_enough_free_secs(sbi))
704 goto gc_more; 690 goto gc_more;
705 } 691 }
692stop:
706 mutex_unlock(&sbi->gc_mutex); 693 mutex_unlock(&sbi->gc_mutex);
707 694
708 put_gc_inode(&ilist); 695 put_gc_inode(&ilist);
709 BUG_ON(!list_empty(&ilist));
710 return gc_status; 696 return gc_status;
711} 697}
712 698
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
715 DIRTY_I(sbi)->v_ops = &default_v_ops; 701 DIRTY_I(sbi)->v_ops = &default_v_ops;
716} 702}
717 703
718int create_gc_caches(void) 704int __init create_gc_caches(void)
719{ 705{
720 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", 706 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
721 sizeof(struct inode_entry), NULL); 707 sizeof(struct inode_entry), NULL);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index bf20b4d03214..794241777322 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
217 inode->i_ino == F2FS_META_INO(sbi)) 217 inode->i_ino == F2FS_META_INO(sbi))
218 return 0; 218 return 0;
219 219
220 if (wbc)
221 f2fs_balance_fs(sbi);
222
220 node_page = get_node_page(sbi, inode->i_ino); 223 node_page = get_node_page(sbi, inode->i_ino);
221 if (IS_ERR(node_page)) 224 if (IS_ERR(node_page))
222 return PTR_ERR(node_page); 225 return PTR_ERR(node_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 5066bfd256c9..9bda63c9c166 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
1124 return 0; 1124 return 0;
1125} 1125}
1126 1126
1127/*
1128 * It is very important to gather dirty pages and write at once, so that we can
1129 * submit a big bio without interfering other data writes.
1130 * Be default, 512 pages (2MB), a segment size, is quite reasonable.
1131 */
1132#define COLLECT_DIRTY_NODES 512
1127static int f2fs_write_node_pages(struct address_space *mapping, 1133static int f2fs_write_node_pages(struct address_space *mapping,
1128 struct writeback_control *wbc) 1134 struct writeback_control *wbc)
1129{ 1135{
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
1131 struct block_device *bdev = sbi->sb->s_bdev; 1137 struct block_device *bdev = sbi->sb->s_bdev;
1132 long nr_to_write = wbc->nr_to_write; 1138 long nr_to_write = wbc->nr_to_write;
1133 1139
1134 if (wbc->for_kupdate) 1140 /* First check balancing cached NAT entries */
1135 return 0;
1136
1137 if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
1138 return 0;
1139
1140 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { 1141 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
1141 write_checkpoint(sbi, false, false); 1142 write_checkpoint(sbi, false, false);
1142 return 0; 1143 return 0;
1143 } 1144 }
1144 1145
1146 /* collect a number of dirty node pages and write together */
1147 if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1148 return 0;
1149
1145 /* if mounting is failed, skip writing node pages */ 1150 /* if mounting is failed, skip writing node pages */
1146 wbc->nr_to_write = bio_get_nr_vecs(bdev); 1151 wbc->nr_to_write = bio_get_nr_vecs(bdev);
1147 sync_node_pages(sbi, 0, wbc); 1152 sync_node_pages(sbi, 0, wbc);
@@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
1732 kfree(nm_i); 1737 kfree(nm_i);
1733} 1738}
1734 1739
1735int create_node_manager_caches(void) 1740int __init create_node_manager_caches(void)
1736{ 1741{
1737 nat_entry_slab = f2fs_kmem_cache_create("nat_entry", 1742 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1738 sizeof(struct nat_entry), NULL); 1743 sizeof(struct nat_entry), NULL);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b571fee677d5..f42e4060b399 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
67 kunmap(page); 67 kunmap(page);
68 f2fs_put_page(page, 0); 68 f2fs_put_page(page, 0);
69 } else { 69 } else {
70 f2fs_add_link(&dent, inode); 70 err = f2fs_add_link(&dent, inode);
71 } 71 }
72 iput(dir); 72 iput(dir);
73out: 73out:
@@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
151 goto out; 151 goto out;
152 } 152 }
153 153
154 INIT_LIST_HEAD(&entry->list);
155 list_add_tail(&entry->list, head); 154 list_add_tail(&entry->list, head);
156 entry->blkaddr = blkaddr; 155 entry->blkaddr = blkaddr;
157 } 156 }
@@ -174,10 +173,9 @@ out:
174static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, 173static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
175 struct list_head *head) 174 struct list_head *head)
176{ 175{
177 struct list_head *this; 176 struct fsync_inode_entry *entry, *tmp;
178 struct fsync_inode_entry *entry; 177
179 list_for_each(this, head) { 178 list_for_each_entry_safe(entry, tmp, head, list) {
180 entry = list_entry(this, struct fsync_inode_entry, list);
181 iput(entry->inode); 179 iput(entry->inode);
182 list_del(&entry->list); 180 list_del(&entry->list);
183 kmem_cache_free(fsync_entry_slab, entry); 181 kmem_cache_free(fsync_entry_slab, entry);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index de6240922b0a..4b0099066582 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
31 */ 31 */
32 if (has_not_enough_free_secs(sbi)) { 32 if (has_not_enough_free_secs(sbi)) {
33 mutex_lock(&sbi->gc_mutex); 33 mutex_lock(&sbi->gc_mutex);
34 f2fs_gc(sbi, 1); 34 f2fs_gc(sbi);
35 } 35 }
36} 36}
37 37
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 08a94c814bdc..37fad04c8669 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = {
53 {Opt_err, NULL}, 53 {Opt_err, NULL},
54}; 54};
55 55
56void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
57{
58 struct va_format vaf;
59 va_list args;
60
61 va_start(args, fmt);
62 vaf.fmt = fmt;
63 vaf.va = &args;
64 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
65 va_end(args);
66}
67
56static void init_once(void *foo) 68static void init_once(void *foo)
57{ 69{
58 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; 70 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
125 137
126 if (sync) 138 if (sync)
127 write_checkpoint(sbi, false, false); 139 write_checkpoint(sbi, false, false);
140 else
141 f2fs_balance_fs(sbi);
128 142
129 return 0; 143 return 0;
130} 144}
@@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = {
247 .get_parent = f2fs_get_parent, 261 .get_parent = f2fs_get_parent,
248}; 262};
249 263
250static int parse_options(struct f2fs_sb_info *sbi, char *options) 264static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
265 char *options)
251{ 266{
252 substring_t args[MAX_OPT_ARGS]; 267 substring_t args[MAX_OPT_ARGS];
253 char *p; 268 char *p;
@@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
286 break; 301 break;
287#else 302#else
288 case Opt_nouser_xattr: 303 case Opt_nouser_xattr:
289 pr_info("nouser_xattr options not supported\n"); 304 f2fs_msg(sb, KERN_INFO,
305 "nouser_xattr options not supported");
290 break; 306 break;
291#endif 307#endif
292#ifdef CONFIG_F2FS_FS_POSIX_ACL 308#ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
295 break; 311 break;
296#else 312#else
297 case Opt_noacl: 313 case Opt_noacl:
298 pr_info("noacl options not supported\n"); 314 f2fs_msg(sb, KERN_INFO, "noacl options not supported");
299 break; 315 break;
300#endif 316#endif
301 case Opt_active_logs: 317 case Opt_active_logs:
@@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
309 set_opt(sbi, DISABLE_EXT_IDENTIFY); 325 set_opt(sbi, DISABLE_EXT_IDENTIFY);
310 break; 326 break;
311 default: 327 default:
312 pr_err("Unrecognized mount option \"%s\" or missing value\n", 328 f2fs_msg(sb, KERN_ERR,
313 p); 329 "Unrecognized mount option \"%s\" or missing value",
330 p);
314 return -EINVAL; 331 return -EINVAL;
315 } 332 }
316 } 333 }
@@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits)
337 return result; 354 return result;
338} 355}
339 356
340static int sanity_check_raw_super(struct f2fs_super_block *raw_super) 357static int sanity_check_raw_super(struct super_block *sb,
358 struct f2fs_super_block *raw_super)
341{ 359{
342 unsigned int blocksize; 360 unsigned int blocksize;
343 361
344 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) 362 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
363 f2fs_msg(sb, KERN_INFO,
364 "Magic Mismatch, valid(0x%x) - read(0x%x)",
365 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
345 return 1; 366 return 1;
367 }
346 368
347 /* Currently, support only 4KB block size */ 369 /* Currently, support only 4KB block size */
348 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 370 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
349 if (blocksize != PAGE_CACHE_SIZE) 371 if (blocksize != PAGE_CACHE_SIZE) {
372 f2fs_msg(sb, KERN_INFO,
373 "Invalid blocksize (%u), supports only 4KB\n",
374 blocksize);
350 return 1; 375 return 1;
376 }
351 if (le32_to_cpu(raw_super->log_sectorsize) != 377 if (le32_to_cpu(raw_super->log_sectorsize) !=
352 F2FS_LOG_SECTOR_SIZE) 378 F2FS_LOG_SECTOR_SIZE) {
379 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
353 return 1; 380 return 1;
381 }
354 if (le32_to_cpu(raw_super->log_sectors_per_block) != 382 if (le32_to_cpu(raw_super->log_sectors_per_block) !=
355 F2FS_LOG_SECTORS_PER_BLOCK) 383 F2FS_LOG_SECTORS_PER_BLOCK) {
384 f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
356 return 1; 385 return 1;
386 }
357 return 0; 387 return 0;
358} 388}
359 389
@@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
413 if (!sbi) 443 if (!sbi)
414 return -ENOMEM; 444 return -ENOMEM;
415 445
416 /* set a temporary block size */ 446 /* set a block size */
417 if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) 447 if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
448 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
418 goto free_sbi; 449 goto free_sbi;
450 }
419 451
420 /* read f2fs raw super block */ 452 /* read f2fs raw super block */
421 raw_super_buf = sb_bread(sb, 0); 453 raw_super_buf = sb_bread(sb, 0);
422 if (!raw_super_buf) { 454 if (!raw_super_buf) {
423 err = -EIO; 455 err = -EIO;
456 f2fs_msg(sb, KERN_ERR, "unable to read superblock");
424 goto free_sbi; 457 goto free_sbi;
425 } 458 }
426 raw_super = (struct f2fs_super_block *) 459 raw_super = (struct f2fs_super_block *)
@@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
438 set_opt(sbi, POSIX_ACL); 471 set_opt(sbi, POSIX_ACL);
439#endif 472#endif
440 /* parse mount options */ 473 /* parse mount options */
441 if (parse_options(sbi, (char *)data)) 474 if (parse_options(sb, sbi, (char *)data))
442 goto free_sb_buf; 475 goto free_sb_buf;
443 476
444 /* sanity checking of raw super */ 477 /* sanity checking of raw super */
445 if (sanity_check_raw_super(raw_super)) 478 if (sanity_check_raw_super(sb, raw_super)) {
479 f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
446 goto free_sb_buf; 480 goto free_sb_buf;
481 }
447 482
448 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); 483 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
449 sb->s_max_links = F2FS_LINK_MAX; 484 sb->s_max_links = F2FS_LINK_MAX;
@@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
477 /* get an inode for meta space */ 512 /* get an inode for meta space */
478 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 513 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
479 if (IS_ERR(sbi->meta_inode)) { 514 if (IS_ERR(sbi->meta_inode)) {
515 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
480 err = PTR_ERR(sbi->meta_inode); 516 err = PTR_ERR(sbi->meta_inode);
481 goto free_sb_buf; 517 goto free_sb_buf;
482 } 518 }
483 519
484 err = get_valid_checkpoint(sbi); 520 err = get_valid_checkpoint(sbi);
485 if (err) 521 if (err) {
522 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
486 goto free_meta_inode; 523 goto free_meta_inode;
524 }
487 525
488 /* sanity checking of checkpoint */ 526 /* sanity checking of checkpoint */
489 err = -EINVAL; 527 err = -EINVAL;
490 if (sanity_check_ckpt(raw_super, sbi->ckpt)) 528 if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
529 f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
491 goto free_cp; 530 goto free_cp;
531 }
492 532
493 sbi->total_valid_node_count = 533 sbi->total_valid_node_count =
494 le32_to_cpu(sbi->ckpt->valid_node_count); 534 le32_to_cpu(sbi->ckpt->valid_node_count);
@@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
502 INIT_LIST_HEAD(&sbi->dir_inode_list); 542 INIT_LIST_HEAD(&sbi->dir_inode_list);
503 spin_lock_init(&sbi->dir_inode_lock); 543 spin_lock_init(&sbi->dir_inode_lock);
504 544
505 /* init super block */
506 if (!sb_set_blocksize(sb, sbi->blocksize))
507 goto free_cp;
508
509 init_orphan_info(sbi); 545 init_orphan_info(sbi);
510 546
511 /* setup f2fs internal modules */ 547 /* setup f2fs internal modules */
512 err = build_segment_manager(sbi); 548 err = build_segment_manager(sbi);
513 if (err) 549 if (err) {
550 f2fs_msg(sb, KERN_ERR,
551 "Failed to initialize F2FS segment manager");
514 goto free_sm; 552 goto free_sm;
553 }
515 err = build_node_manager(sbi); 554 err = build_node_manager(sbi);
516 if (err) 555 if (err) {
556 f2fs_msg(sb, KERN_ERR,
557 "Failed to initialize F2FS node manager");
517 goto free_nm; 558 goto free_nm;
559 }
518 560
519 build_gc_manager(sbi); 561 build_gc_manager(sbi);
520 562
521 /* get an inode for node space */ 563 /* get an inode for node space */
522 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 564 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
523 if (IS_ERR(sbi->node_inode)) { 565 if (IS_ERR(sbi->node_inode)) {
566 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
524 err = PTR_ERR(sbi->node_inode); 567 err = PTR_ERR(sbi->node_inode);
525 goto free_nm; 568 goto free_nm;
526 } 569 }
@@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
533 /* read root inode and dentry */ 576 /* read root inode and dentry */
534 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 577 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
535 if (IS_ERR(root)) { 578 if (IS_ERR(root)) {
579 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
536 err = PTR_ERR(root); 580 err = PTR_ERR(root);
537 goto free_node_inode; 581 goto free_node_inode;
538 } 582 }
@@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = {
596 .fs_flags = FS_REQUIRES_DEV, 640 .fs_flags = FS_REQUIRES_DEV,
597}; 641};
598 642
599static int init_inodecache(void) 643static int __init init_inodecache(void)
600{ 644{
601 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", 645 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
602 sizeof(struct f2fs_inode_info), NULL); 646 sizeof(struct f2fs_inode_info), NULL);
@@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void)
631 err = create_checkpoint_caches(); 675 err = create_checkpoint_caches();
632 if (err) 676 if (err)
633 goto fail; 677 goto fail;
634 return register_filesystem(&f2fs_fs_type); 678 err = register_filesystem(&f2fs_fs_type);
679 if (err)
680 goto fail;
681 f2fs_create_root_stats();
635fail: 682fail:
636 return err; 683 return err;
637} 684}
638 685
639static void __exit exit_f2fs_fs(void) 686static void __exit exit_f2fs_fs(void)
640{ 687{
641 destroy_root_stats(); 688 f2fs_destroy_root_stats();
642 unregister_filesystem(&f2fs_fs_type); 689 unregister_filesystem(&f2fs_fs_type);
643 destroy_checkpoint_caches(); 690 destroy_checkpoint_caches();
644 destroy_gc_caches(); 691 destroy_gc_caches();
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 940136a3d3a6..8038c0496504 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
318 if (name_len > 255 || value_len > MAX_VALUE_LEN) 318 if (name_len > 255 || value_len > MAX_VALUE_LEN)
319 return -ERANGE; 319 return -ERANGE;
320 320
321 f2fs_balance_fs(sbi);
322
321 mutex_lock_op(sbi, NODE_NEW); 323 mutex_lock_op(sbi, NODE_NEW);
322 if (!fi->i_xattr_nid) { 324 if (!fi->i_xattr_nid) {
323 /* Allocate new attribute block */ 325 /* Allocate new attribute block */
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 0cf160a94eda..1b2f6c2c3aaf 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -4,12 +4,24 @@ config FUSE_FS
4 With FUSE it is possible to implement a fully functional filesystem 4 With FUSE it is possible to implement a fully functional filesystem
5 in a userspace program. 5 in a userspace program.
6 6
7 There's also companion library: libfuse. This library along with 7 There's also a companion library: libfuse2. This library is available
8 utilities is available from the FUSE homepage: 8 from the FUSE homepage:
9 <http://fuse.sourceforge.net/> 9 <http://fuse.sourceforge.net/>
10 although chances are your distribution already has that library
11 installed if you've installed the "fuse" package itself.
10 12
11 See <file:Documentation/filesystems/fuse.txt> for more information. 13 See <file:Documentation/filesystems/fuse.txt> for more information.
12 See <file:Documentation/Changes> for needed library/utility version. 14 See <file:Documentation/Changes> for needed library/utility version.
13 15
14 If you want to develop a userspace FS, or if you want to use 16 If you want to develop a userspace FS, or if you want to use
15 a filesystem based on FUSE, answer Y or M. 17 a filesystem based on FUSE, answer Y or M.
18
19config CUSE
20 tristate "Character device in Userspace support"
21 depends on FUSE_FS
22 help
23 This FUSE extension allows character devices to be
24 implemented in userspace.
25
26 If you want to develop or use a userspace character device
27 based on CUSE, answer Y or M.
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index ee8d55042298..e397b675b029 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -45,7 +45,6 @@
45#include <linux/miscdevice.h> 45#include <linux/miscdevice.h>
46#include <linux/mutex.h> 46#include <linux/mutex.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <linux/spinlock.h>
49#include <linux/stat.h> 48#include <linux/stat.h>
50#include <linux/module.h> 49#include <linux/module.h>
51 50
@@ -63,7 +62,7 @@ struct cuse_conn {
63 bool unrestricted_ioctl; 62 bool unrestricted_ioctl;
64}; 63};
65 64
66static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ 65static DEFINE_MUTEX(cuse_lock); /* protects registration */
67static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; 66static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
68static struct class *cuse_class; 67static struct class *cuse_class;
69 68
@@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file)
114 int rc; 113 int rc;
115 114
116 /* look up and get the connection */ 115 /* look up and get the connection */
117 spin_lock(&cuse_lock); 116 mutex_lock(&cuse_lock);
118 list_for_each_entry(pos, cuse_conntbl_head(devt), list) 117 list_for_each_entry(pos, cuse_conntbl_head(devt), list)
119 if (pos->dev->devt == devt) { 118 if (pos->dev->devt == devt) {
120 fuse_conn_get(&pos->fc); 119 fuse_conn_get(&pos->fc);
121 cc = pos; 120 cc = pos;
122 break; 121 break;
123 } 122 }
124 spin_unlock(&cuse_lock); 123 mutex_unlock(&cuse_lock);
125 124
126 /* dead? */ 125 /* dead? */
127 if (!cc) 126 if (!cc)
@@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
267static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) 266static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
268{ 267{
269 char *end = p + len; 268 char *end = p + len;
270 char *key, *val; 269 char *uninitialized_var(key), *uninitialized_var(val);
271 int rc; 270 int rc;
272 271
273 while (true) { 272 while (true) {
@@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev)
305 */ 304 */
306static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) 305static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
307{ 306{
308 struct cuse_conn *cc = fc_to_cc(fc); 307 struct cuse_conn *cc = fc_to_cc(fc), *pos;
309 struct cuse_init_out *arg = req->out.args[0].value; 308 struct cuse_init_out *arg = req->out.args[0].value;
310 struct page *page = req->pages[0]; 309 struct page *page = req->pages[0];
311 struct cuse_devinfo devinfo = { }; 310 struct cuse_devinfo devinfo = { };
312 struct device *dev; 311 struct device *dev;
313 struct cdev *cdev; 312 struct cdev *cdev;
314 dev_t devt; 313 dev_t devt;
315 int rc; 314 int rc, i;
316 315
317 if (req->out.h.error || 316 if (req->out.h.error ||
318 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { 317 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
@@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
356 dev_set_drvdata(dev, cc); 355 dev_set_drvdata(dev, cc);
357 dev_set_name(dev, "%s", devinfo.name); 356 dev_set_name(dev, "%s", devinfo.name);
358 357
358 mutex_lock(&cuse_lock);
359
360 /* make sure the device-name is unique */
361 for (i = 0; i < CUSE_CONNTBL_LEN; ++i) {
362 list_for_each_entry(pos, &cuse_conntbl[i], list)
363 if (!strcmp(dev_name(pos->dev), dev_name(dev)))
364 goto err_unlock;
365 }
366
359 rc = device_add(dev); 367 rc = device_add(dev);
360 if (rc) 368 if (rc)
361 goto err_device; 369 goto err_unlock;
362 370
363 /* register cdev */ 371 /* register cdev */
364 rc = -ENOMEM; 372 rc = -ENOMEM;
365 cdev = cdev_alloc(); 373 cdev = cdev_alloc();
366 if (!cdev) 374 if (!cdev)
367 goto err_device; 375 goto err_unlock;
368 376
369 cdev->owner = THIS_MODULE; 377 cdev->owner = THIS_MODULE;
370 cdev->ops = &cuse_frontend_fops; 378 cdev->ops = &cuse_frontend_fops;
@@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
377 cc->cdev = cdev; 385 cc->cdev = cdev;
378 386
379 /* make the device available */ 387 /* make the device available */
380 spin_lock(&cuse_lock);
381 list_add(&cc->list, cuse_conntbl_head(devt)); 388 list_add(&cc->list, cuse_conntbl_head(devt));
382 spin_unlock(&cuse_lock); 389 mutex_unlock(&cuse_lock);
383 390
384 /* announce device availability */ 391 /* announce device availability */
385 dev_set_uevent_suppress(dev, 0); 392 dev_set_uevent_suppress(dev, 0);
@@ -391,7 +398,8 @@ out:
391 398
392err_cdev: 399err_cdev:
393 cdev_del(cdev); 400 cdev_del(cdev);
394err_device: 401err_unlock:
402 mutex_unlock(&cuse_lock);
395 put_device(dev); 403 put_device(dev);
396err_region: 404err_region:
397 unregister_chrdev_region(devt, 1); 405 unregister_chrdev_region(devt, 1);
@@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
520 int rc; 528 int rc;
521 529
522 /* remove from the conntbl, no more access from this point on */ 530 /* remove from the conntbl, no more access from this point on */
523 spin_lock(&cuse_lock); 531 mutex_lock(&cuse_lock);
524 list_del_init(&cc->list); 532 list_del_init(&cc->list);
525 spin_unlock(&cuse_lock); 533 mutex_unlock(&cuse_lock);
526 534
527 /* remove device */ 535 /* remove device */
528 if (cc->dev) 536 if (cc->dev)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c16335315e5d..e83351aa5bad 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
692 struct page *oldpage = *pagep; 692 struct page *oldpage = *pagep;
693 struct page *newpage; 693 struct page *newpage;
694 struct pipe_buffer *buf = cs->pipebufs; 694 struct pipe_buffer *buf = cs->pipebufs;
695 struct address_space *mapping;
696 pgoff_t index;
697 695
698 unlock_request(cs->fc, cs->req); 696 unlock_request(cs->fc, cs->req);
699 fuse_copy_finish(cs); 697 fuse_copy_finish(cs);
@@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
724 if (fuse_check_page(newpage) != 0) 722 if (fuse_check_page(newpage) != 0)
725 goto out_fallback_unlock; 723 goto out_fallback_unlock;
726 724
727 mapping = oldpage->mapping;
728 index = oldpage->index;
729
730 /* 725 /*
731 * This is a new and locked page, it shouldn't be mapped or 726 * This is a new and locked page, it shouldn't be mapped or
732 * have any special flags on it 727 * have any special flags on it
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e21d4d8f87e3..f3ab824fa302 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2177 return ret; 2177 return ret;
2178} 2178}
2179 2179
2180long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2180static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2181 loff_t length) 2181 loff_t length)
2182{ 2182{
2183 struct fuse_file *ff = file->private_data; 2183 struct fuse_file *ff = file->private_data;
2184 struct fuse_conn *fc = ff->fc; 2184 struct fuse_conn *fc = ff->fc;
@@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2213 2213
2214 return err; 2214 return err;
2215} 2215}
2216EXPORT_SYMBOL_GPL(fuse_file_fallocate);
2217 2216
2218static const struct file_operations fuse_file_operations = { 2217static const struct file_operations fuse_file_operations = {
2219 .llseek = fuse_file_llseek, 2218 .llseek = fuse_file_llseek,
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index b906ed17a839..9802de0f85e6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -281,6 +281,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
281{ 281{
282 struct gfs2_sbd *sdp = gl->gl_sbd; 282 struct gfs2_sbd *sdp = gl->gl_sbd;
283 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 283 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
284 int lvb_needs_unlock = 0;
284 int error; 285 int error;
285 286
286 if (gl->gl_lksb.sb_lkid == 0) { 287 if (gl->gl_lksb.sb_lkid == 0) {
@@ -294,8 +295,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
294 gfs2_update_request_times(gl); 295 gfs2_update_request_times(gl);
295 296
296 /* don't want to skip dlm_unlock writing the lvb when lock is ex */ 297 /* don't want to skip dlm_unlock writing the lvb when lock is ex */
298
299 if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
300 lvb_needs_unlock = 1;
301
297 if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && 302 if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
298 gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) { 303 !lvb_needs_unlock) {
299 gfs2_glock_free(gl); 304 gfs2_glock_free(gl);
300 return; 305 return;
301 } 306 }
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index dd057bc6b65b..fc8dc20fdeb9 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -177,11 +177,31 @@ out_nofree:
177 return mnt; 177 return mnt;
178} 178}
179 179
180static int
181nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
182{
183 if (NFS_FH(dentry->d_inode)->size != 0)
184 return nfs_getattr(mnt, dentry, stat);
185 generic_fillattr(dentry->d_inode, stat);
186 return 0;
187}
188
189static int
190nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
191{
192 if (NFS_FH(dentry->d_inode)->size != 0)
193 return nfs_setattr(dentry, attr);
194 return -EACCES;
195}
196
180const struct inode_operations nfs_mountpoint_inode_operations = { 197const struct inode_operations nfs_mountpoint_inode_operations = {
181 .getattr = nfs_getattr, 198 .getattr = nfs_getattr,
199 .setattr = nfs_setattr,
182}; 200};
183 201
184const struct inode_operations nfs_referral_inode_operations = { 202const struct inode_operations nfs_referral_inode_operations = {
203 .getattr = nfs_namespace_getattr,
204 .setattr = nfs_namespace_setattr,
185}; 205};
186 206
187static void nfs_expire_automounts(struct work_struct *work) 207static void nfs_expire_automounts(struct work_struct *work)
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index acc347268124..2e9779b58b7a 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -236,11 +236,10 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
236 error = nfs4_discover_server_trunking(clp, &old); 236 error = nfs4_discover_server_trunking(clp, &old);
237 if (error < 0) 237 if (error < 0)
238 goto error; 238 goto error;
239 nfs_put_client(clp);
239 if (clp != old) { 240 if (clp != old) {
240 clp->cl_preserve_clid = true; 241 clp->cl_preserve_clid = true;
241 nfs_put_client(clp);
242 clp = old; 242 clp = old;
243 atomic_inc(&clp->cl_count);
244 } 243 }
245 244
246 return clp; 245 return clp;
@@ -306,7 +305,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
306 .clientid = new->cl_clientid, 305 .clientid = new->cl_clientid,
307 .confirm = new->cl_confirm, 306 .confirm = new->cl_confirm,
308 }; 307 };
309 int status; 308 int status = -NFS4ERR_STALE_CLIENTID;
310 309
311 spin_lock(&nn->nfs_client_lock); 310 spin_lock(&nn->nfs_client_lock);
312 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 311 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -332,40 +331,33 @@ int nfs40_walk_client_list(struct nfs_client *new,
332 331
333 if (prev) 332 if (prev)
334 nfs_put_client(prev); 333 nfs_put_client(prev);
334 prev = pos;
335 335
336 status = nfs4_proc_setclientid_confirm(pos, &clid, cred); 336 status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
337 if (status == 0) { 337 switch (status) {
338 case -NFS4ERR_STALE_CLIENTID:
339 break;
340 case 0:
338 nfs4_swap_callback_idents(pos, new); 341 nfs4_swap_callback_idents(pos, new);
339 342
340 nfs_put_client(pos); 343 prev = NULL;
341 *result = pos; 344 *result = pos;
342 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 345 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
343 __func__, pos, atomic_read(&pos->cl_count)); 346 __func__, pos, atomic_read(&pos->cl_count));
344 return 0; 347 default:
345 } 348 goto out;
346 if (status != -NFS4ERR_STALE_CLIENTID) {
347 nfs_put_client(pos);
348 dprintk("NFS: <-- %s status = %d, no result\n",
349 __func__, status);
350 return status;
351 } 349 }
352 350
353 spin_lock(&nn->nfs_client_lock); 351 spin_lock(&nn->nfs_client_lock);
354 prev = pos;
355 } 352 }
353 spin_unlock(&nn->nfs_client_lock);
356 354
357 /* 355 /* No match found. The server lost our clientid */
358 * No matching nfs_client found. This should be impossible, 356out:
359 * because the new nfs_client has already been added to
360 * nfs_client_list by nfs_get_client().
361 *
362 * Don't BUG(), since the caller is holding a mutex.
363 */
364 if (prev) 357 if (prev)
365 nfs_put_client(prev); 358 nfs_put_client(prev);
366 spin_unlock(&nn->nfs_client_lock); 359 dprintk("NFS: <-- %s status = %d\n", __func__, status);
367 pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); 360 return status;
368 return -NFS4ERR_STALE_CLIENTID;
369} 361}
370 362
371#ifdef CONFIG_NFS_V4_1 363#ifdef CONFIG_NFS_V4_1
@@ -432,7 +424,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
432{ 424{
433 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 425 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
434 struct nfs_client *pos, *n, *prev = NULL; 426 struct nfs_client *pos, *n, *prev = NULL;
435 int error; 427 int status = -NFS4ERR_STALE_CLIENTID;
436 428
437 spin_lock(&nn->nfs_client_lock); 429 spin_lock(&nn->nfs_client_lock);
438 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { 430 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -448,14 +440,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
448 nfs_put_client(prev); 440 nfs_put_client(prev);
449 prev = pos; 441 prev = pos;
450 442
451 error = nfs_wait_client_init_complete(pos); 443 nfs4_schedule_lease_recovery(pos);
452 if (error < 0) { 444 status = nfs_wait_client_init_complete(pos);
445 if (status < 0) {
453 nfs_put_client(pos); 446 nfs_put_client(pos);
454 spin_lock(&nn->nfs_client_lock); 447 spin_lock(&nn->nfs_client_lock);
455 continue; 448 continue;
456 } 449 }
457 450 status = pos->cl_cons_state;
458 spin_lock(&nn->nfs_client_lock); 451 spin_lock(&nn->nfs_client_lock);
452 if (status < 0)
453 continue;
459 } 454 }
460 455
461 if (pos->rpc_ops != new->rpc_ops) 456 if (pos->rpc_ops != new->rpc_ops)
@@ -473,6 +468,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
473 if (!nfs4_match_serverowners(pos, new)) 468 if (!nfs4_match_serverowners(pos, new))
474 continue; 469 continue;
475 470
471 atomic_inc(&pos->cl_count);
476 spin_unlock(&nn->nfs_client_lock); 472 spin_unlock(&nn->nfs_client_lock);
477 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 473 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
478 __func__, pos, atomic_read(&pos->cl_count)); 474 __func__, pos, atomic_read(&pos->cl_count));
@@ -481,16 +477,10 @@ int nfs41_walk_client_list(struct nfs_client *new,
481 return 0; 477 return 0;
482 } 478 }
483 479
484 /* 480 /* No matching nfs_client found. */
485 * No matching nfs_client found. This should be impossible,
486 * because the new nfs_client has already been added to
487 * nfs_client_list by nfs_get_client().
488 *
489 * Don't BUG(), since the caller is holding a mutex.
490 */
491 spin_unlock(&nn->nfs_client_lock); 481 spin_unlock(&nn->nfs_client_lock);
492 pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); 482 dprintk("NFS: <-- %s status = %d\n", __func__, status);
493 return -NFS4ERR_STALE_CLIENTID; 483 return status;
494} 484}
495#endif /* CONFIG_NFS_V4_1 */ 485#endif /* CONFIG_NFS_V4_1 */
496 486
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9448c579d41a..e61f68d5ef21 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -136,16 +136,11 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
136 clp->cl_confirm = clid.confirm; 136 clp->cl_confirm = clid.confirm;
137 137
138 status = nfs40_walk_client_list(clp, result, cred); 138 status = nfs40_walk_client_list(clp, result, cred);
139 switch (status) { 139 if (status == 0) {
140 case -NFS4ERR_STALE_CLIENTID:
141 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
142 case 0:
143 /* Sustain the lease, even if it's empty. If the clientid4 140 /* Sustain the lease, even if it's empty. If the clientid4
144 * goes stale it's of no use for trunking discovery. */ 141 * goes stale it's of no use for trunking discovery. */
145 nfs4_schedule_state_renewal(*result); 142 nfs4_schedule_state_renewal(*result);
146 break;
147 } 143 }
148
149out: 144out:
150 return status; 145 return status;
151} 146}
@@ -1863,6 +1858,7 @@ again:
1863 case -ETIMEDOUT: 1858 case -ETIMEDOUT:
1864 case -EAGAIN: 1859 case -EAGAIN:
1865 ssleep(1); 1860 ssleep(1);
1861 case -NFS4ERR_STALE_CLIENTID:
1866 dprintk("NFS: %s after status %d, retrying\n", 1862 dprintk("NFS: %s after status %d, retrying\n",
1867 __func__, status); 1863 __func__, status);
1868 goto again; 1864 goto again;
@@ -2022,8 +2018,18 @@ static int nfs4_reset_session(struct nfs_client *clp)
2022 nfs4_begin_drain_session(clp); 2018 nfs4_begin_drain_session(clp);
2023 cred = nfs4_get_exchange_id_cred(clp); 2019 cred = nfs4_get_exchange_id_cred(clp);
2024 status = nfs4_proc_destroy_session(clp->cl_session, cred); 2020 status = nfs4_proc_destroy_session(clp->cl_session, cred);
2025 if (status && status != -NFS4ERR_BADSESSION && 2021 switch (status) {
2026 status != -NFS4ERR_DEADSESSION) { 2022 case 0:
2023 case -NFS4ERR_BADSESSION:
2024 case -NFS4ERR_DEADSESSION:
2025 break;
2026 case -NFS4ERR_BACK_CHAN_BUSY:
2027 case -NFS4ERR_DELAY:
2028 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2029 status = 0;
2030 ssleep(1);
2031 goto out;
2032 default:
2027 status = nfs4_recovery_handle_error(clp, status); 2033 status = nfs4_recovery_handle_error(clp, status);
2028 goto out; 2034 goto out;
2029 } 2035 }
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2e7e8c878e5d..b056b1628722 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2589,27 +2589,23 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
2589 struct nfs_server *server; 2589 struct nfs_server *server;
2590 struct dentry *mntroot = ERR_PTR(-ENOMEM); 2590 struct dentry *mntroot = ERR_PTR(-ENOMEM);
2591 struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod; 2591 struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
2592 int error;
2593 2592
2594 dprintk("--> nfs_xdev_mount_common()\n"); 2593 dprintk("--> nfs_xdev_mount()\n");
2595 2594
2596 mount_info.mntfh = mount_info.cloned->fh; 2595 mount_info.mntfh = mount_info.cloned->fh;
2597 2596
2598 /* create a new volume representation */ 2597 /* create a new volume representation */
2599 server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); 2598 server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
2600 if (IS_ERR(server)) {
2601 error = PTR_ERR(server);
2602 goto out_err;
2603 }
2604 2599
2605 mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod); 2600 if (IS_ERR(server))
2606 dprintk("<-- nfs_xdev_mount_common() = 0\n"); 2601 mntroot = ERR_CAST(server);
2607out: 2602 else
2608 return mntroot; 2603 mntroot = nfs_fs_mount_common(server, flags,
2604 dev_name, &mount_info, nfs_mod);
2609 2605
2610out_err: 2606 dprintk("<-- nfs_xdev_mount() = %ld\n",
2611 dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error); 2607 IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L);
2612 goto out; 2608 return mntroot;
2613} 2609}
2614 2610
2615#if IS_ENABLED(CONFIG_NFS_V4) 2611#if IS_ENABLED(CONFIG_NFS_V4)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index fdb180769485..f3859354e41a 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
664 if (ret < 0) 664 if (ret < 0)
665 printk(KERN_ERR "NILFS: GC failed during preparation: " 665 printk(KERN_ERR "NILFS: GC failed during preparation: "
666 "cannot read source blocks: err=%d\n", ret); 666 "cannot read source blocks: err=%d\n", ret);
667 else 667 else {
668 if (nilfs_sb_need_update(nilfs))
669 set_nilfs_discontinued(nilfs);
668 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); 670 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
671 }
669 672
670 nilfs_remove_all_gcinodes(nilfs); 673 nilfs_remove_all_gcinodes(nilfs);
671 clear_nilfs_gc_running(nilfs); 674 clear_nilfs_gc_running(nilfs);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 6a91e6ffbcbd..f7ed9ee46eb9 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -449,7 +449,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
449 do { 449 do {
450 min_flt += t->min_flt; 450 min_flt += t->min_flt;
451 maj_flt += t->maj_flt; 451 maj_flt += t->maj_flt;
452 gtime += t->gtime; 452 gtime += task_gtime(t);
453 t = next_thread(t); 453 t = next_thread(t);
454 } while (t != task); 454 } while (t != task);
455 455
@@ -472,7 +472,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
472 min_flt = task->min_flt; 472 min_flt = task->min_flt;
473 maj_flt = task->maj_flt; 473 maj_flt = task->maj_flt;
474 task_cputime_adjusted(task, &utime, &stime); 474 task_cputime_adjusted(task, &utime, &stime);
475 gtime = task->gtime; 475 gtime = task_gtime(task);
476 } 476 }
477 477
478 /* scale priority and nice values from timeslices to -20..20 */ 478 /* scale priority and nice values from timeslices to -20..20 */
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 7003e5266f25..288f068740f6 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -167,12 +167,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
167static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz) 167static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
168{ 168{
169 char *hdr; 169 char *hdr;
170 struct timeval timestamp; 170 struct timespec timestamp;
171 size_t len; 171 size_t len;
172 172
173 do_gettimeofday(&timestamp); 173 /* Report zeroed timestamp if called before timekeeping has resumed. */
174 if (__getnstimeofday(&timestamp)) {
175 timestamp.tv_sec = 0;
176 timestamp.tv_nsec = 0;
177 }
174 hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n", 178 hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
175 (long)timestamp.tv_sec, (long)timestamp.tv_usec); 179 (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000));
176 WARN_ON_ONCE(!hdr); 180 WARN_ON_ONCE(!hdr);
177 len = hdr ? strlen(hdr) : 0; 181 len = hdr ? strlen(hdr) : 0;
178 persistent_ram_write(prz, hdr, len); 182 persistent_ram_write(prz, hdr, len);
diff --git a/fs/select.c b/fs/select.c
index 2ef72d965036..8c1c96c27062 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/rcupdate.h> 27#include <linux/rcupdate.h>
28#include <linux/hrtimer.h> 28#include <linux/hrtimer.h>
29#include <linux/sched/rt.h>
29 30
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31 32
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 2df555c66d57..aec3d5c98c94 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -205,6 +205,48 @@ void sysfs_unmerge_group(struct kobject *kobj,
205} 205}
206EXPORT_SYMBOL_GPL(sysfs_unmerge_group); 206EXPORT_SYMBOL_GPL(sysfs_unmerge_group);
207 207
208/**
209 * sysfs_add_link_to_group - add a symlink to an attribute group.
210 * @kobj: The kobject containing the group.
211 * @group_name: The name of the group.
212 * @target: The target kobject of the symlink to create.
213 * @link_name: The name of the symlink to create.
214 */
215int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
216 struct kobject *target, const char *link_name)
217{
218 struct sysfs_dirent *dir_sd;
219 int error = 0;
220
221 dir_sd = sysfs_get_dirent(kobj->sd, NULL, group_name);
222 if (!dir_sd)
223 return -ENOENT;
224
225 error = sysfs_create_link_sd(dir_sd, target, link_name);
226 sysfs_put(dir_sd);
227
228 return error;
229}
230EXPORT_SYMBOL_GPL(sysfs_add_link_to_group);
231
232/**
233 * sysfs_remove_link_from_group - remove a symlink from an attribute group.
234 * @kobj: The kobject containing the group.
235 * @group_name: The name of the group.
236 * @link_name: The name of the symlink to remove.
237 */
238void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
239 const char *link_name)
240{
241 struct sysfs_dirent *dir_sd;
242
243 dir_sd = sysfs_get_dirent(kobj->sd, NULL, group_name);
244 if (dir_sd) {
245 sysfs_hash_and_remove(dir_sd, NULL, link_name);
246 sysfs_put(dir_sd);
247 }
248}
249EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
208 250
209EXPORT_SYMBOL_GPL(sysfs_create_group); 251EXPORT_SYMBOL_GPL(sysfs_create_group);
210EXPORT_SYMBOL_GPL(sysfs_update_group); 252EXPORT_SYMBOL_GPL(sysfs_update_group);
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 3c9eb5624f5e..8c940df97a52 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -21,26 +21,17 @@
21 21
22#include "sysfs.h" 22#include "sysfs.h"
23 23
24static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target, 24static int sysfs_do_create_link_sd(struct sysfs_dirent *parent_sd,
25 const char *name, int warn) 25 struct kobject *target,
26 const char *name, int warn)
26{ 27{
27 struct sysfs_dirent *parent_sd = NULL;
28 struct sysfs_dirent *target_sd = NULL; 28 struct sysfs_dirent *target_sd = NULL;
29 struct sysfs_dirent *sd = NULL; 29 struct sysfs_dirent *sd = NULL;
30 struct sysfs_addrm_cxt acxt; 30 struct sysfs_addrm_cxt acxt;
31 enum kobj_ns_type ns_type; 31 enum kobj_ns_type ns_type;
32 int error; 32 int error;
33 33
34 BUG_ON(!name); 34 BUG_ON(!name || !parent_sd);
35
36 if (!kobj)
37 parent_sd = &sysfs_root;
38 else
39 parent_sd = kobj->sd;
40
41 error = -EFAULT;
42 if (!parent_sd)
43 goto out_put;
44 35
45 /* target->sd can go away beneath us but is protected with 36 /* target->sd can go away beneath us but is protected with
46 * sysfs_assoc_lock. Fetch target_sd from it. 37 * sysfs_assoc_lock. Fetch target_sd from it.
@@ -96,6 +87,34 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
96} 87}
97 88
98/** 89/**
90 * sysfs_create_link_sd - create symlink to a given object.
91 * @sd: directory we're creating the link in.
92 * @target: object we're pointing to.
93 * @name: name of the symlink.
94 */
95int sysfs_create_link_sd(struct sysfs_dirent *sd, struct kobject *target,
96 const char *name)
97{
98 return sysfs_do_create_link_sd(sd, target, name, 1);
99}
100
101static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
102 const char *name, int warn)
103{
104 struct sysfs_dirent *parent_sd = NULL;
105
106 if (!kobj)
107 parent_sd = &sysfs_root;
108 else
109 parent_sd = kobj->sd;
110
111 if (!parent_sd)
112 return -EFAULT;
113
114 return sysfs_do_create_link_sd(parent_sd, target, name, warn);
115}
116
117/**
99 * sysfs_create_link - create symlink between two objects. 118 * sysfs_create_link - create symlink between two objects.
100 * @kobj: object whose directory we're creating the link in. 119 * @kobj: object whose directory we're creating the link in.
101 * @target: object we're pointing to. 120 * @target: object we're pointing to.
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d73c0932bbd6..d1e4043eb0c3 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -240,3 +240,5 @@ void unmap_bin_file(struct sysfs_dirent *attr_sd);
240 * symlink.c 240 * symlink.c
241 */ 241 */
242extern const struct inode_operations sysfs_symlink_inode_operations; 242extern const struct inode_operations sysfs_symlink_inode_operations;
243int sysfs_create_link_sd(struct sysfs_dirent *sd, struct kobject *target,
244 const char *name);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 4111a40ebe1a..5f707e537171 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -86,11 +86,11 @@ xfs_destroy_ioend(
86 } 86 }
87 87
88 if (ioend->io_iocb) { 88 if (ioend->io_iocb) {
89 inode_dio_done(ioend->io_inode);
89 if (ioend->io_isasync) { 90 if (ioend->io_isasync) {
90 aio_complete(ioend->io_iocb, ioend->io_error ? 91 aio_complete(ioend->io_iocb, ioend->io_error ?
91 ioend->io_error : ioend->io_result, 0); 92 ioend->io_error : ioend->io_result, 0);
92 } 93 }
93 inode_dio_done(ioend->io_inode);
94 } 94 }
95 95
96 mempool_free(ioend, xfs_ioend_pool); 96 mempool_free(ioend, xfs_ioend_pool);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 0e92d12765d2..cdb2d3348583 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4680,9 +4680,6 @@ __xfs_bmapi_allocate(
4680 return error; 4680 return error;
4681 } 4681 }
4682 4682
4683 if (bma->flags & XFS_BMAPI_STACK_SWITCH)
4684 bma->stack_switch = 1;
4685
4686 error = xfs_bmap_alloc(bma); 4683 error = xfs_bmap_alloc(bma);
4687 if (error) 4684 if (error)
4688 return error; 4685 return error;
@@ -4956,6 +4953,9 @@ xfs_bmapi_write(
4956 bma.flist = flist; 4953 bma.flist = flist;
4957 bma.firstblock = firstblock; 4954 bma.firstblock = firstblock;
4958 4955
4956 if (flags & XFS_BMAPI_STACK_SWITCH)
4957 bma.stack_switch = 1;
4958
4959 while (bno < end && n < *nmap) { 4959 while (bno < end && n < *nmap) {
4960 inhole = eof || bma.got.br_startoff > bno; 4960 inhole = eof || bma.got.br_startoff > bno;
4961 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4961 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 56d1614760cf..fbbb9eb92e32 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -487,6 +487,7 @@ _xfs_buf_find(
487 struct rb_node *parent; 487 struct rb_node *parent;
488 xfs_buf_t *bp; 488 xfs_buf_t *bp;
489 xfs_daddr_t blkno = map[0].bm_bn; 489 xfs_daddr_t blkno = map[0].bm_bn;
490 xfs_daddr_t eofs;
490 int numblks = 0; 491 int numblks = 0;
491 int i; 492 int i;
492 493
@@ -498,6 +499,23 @@ _xfs_buf_find(
498 ASSERT(!(numbytes < (1 << btp->bt_sshift))); 499 ASSERT(!(numbytes < (1 << btp->bt_sshift)));
499 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); 500 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
500 501
502 /*
503 * Corrupted block numbers can get through to here, unfortunately, so we
504 * have to check that the buffer falls within the filesystem bounds.
505 */
506 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
507 if (blkno >= eofs) {
508 /*
509 * XXX (dgc): we should really be returning EFSCORRUPTED here,
510 * but none of the higher level infrastructure supports
511 * returning a specific error on buffer lookup failures.
512 */
513 xfs_alert(btp->bt_mount,
514 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
515 __func__, blkno, eofs);
516 return NULL;
517 }
518
501 /* get tree root */ 519 /* get tree root */
502 pag = xfs_perag_get(btp->bt_mount, 520 pag = xfs_perag_get(btp->bt_mount,
503 xfs_daddr_to_agno(btp->bt_mount, blkno)); 521 xfs_daddr_to_agno(btp->bt_mount, blkno));
@@ -1487,6 +1505,8 @@ restart:
1487 while (!list_empty(&btp->bt_lru)) { 1505 while (!list_empty(&btp->bt_lru)) {
1488 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); 1506 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1489 if (atomic_read(&bp->b_hold) > 1) { 1507 if (atomic_read(&bp->b_hold) > 1) {
1508 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1509 list_move_tail(&bp->b_lru, &btp->bt_lru);
1490 spin_unlock(&btp->bt_lru_lock); 1510 spin_unlock(&btp->bt_lru_lock);
1491 delay(100); 1511 delay(100);
1492 goto restart; 1512 goto restart;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 77b09750e92c..3f9949fee391 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -652,7 +652,10 @@ xfs_buf_item_unlock(
652 652
653 /* 653 /*
654 * If the buf item isn't tracking any data, free it, otherwise drop the 654 * If the buf item isn't tracking any data, free it, otherwise drop the
655 * reference we hold to it. 655 * reference we hold to it. If we are aborting the transaction, this may
656 * be the only reference to the buf item, so we free it anyway
657 * regardless of whether it is dirty or not. A dirty abort implies a
658 * shutdown, anyway.
656 */ 659 */
657 clean = 1; 660 clean = 1;
658 for (i = 0; i < bip->bli_format_count; i++) { 661 for (i = 0; i < bip->bli_format_count; i++) {
@@ -664,7 +667,12 @@ xfs_buf_item_unlock(
664 } 667 }
665 if (clean) 668 if (clean)
666 xfs_buf_item_relse(bp); 669 xfs_buf_item_relse(bp);
667 else 670 else if (aborted) {
671 if (atomic_dec_and_test(&bip->bli_refcount)) {
672 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
673 xfs_buf_item_relse(bp);
674 }
675 } else
668 atomic_dec(&bip->bli_refcount); 676 atomic_dec(&bip->bli_refcount);
669 677
670 if (!hold) 678 if (!hold)
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index d0e9c74d3d96..a8bd26b82ecb 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -246,10 +246,10 @@ xfs_swap_extents(
246 goto out_unlock; 246 goto out_unlock;
247 } 247 }
248 248
249 error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); 249 error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
250 if (error) 250 if (error)
251 goto out_unlock; 251 goto out_unlock;
252 truncate_pagecache_range(VFS_I(ip), 0, -1); 252 truncate_pagecache_range(VFS_I(tip), 0, -1);
253 253
254 /* Verify O_DIRECT for ftmp */ 254 /* Verify O_DIRECT for ftmp */
255 if (VN_CACHED(VFS_I(tip)) != 0) { 255 if (VN_CACHED(VFS_I(tip)) != 0) {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index add06b4e9a63..364818eef40e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -351,6 +351,15 @@ xfs_iomap_prealloc_size(
351 } 351 }
352 if (shift) 352 if (shift)
353 alloc_blocks >>= shift; 353 alloc_blocks >>= shift;
354
355 /*
356 * If we are still trying to allocate more space than is
357 * available, squash the prealloc hard. This can happen if we
358 * have a large file on a small filesystem and the above
359 * lowspace thresholds are smaller than MAXEXTLEN.
360 */
361 while (alloc_blocks >= freesp)
362 alloc_blocks >>= 4;
354 } 363 }
355 364
356 if (alloc_blocks < mp->m_writeio_blocks) 365 if (alloc_blocks < mp->m_writeio_blocks)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index da508463ff10..7d6df7c00c36 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -658,7 +658,7 @@ xfs_sb_quiet_read_verify(
658 return; 658 return;
659 } 659 }
660 /* quietly fail */ 660 /* quietly fail */
661 xfs_buf_ioerror(bp, EFSCORRUPTED); 661 xfs_buf_ioerror(bp, EWRONGFS);
662} 662}
663 663
664static void 664static void
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 2e137d4a85ae..16a812977eab 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -341,6 +341,7 @@ DEFINE_BUF_EVENT(xfs_buf_item_relse);
341DEFINE_BUF_EVENT(xfs_buf_item_iodone); 341DEFINE_BUF_EVENT(xfs_buf_item_iodone);
342DEFINE_BUF_EVENT(xfs_buf_item_iodone_async); 342DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
343DEFINE_BUF_EVENT(xfs_buf_error_relse); 343DEFINE_BUF_EVENT(xfs_buf_error_relse);
344DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
344DEFINE_BUF_EVENT(xfs_trans_read_buf_io); 345DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
345DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); 346DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
346 347
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index a1e45cdd729a..c927a0b1de78 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 0943457e0fa5..14ceff788c40 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -138,7 +138,7 @@
138 138
139/* Maximum sleep allowed via Sleep() operator */ 139/* Maximum sleep allowed via Sleep() operator */
140 140
141#define ACPI_MAX_SLEEP 2000 /* Two seconds */ 141#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */
142 142
143/* Address Range lists are per-space_id (Memory and I/O only) */ 143/* Address Range lists are per-space_id (Memory and I/O only) */
144 144
@@ -150,11 +150,6 @@
150 * 150 *
151 *****************************************************************************/ 151 *****************************************************************************/
152 152
153/* Number of distinct GPE register blocks and register width */
154
155#define ACPI_MAX_GPE_BLOCKS 2
156#define ACPI_GPE_REGISTER_WIDTH 8
157
158/* Method info (in WALK_STATE), containing local variables and argumetns */ 153/* Method info (in WALK_STATE), containing local variables and argumetns */
159 154
160#define ACPI_METHOD_NUM_LOCALS 8 155#define ACPI_METHOD_NUM_LOCALS 8
@@ -163,12 +158,6 @@
163#define ACPI_METHOD_NUM_ARGS 7 158#define ACPI_METHOD_NUM_ARGS 7
164#define ACPI_METHOD_MAX_ARG 6 159#define ACPI_METHOD_MAX_ARG 6
165 160
166/* Length of _HID, _UID, _CID, and UUID values */
167
168#define ACPI_DEVICE_ID_LENGTH 0x09
169#define ACPI_MAX_CID_LENGTH 48
170#define ACPI_UUID_LENGTH 16
171
172/* 161/*
173 * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG 162 * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG
174 */ 163 */
@@ -186,17 +175,6 @@
186 */ 175 */
187#define ACPI_RESULTS_OBJ_NUM_MAX 255 176#define ACPI_RESULTS_OBJ_NUM_MAX 255
188 177
189/* Names within the namespace are 4 bytes long */
190
191#define ACPI_NAME_SIZE 4
192#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
193#define ACPI_PATH_SEPARATOR '.'
194
195/* Sizes for ACPI table headers */
196
197#define ACPI_OEM_ID_SIZE 6
198#define ACPI_OEM_TABLE_ID_SIZE 8
199
200/* Constants used in searching for the RSDP in low memory */ 178/* Constants used in searching for the RSDP in low memory */
201 179
202#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */ 180#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
@@ -213,6 +191,7 @@
213/* Maximum space_ids for Operation Regions */ 191/* Maximum space_ids for Operation Regions */
214 192
215#define ACPI_MAX_ADDRESS_SPACE 255 193#define ACPI_MAX_ADDRESS_SPACE 255
194#define ACPI_NUM_DEFAULT_SPACES 4
216 195
217/* Array sizes. Used for range checking also */ 196/* Array sizes. Used for range checking also */
218 197
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 6c3890e02140..9bf59d0e8aaa 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7665df663284..ce08ef7d969c 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 2457ac849655..9885276178e0 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -72,6 +72,7 @@
72#define ACPI_EXAMPLE 0x00004000 72#define ACPI_EXAMPLE 0x00004000
73#define ACPI_DRIVER 0x00008000 73#define ACPI_DRIVER 0x00008000
74#define DT_COMPILER 0x00010000 74#define DT_COMPILER 0x00010000
75#define ASL_PREPROCESSOR 0x00020000
75 76
76#define ACPI_ALL_COMPONENTS 0x0001FFFF 77#define ACPI_ALL_COMPONENTS 0x0001FFFF
77#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) 78#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS)
@@ -262,18 +263,140 @@
262 * Common parameters used for debug output functions: 263 * Common parameters used for debug output functions:
263 * line number, function name, module(file) name, component ID 264 * line number, function name, module(file) name, component ID
264 */ 265 */
265#define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT 266#define ACPI_DEBUG_PARAMETERS \
267 __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
268
269/* Check if debug output is currently dynamically enabled */
270
271#define ACPI_IS_DEBUG_ENABLED(level, component) \
272 ((level & acpi_dbg_level) && (component & acpi_dbg_layer))
266 273
267/* 274/*
268 * Master debug print macros 275 * Master debug print macros
269 * Print message if and only if: 276 * Print message if and only if:
270 * 1) Debug print for the current component is enabled 277 * 1) Debug print for the current component is enabled
271 * 2) Debug error level or trace level for the print statement is enabled 278 * 2) Debug error level or trace level for the print statement is enabled
279 *
280 * November 2012: Moved the runtime check for whether to actually emit the
281 * debug message outside of the print function itself. This improves overall
282 * performance at a relatively small code cost. Implementation involves the
283 * use of variadic macros supported by C99.
284 *
285 * Note: the ACPI_DO_WHILE0 macro is used to prevent some compilers from
286 * complaining about these constructs. On other compilers the do...while
287 * adds some extra code, so this feature is optional.
272 */ 288 */
273#define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist 289#ifdef ACPI_USE_DO_WHILE_0
274#define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist 290#define ACPI_DO_WHILE0(a) do a while(0)
275
276#else 291#else
292#define ACPI_DO_WHILE0(a) a
293#endif
294
295/* DEBUG_PRINT functions */
296
297#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist
298#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist
299
300/* Helper macros for DEBUG_PRINT */
301
302#define ACPI_DO_DEBUG_PRINT(function, level, line, filename, modulename, component, ...) \
303 ACPI_DO_WHILE0 ({ \
304 if (ACPI_IS_DEBUG_ENABLED (level, component)) \
305 { \
306 function (level, line, filename, modulename, component, __VA_ARGS__); \
307 } \
308 })
309
310#define ACPI_ACTUAL_DEBUG(level, line, filename, modulename, component, ...) \
311 ACPI_DO_DEBUG_PRINT (acpi_debug_print, level, line, \
312 filename, modulename, component, __VA_ARGS__)
313
314#define ACPI_ACTUAL_DEBUG_RAW(level, line, filename, modulename, component, ...) \
315 ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
316 filename, modulename, component, __VA_ARGS__)
317
318/*
319 * Function entry tracing
320 *
321 * The name of the function is emitted as a local variable that is
322 * intended to be used by both the entry trace and the exit trace.
323 */
324
325/* Helper macro */
326
327#define ACPI_TRACE_ENTRY(name, function, cast, param) \
328 ACPI_FUNCTION_NAME (name) \
329 function (ACPI_DEBUG_PARAMETERS, cast (param))
330
331/* The actual entry trace macros */
332
333#define ACPI_FUNCTION_TRACE(name) \
334 ACPI_FUNCTION_NAME(name) \
335 acpi_ut_trace (ACPI_DEBUG_PARAMETERS)
336
337#define ACPI_FUNCTION_TRACE_PTR(name, pointer) \
338 ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, (void *), pointer)
339
340#define ACPI_FUNCTION_TRACE_U32(name, value) \
341 ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, (u32), value)
342
343#define ACPI_FUNCTION_TRACE_STR(name, string) \
344 ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, (char *), string)
345
346#define ACPI_FUNCTION_ENTRY() \
347 acpi_ut_track_stack_ptr()
348
349/*
350 * Function exit tracing
351 *
352 * These macros include a return statement. This is usually considered
353 * bad form, but having a separate exit macro before the actual return
354 * is very ugly and difficult to maintain.
355 *
356 * One of the FUNCTION_TRACE macros above must be used in conjunction
357 * with these macros so that "_AcpiFunctionName" is defined.
358 */
359
360/* Exit trace helper macro */
361
362#define ACPI_TRACE_EXIT(function, cast, param) \
363 ACPI_DO_WHILE0 ({ \
364 function (ACPI_DEBUG_PARAMETERS, cast (param)); \
365 return ((param)); \
366 })
367
368/* The actual exit macros */
369
370#define return_VOID \
371 ACPI_DO_WHILE0 ({ \
372 acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
373 return; \
374 })
375
376#define return_ACPI_STATUS(status) \
377 ACPI_TRACE_EXIT (acpi_ut_status_exit, (acpi_status), status)
378
379#define return_PTR(pointer) \
380 ACPI_TRACE_EXIT (acpi_ut_ptr_exit, (u8 *), pointer)
381
382#define return_VALUE(value) \
383 ACPI_TRACE_EXIT (acpi_ut_value_exit, (u64), value)
384
385/* Conditional execution */
386
387#define ACPI_DEBUG_EXEC(a) a
388#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
389#define _VERBOSE_STRUCTURES
390
391/* Various object display routines for debug */
392
393#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
394#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
395#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
396#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
397#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
398
399#else /* ACPI_DEBUG_OUTPUT */
277/* 400/*
278 * This is the non-debug case -- make everything go away, 401 * This is the non-debug case -- make everything go away,
279 * leaving no executable debug code! 402 * leaving no executable debug code!
@@ -281,6 +404,32 @@
281#define ACPI_FUNCTION_NAME(a) 404#define ACPI_FUNCTION_NAME(a)
282#define ACPI_DEBUG_PRINT(pl) 405#define ACPI_DEBUG_PRINT(pl)
283#define ACPI_DEBUG_PRINT_RAW(pl) 406#define ACPI_DEBUG_PRINT_RAW(pl)
407#define ACPI_DEBUG_EXEC(a)
408#define ACPI_DEBUG_ONLY_MEMBERS(a)
409#define ACPI_FUNCTION_TRACE(a)
410#define ACPI_FUNCTION_TRACE_PTR(a, b)
411#define ACPI_FUNCTION_TRACE_U32(a, b)
412#define ACPI_FUNCTION_TRACE_STR(a, b)
413#define ACPI_FUNCTION_EXIT
414#define ACPI_FUNCTION_STATUS_EXIT(s)
415#define ACPI_FUNCTION_VALUE_EXIT(s)
416#define ACPI_FUNCTION_ENTRY()
417#define ACPI_DUMP_STACK_ENTRY(a)
418#define ACPI_DUMP_OPERANDS(a, b, c)
419#define ACPI_DUMP_ENTRY(a, b)
420#define ACPI_DUMP_TABLES(a, b)
421#define ACPI_DUMP_PATHNAME(a, b, c, d)
422#define ACPI_DUMP_BUFFER(a, b)
423#define ACPI_DEBUG_PRINT(pl)
424#define ACPI_DEBUG_PRINT_RAW(pl)
425#define ACPI_IS_DEBUG_ENABLED(level, component) 0
426
427/* Return macros must have a return statement at the minimum */
428
429#define return_VOID return
430#define return_ACPI_STATUS(s) return(s)
431#define return_VALUE(s) return(s)
432#define return_PTR(s) return(s)
284 433
285#endif /* ACPI_DEBUG_OUTPUT */ 434#endif /* ACPI_DEBUG_OUTPUT */
286 435
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index c1ea8436961f..618787715d56 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 7ced5dc20dd3..227ba7dc293d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -84,28 +84,29 @@ struct acpi_driver;
84struct acpi_device; 84struct acpi_device;
85 85
86/* 86/*
87 * ACPI Scan Handler
88 * -----------------
89 */
90
91struct acpi_scan_handler {
92 const struct acpi_device_id *ids;
93 struct list_head list_node;
94 int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
95 void (*detach)(struct acpi_device *dev);
96};
97
98/*
87 * ACPI Driver 99 * ACPI Driver
88 * ----------- 100 * -----------
89 */ 101 */
90 102
91typedef int (*acpi_op_add) (struct acpi_device * device); 103typedef int (*acpi_op_add) (struct acpi_device * device);
92typedef int (*acpi_op_remove) (struct acpi_device * device, int type); 104typedef int (*acpi_op_remove) (struct acpi_device * device);
93typedef int (*acpi_op_start) (struct acpi_device * device);
94typedef int (*acpi_op_bind) (struct acpi_device * device);
95typedef int (*acpi_op_unbind) (struct acpi_device * device);
96typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); 105typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
97 106
98struct acpi_bus_ops {
99 u32 acpi_op_add:1;
100 u32 acpi_op_start:1;
101};
102
103struct acpi_device_ops { 107struct acpi_device_ops {
104 acpi_op_add add; 108 acpi_op_add add;
105 acpi_op_remove remove; 109 acpi_op_remove remove;
106 acpi_op_start start;
107 acpi_op_bind bind;
108 acpi_op_unbind unbind;
109 acpi_op_notify notify; 110 acpi_op_notify notify;
110}; 111};
111 112
@@ -148,7 +149,8 @@ struct acpi_device_flags {
148 u32 power_manageable:1; 149 u32 power_manageable:1;
149 u32 performance_manageable:1; 150 u32 performance_manageable:1;
150 u32 eject_pending:1; 151 u32 eject_pending:1;
151 u32 reserved:24; 152 u32 match_driver:1;
153 u32 reserved:23;
152}; 154};
153 155
154/* File System */ 156/* File System */
@@ -207,7 +209,7 @@ struct acpi_device_power_state {
207 } flags; 209 } flags;
208 int power; /* % Power (compared to D0) */ 210 int power; /* % Power (compared to D0) */
209 int latency; /* Dx->D0 time (microseconds) */ 211 int latency; /* Dx->D0 time (microseconds) */
210 struct acpi_handle_list resources; /* Power resources referenced */ 212 struct list_head resources; /* Power resources referenced */
211}; 213};
212 214
213struct acpi_device_power { 215struct acpi_device_power {
@@ -250,7 +252,7 @@ struct acpi_device_wakeup {
250 acpi_handle gpe_device; 252 acpi_handle gpe_device;
251 u64 gpe_number; 253 u64 gpe_number;
252 u64 sleep_state; 254 u64 sleep_state;
253 struct acpi_handle_list resources; 255 struct list_head resources;
254 struct acpi_device_wakeup_flags flags; 256 struct acpi_device_wakeup_flags flags;
255 int prepare_count; 257 int prepare_count;
256}; 258};
@@ -279,16 +281,17 @@ struct acpi_device {
279 struct acpi_device_wakeup wakeup; 281 struct acpi_device_wakeup wakeup;
280 struct acpi_device_perf performance; 282 struct acpi_device_perf performance;
281 struct acpi_device_dir dir; 283 struct acpi_device_dir dir;
282 struct acpi_device_ops ops; 284 struct acpi_scan_handler *handler;
283 struct acpi_driver *driver; 285 struct acpi_driver *driver;
284 void *driver_data; 286 void *driver_data;
285 struct device dev; 287 struct device dev;
286 struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */
287 enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ 288 enum acpi_bus_removal_type removal_type; /* indicate for different removal type */
288 u8 physical_node_count; 289 u8 physical_node_count;
289 struct list_head physical_node_list; 290 struct list_head physical_node_list;
290 struct mutex physical_node_lock; 291 struct mutex physical_node_lock;
291 DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE); 292 DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
293 struct list_head power_dependent;
294 void (*remove)(struct acpi_device *);
292}; 295};
293 296
294static inline void *acpi_driver_data(struct acpi_device *d) 297static inline void *acpi_driver_data(struct acpi_device *d)
@@ -316,7 +319,7 @@ struct acpi_bus_event {
316}; 319};
317 320
318struct acpi_eject_event { 321struct acpi_eject_event {
319 acpi_handle handle; 322 struct acpi_device *device;
320 u32 event; 323 u32 event;
321}; 324};
322 325
@@ -339,13 +342,51 @@ void acpi_bus_data_handler(acpi_handle handle, void *context);
339acpi_status acpi_bus_get_status_handle(acpi_handle handle, 342acpi_status acpi_bus_get_status_handle(acpi_handle handle,
340 unsigned long long *sta); 343 unsigned long long *sta);
341int acpi_bus_get_status(struct acpi_device *device); 344int acpi_bus_get_status(struct acpi_device *device);
345
346#ifdef CONFIG_PM
342int acpi_bus_set_power(acpi_handle handle, int state); 347int acpi_bus_set_power(acpi_handle handle, int state);
348const char *acpi_power_state_string(int state);
349int acpi_device_get_power(struct acpi_device *device, int *state);
343int acpi_device_set_power(struct acpi_device *device, int state); 350int acpi_device_set_power(struct acpi_device *device, int state);
351int acpi_bus_init_power(struct acpi_device *device);
344int acpi_bus_update_power(acpi_handle handle, int *state_p); 352int acpi_bus_update_power(acpi_handle handle, int *state_p);
345bool acpi_bus_power_manageable(acpi_handle handle); 353bool acpi_bus_power_manageable(acpi_handle handle);
346bool acpi_bus_can_wakeup(acpi_handle handle); 354bool acpi_bus_can_wakeup(acpi_handle handle);
347int acpi_power_resource_register_device(struct device *dev, acpi_handle handle); 355#else /* !CONFIG_PM */
348void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle); 356static inline int acpi_bus_set_power(acpi_handle handle, int state)
357{
358 return 0;
359}
360static inline const char *acpi_power_state_string(int state)
361{
362 return "D0";
363}
364static inline int acpi_device_get_power(struct acpi_device *device, int *state)
365{
366 return 0;
367}
368static inline int acpi_device_set_power(struct acpi_device *device, int state)
369{
370 return 0;
371}
372static inline int acpi_bus_init_power(struct acpi_device *device)
373{
374 return 0;
375}
376static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
377{
378 return 0;
379}
380static inline bool acpi_bus_power_manageable(acpi_handle handle)
381{
382 return false;
383}
384static inline bool acpi_bus_can_wakeup(acpi_handle handle)
385{
386 return false;
387}
388#endif /* !CONFIG_PM */
389
349#ifdef CONFIG_ACPI_PROC_EVENT 390#ifdef CONFIG_ACPI_PROC_EVENT
350int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); 391int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
351int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); 392int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
@@ -354,13 +395,15 @@ int acpi_bus_receive_event(struct acpi_bus_event *event);
354static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) 395static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
355 { return 0; } 396 { return 0; }
356#endif 397#endif
398
399void acpi_scan_lock_acquire(void);
400void acpi_scan_lock_release(void);
401int acpi_scan_add_handler(struct acpi_scan_handler *handler);
357int acpi_bus_register_driver(struct acpi_driver *driver); 402int acpi_bus_register_driver(struct acpi_driver *driver);
358void acpi_bus_unregister_driver(struct acpi_driver *driver); 403void acpi_bus_unregister_driver(struct acpi_driver *driver);
359int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, 404int acpi_bus_scan(acpi_handle handle);
360 acpi_handle handle, int type);
361void acpi_bus_hot_remove_device(void *context); 405void acpi_bus_hot_remove_device(void *context);
362int acpi_bus_trim(struct acpi_device *start, int rmdevice); 406void acpi_bus_trim(struct acpi_device *start);
363int acpi_bus_start(struct acpi_device *device);
364acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); 407acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd);
365int acpi_match_device_ids(struct acpi_device *device, 408int acpi_match_device_ids(struct acpi_device *device,
366 const struct acpi_device_id *ids); 409 const struct acpi_device_id *ids);
@@ -390,6 +433,8 @@ struct acpi_bus_type {
390 int (*find_device) (struct device *, acpi_handle *); 433 int (*find_device) (struct device *, acpi_handle *);
391 /* For bridges, such as PCI root bridge, IDE controller */ 434 /* For bridges, such as PCI root bridge, IDE controller */
392 int (*find_bridge) (struct device *, acpi_handle *); 435 int (*find_bridge) (struct device *, acpi_handle *);
436 void (*setup)(struct device *);
437 void (*cleanup)(struct device *);
393}; 438};
394int register_acpi_bus_type(struct acpi_bus_type *); 439int register_acpi_bus_type(struct acpi_bus_type *);
395int unregister_acpi_bus_type(struct acpi_bus_type *); 440int unregister_acpi_bus_type(struct acpi_bus_type *);
@@ -397,7 +442,6 @@ int unregister_acpi_bus_type(struct acpi_bus_type *);
397struct acpi_pci_root { 442struct acpi_pci_root {
398 struct list_head node; 443 struct list_head node;
399 struct acpi_device * device; 444 struct acpi_device * device;
400 struct acpi_pci_id id;
401 struct pci_bus *bus; 445 struct pci_bus *bus;
402 u16 segment; 446 u16 segment;
403 struct resource secondary; /* downstream bus range */ 447 struct resource secondary; /* downstream bus range */
@@ -425,6 +469,8 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
425int acpi_device_power_state(struct device *dev, struct acpi_device *adev, 469int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
426 u32 target_state, int d_max_in, int *d_min_p); 470 u32 target_state, int d_max_in, int *d_min_p);
427int acpi_pm_device_sleep_state(struct device *, int *, int); 471int acpi_pm_device_sleep_state(struct device *, int *, int);
472void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
473void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
428#else 474#else
429static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, 475static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
430 acpi_notify_handler handler, 476 acpi_notify_handler handler,
@@ -454,6 +500,10 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
454{ 500{
455 return __acpi_device_power_state(m, p); 501 return __acpi_device_power_state(m, p);
456} 502}
503static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
504 struct device *depdev) {}
505static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
506 struct device *depdev) {}
457#endif 507#endif
458 508
459#ifdef CONFIG_PM_RUNTIME 509#ifdef CONFIG_PM_RUNTIME
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 43152742b46f..7d2a9eaab9eb 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2012, Intel Corp. 10 * Copyright (C) 2000 - 2013, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -102,10 +102,8 @@ acpi_os_physical_table_override(struct acpi_table_header *existing_table,
102/* 102/*
103 * Spinlock primitives 103 * Spinlock primitives
104 */ 104 */
105
106#ifndef acpi_os_create_lock 105#ifndef acpi_os_create_lock
107acpi_status 106acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
108acpi_os_create_lock(acpi_spinlock *out_handle);
109#endif 107#endif
110 108
111void acpi_os_delete_lock(acpi_spinlock handle); 109void acpi_os_delete_lock(acpi_spinlock handle);
@@ -148,6 +146,8 @@ void acpi_os_release_mutex(acpi_mutex handle);
148 */ 146 */
149void *acpi_os_allocate(acpi_size size); 147void *acpi_os_allocate(acpi_size size);
150 148
149void acpi_os_free(void *memory);
150
151void __iomem *acpi_os_map_memory(acpi_physical_address where, 151void __iomem *acpi_os_map_memory(acpi_physical_address where,
152 acpi_size length); 152 acpi_size length);
153 153
@@ -180,12 +180,13 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object);
180 * Interrupt handlers 180 * Interrupt handlers
181 */ 181 */
182acpi_status 182acpi_status
183acpi_os_install_interrupt_handler(u32 gsi, 183acpi_os_install_interrupt_handler(u32 interrupt_number,
184 acpi_osd_handler service_routine, 184 acpi_osd_handler service_routine,
185 void *context); 185 void *context);
186 186
187acpi_status 187acpi_status
188acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler service_routine); 188acpi_os_remove_interrupt_handler(u32 interrupt_number,
189 acpi_osd_handler service_routine);
189 190
190void acpi_os_gpe_count(u32 gpe_number); 191void acpi_os_gpe_count(u32 gpe_number);
191void acpi_os_fixed_event_count(u32 fixed_event_number); 192void acpi_os_fixed_event_count(u32 fixed_event_number);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 3d88395d4d6f..03322dddd88e 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20121018 49#define ACPI_CA_VERSION 0x20130117
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -56,11 +56,20 @@
56extern u8 acpi_gbl_permanent_mmap; 56extern u8 acpi_gbl_permanent_mmap;
57 57
58/* 58/*
59 * Globals that are publicly available, allowing for 59 * Globals that are publically available
60 * run time configuration
61 */ 60 */
61extern u32 acpi_current_gpe_count;
62extern struct acpi_table_fadt acpi_gbl_FADT;
63extern u8 acpi_gbl_system_awake_and_running;
64extern u8 acpi_gbl_reduced_hardware; /* ACPI 5.0 */
65
66/* Runtime configuration of debug print levels */
67
62extern u32 acpi_dbg_level; 68extern u32 acpi_dbg_level;
63extern u32 acpi_dbg_layer; 69extern u32 acpi_dbg_layer;
70
71/* ACPICA runtime options */
72
64extern u8 acpi_gbl_enable_interpreter_slack; 73extern u8 acpi_gbl_enable_interpreter_slack;
65extern u8 acpi_gbl_all_methods_serialized; 74extern u8 acpi_gbl_all_methods_serialized;
66extern u8 acpi_gbl_create_osi_method; 75extern u8 acpi_gbl_create_osi_method;
@@ -99,14 +108,9 @@ extern u8 acpi_gbl_disable_auto_repair;
99 108
100#endif /* !ACPI_REDUCED_HARDWARE */ 109#endif /* !ACPI_REDUCED_HARDWARE */
101 110
102extern u32 acpi_current_gpe_count;
103extern struct acpi_table_fadt acpi_gbl_FADT;
104extern u8 acpi_gbl_system_awake_and_running;
105extern u8 acpi_gbl_reduced_hardware; /* ACPI 5.0 */
106
107extern u32 acpi_rsdt_forced; 111extern u32 acpi_rsdt_forced;
108/* 112/*
109 * Global interfaces 113 * Initialization
110 */ 114 */
111acpi_status 115acpi_status
112acpi_initialize_tables(struct acpi_table_desc *initial_storage, 116acpi_initialize_tables(struct acpi_table_desc *initial_storage,
@@ -120,13 +124,15 @@ acpi_status acpi_initialize_objects(u32 flags);
120 124
121acpi_status acpi_terminate(void); 125acpi_status acpi_terminate(void);
122 126
127/*
128 * Miscellaneous global interfaces
129 */
130ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
131ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
123#ifdef ACPI_FUTURE_USAGE 132#ifdef ACPI_FUTURE_USAGE
124acpi_status acpi_subsystem_status(void); 133acpi_status acpi_subsystem_status(void);
125#endif 134#endif
126 135
127ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
128ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
129
130#ifdef ACPI_FUTURE_USAGE 136#ifdef ACPI_FUTURE_USAGE
131acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer); 137acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
132#endif 138#endif
@@ -191,9 +197,9 @@ acpi_status
191acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table); 197acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table);
192 198
193acpi_status 199acpi_status
194acpi_install_table_handler(acpi_tbl_handler handler, void *context); 200acpi_install_table_handler(acpi_table_handler handler, void *context);
195 201
196acpi_status acpi_remove_table_handler(acpi_tbl_handler handler); 202acpi_status acpi_remove_table_handler(acpi_table_handler handler);
197 203
198/* 204/*
199 * Namespace and name interfaces 205 * Namespace and name interfaces
@@ -438,6 +444,11 @@ acpi_get_event_resources(acpi_handle device_handle,
438 struct acpi_buffer *ret_buffer); 444 struct acpi_buffer *ret_buffer);
439 445
440acpi_status 446acpi_status
447acpi_walk_resource_buffer(struct acpi_buffer *buffer,
448 acpi_walk_resource_callback user_function,
449 void *context);
450
451acpi_status
441acpi_walk_resources(acpi_handle device, 452acpi_walk_resources(acpi_handle device,
442 char *name, 453 char *name,
443 acpi_walk_resource_callback user_function, void *context); 454 acpi_walk_resource_callback user_function, void *context);
@@ -462,6 +473,10 @@ acpi_buffer_to_resource(u8 *aml_buffer,
462 */ 473 */
463acpi_status acpi_reset(void); 474acpi_status acpi_reset(void);
464 475
476acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
477
478acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
479
465ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 480ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
466 acpi_read_bit_register(u32 register_id, 481 acpi_read_bit_register(u32 register_id,
467 u32 *return_value)) 482 u32 *return_value))
@@ -470,20 +485,6 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
470 acpi_write_bit_register(u32 register_id, 485 acpi_write_bit_register(u32 register_id,
471 u32 value)) 486 u32 value))
472 487
473ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
474 acpi_set_firmware_waking_vector(u32
475 physical_address))
476
477#if ACPI_MACHINE_WIDTH == 64
478ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
479 acpi_set_firmware_waking_vector64(u64
480 physical_address))
481#endif
482
483acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
484
485acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
486
487/* 488/*
488 * Sleep/Wake interfaces 489 * Sleep/Wake interfaces
489 */ 490 */
@@ -500,6 +501,15 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
500 501
501acpi_status acpi_leave_sleep_state(u8 sleep_state); 502acpi_status acpi_leave_sleep_state(u8 sleep_state);
502 503
504ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
505 acpi_set_firmware_waking_vector(u32
506 physical_address))
507
508#if ACPI_MACHINE_WIDTH == 64
509ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
510 acpi_set_firmware_waking_vector64(u64
511 physical_address))
512#endif
503/* 513/*
504 * ACPI Timer interfaces 514 * ACPI Timer interfaces
505 */ 515 */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 40349ae65464..cbf4bf977f75 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -102,8 +102,11 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (6
102 102
103#define ACPI_EXCLUSIVE (u8) 0x00 103#define ACPI_EXCLUSIVE (u8) 0x00
104#define ACPI_SHARED (u8) 0x01 104#define ACPI_SHARED (u8) 0x01
105#define ACPI_EXCLUSIVE_AND_WAKE (u8) 0x02 105
106#define ACPI_SHARED_AND_WAKE (u8) 0x03 106/* Wake */
107
108#define ACPI_NOT_WAKE_CAPABLE (u8) 0x00
109#define ACPI_WAKE_CAPABLE (u8) 0x01
107 110
108/* 111/*
109 * DMA Attributes 112 * DMA Attributes
@@ -171,6 +174,7 @@ struct acpi_resource_irq {
171 u8 triggering; 174 u8 triggering;
172 u8 polarity; 175 u8 polarity;
173 u8 sharable; 176 u8 sharable;
177 u8 wake_capable;
174 u8 interrupt_count; 178 u8 interrupt_count;
175 u8 interrupts[1]; 179 u8 interrupts[1];
176}; 180};
@@ -346,6 +350,7 @@ struct acpi_resource_extended_irq {
346 u8 triggering; 350 u8 triggering;
347 u8 polarity; 351 u8 polarity;
348 u8 sharable; 352 u8 sharable;
353 u8 wake_capable;
349 u8 interrupt_count; 354 u8 interrupt_count;
350 struct acpi_resource_source resource_source; 355 struct acpi_resource_source resource_source;
351 u32 interrupts[1]; 356 u32 interrupts[1];
@@ -365,6 +370,7 @@ struct acpi_resource_gpio {
365 u8 producer_consumer; /* For values, see Producer/Consumer above */ 370 u8 producer_consumer; /* For values, see Producer/Consumer above */
366 u8 pin_config; 371 u8 pin_config;
367 u8 sharable; /* For values, see Interrupt Attributes above */ 372 u8 sharable; /* For values, see Interrupt Attributes above */
373 u8 wake_capable; /* For values, see Interrupt Attributes above */
368 u8 io_restriction; 374 u8 io_restriction;
369 u8 triggering; /* For values, see Interrupt Attributes above */ 375 u8 triggering; /* For values, see Interrupt Attributes above */
370 u8 polarity; /* For values, see Interrupt Attributes above */ 376 u8 polarity; /* For values, see Interrupt Attributes above */
@@ -591,7 +597,10 @@ struct acpi_resource {
591#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12) 597#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
592#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type)) 598#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
593 599
594#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length) 600/* Macro for walking resource templates with multiple descriptors */
601
602#define ACPI_NEXT_RESOURCE(res) \
603 ACPI_ADD_PTR (struct acpi_resource, (res), (res)->length)
595 604
596struct acpi_pci_routing_table { 605struct acpi_pci_routing_table {
597 u32 length; 606 u32 length;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 4f94b1d812d5..9b58a8f43771 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -326,8 +326,6 @@ enum acpi_preferred_pm_profiles {
326 326
327#pragma pack() 327#pragma pack()
328 328
329#define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
330
331/* 329/*
332 * Internal table-related structures 330 * Internal table-related structures
333 */ 331 */
@@ -359,11 +357,14 @@ struct acpi_table_desc {
359/* 357/*
360 * Get the remaining ACPI tables 358 * Get the remaining ACPI tables
361 */ 359 */
362
363#include <acpi/actbl1.h> 360#include <acpi/actbl1.h>
364#include <acpi/actbl2.h> 361#include <acpi/actbl2.h>
365#include <acpi/actbl3.h> 362#include <acpi/actbl3.h>
366 363
364/* Macros used to generate offsets to specific table fields */
365
366#define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
367
367/* 368/*
368 * Sizes of the various flavors of FADT. We need to look closely 369 * Sizes of the various flavors of FADT. We need to look closely
369 * at the FADT length because the version number essentially tells 370 * at the FADT length because the version number essentially tells
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 280fc45b59dd..0bd750ebeb49 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,7 +768,7 @@ struct acpi_madt_interrupt_source {
768 768
769struct acpi_madt_local_x2apic { 769struct acpi_madt_local_x2apic {
770 struct acpi_subtable_header header; 770 struct acpi_subtable_header header;
771 u16 reserved; /* Reserved - must be zero */ 771 u16 reserved; /* reserved - must be zero */
772 u32 local_apic_id; /* Processor x2APIC ID */ 772 u32 local_apic_id; /* Processor x2APIC ID */
773 u32 lapic_flags; 773 u32 lapic_flags;
774 u32 uid; /* ACPI processor UID */ 774 u32 uid; /* ACPI processor UID */
@@ -781,14 +781,14 @@ struct acpi_madt_local_x2apic_nmi {
781 u16 inti_flags; 781 u16 inti_flags;
782 u32 uid; /* ACPI processor UID */ 782 u32 uid; /* ACPI processor UID */
783 u8 lint; /* LINTn to which NMI is connected */ 783 u8 lint; /* LINTn to which NMI is connected */
784 u8 reserved[3]; 784 u8 reserved[3]; /* reserved - must be zero */
785}; 785};
786 786
787/* 11: Generic Interrupt (ACPI 5.0) */ 787/* 11: Generic Interrupt (ACPI 5.0) */
788 788
789struct acpi_madt_generic_interrupt { 789struct acpi_madt_generic_interrupt {
790 struct acpi_subtable_header header; 790 struct acpi_subtable_header header;
791 u16 reserved; /* Reserved - must be zero */ 791 u16 reserved; /* reserved - must be zero */
792 u32 gic_id; 792 u32 gic_id;
793 u32 uid; 793 u32 uid;
794 u32 flags; 794 u32 flags;
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 1b2b356486d1..77dc7a4099a3 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -261,9 +261,28 @@ struct acpi_csrt_group {
261 u16 subdevice_id; 261 u16 subdevice_id;
262 u16 revision; 262 u16 revision;
263 u16 reserved; 263 u16 reserved;
264 u32 info_length; 264 u32 shared_info_length;
265 265
266 /* Shared data (length = info_length) immediately follows */ 266 /* Shared data immediately follows (Length = shared_info_length) */
267};
268
269/* Shared Info subtable */
270
271struct acpi_csrt_shared_info {
272 u16 major_version;
273 u16 minor_version;
274 u32 mmio_base_low;
275 u32 mmio_base_high;
276 u32 gsi_interrupt;
277 u8 interrupt_polarity;
278 u8 interrupt_mode;
279 u8 num_channels;
280 u8 dma_address_width;
281 u16 base_request_line;
282 u16 num_handshake_signals;
283 u32 max_block_size;
284
285 /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */
267}; 286};
268 287
269/* Resource Descriptor subtable */ 288/* Resource Descriptor subtable */
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 6585141e4b97..332b17e3bec8 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -68,13 +68,13 @@
68#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ 68#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
69#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ 69#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
70#define ACPI_SIG_RASF "RASF" /* RAS Feature table */ 70#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
71#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
71 72
72#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */ 73#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
73#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */ 74#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
74 75
75/* Reserved table signatures */ 76/* Reserved table signatures */
76 77
77#define ACPI_SIG_CSRT "CSRT" /* Core System Resources Table */
78#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */ 78#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
79#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ 79#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
80#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */ 80#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
@@ -550,6 +550,36 @@ enum acpi_rasf_status {
550#define ACPI_RASF_ERROR (1<<2) 550#define ACPI_RASF_ERROR (1<<2)
551#define ACPI_RASF_STATUS (0x1F<<3) 551#define ACPI_RASF_STATUS (0x1F<<3)
552 552
553/*******************************************************************************
554 *
555 * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
556 * Version 3
557 *
558 * Conforms to "TPM 2.0 Hardware Interface Table (TPM2)" 29 November 2011
559 *
560 ******************************************************************************/
561
562struct acpi_table_tpm2 {
563 struct acpi_table_header header; /* Common ACPI table header */
564 u32 flags;
565 u64 control_address;
566 u32 start_method;
567};
568
569/* Control area structure (not part of table, pointed to by control_address) */
570
571struct acpi_tpm2_control {
572 u32 reserved;
573 u32 error;
574 u32 cancel;
575 u32 start;
576 u64 interrupt_control;
577 u32 command_size;
578 u64 command_address;
579 u32 response_size;
580 u64 response_address;
581};
582
553/* Reset to default packing */ 583/* Reset to default packing */
554 584
555#pragma pack() 585#pragma pack()
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f43f1fba132..845e75f1ffd8 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -341,7 +341,7 @@ typedef u32 acpi_physical_address;
341 341
342/* PM Timer ticks per second (HZ) */ 342/* PM Timer ticks per second (HZ) */
343 343
344#define PM_TIMER_FREQUENCY 3579545 344#define ACPI_PM_TIMER_FREQUENCY 3579545
345 345
346/******************************************************************************* 346/*******************************************************************************
347 * 347 *
@@ -373,6 +373,21 @@ typedef u32 acpi_name; /* 4-byte ACPI name */
373typedef char *acpi_string; /* Null terminated ASCII string */ 373typedef char *acpi_string; /* Null terminated ASCII string */
374typedef void *acpi_handle; /* Actually a ptr to a NS Node */ 374typedef void *acpi_handle; /* Actually a ptr to a NS Node */
375 375
376/* Time constants for timer calculations */
377
378#define ACPI_MSEC_PER_SEC 1000L
379
380#define ACPI_USEC_PER_MSEC 1000L
381#define ACPI_USEC_PER_SEC 1000000L
382
383#define ACPI_100NSEC_PER_USEC 10L
384#define ACPI_100NSEC_PER_MSEC 10000L
385#define ACPI_100NSEC_PER_SEC 10000000L
386
387#define ACPI_NSEC_PER_USEC 1000L
388#define ACPI_NSEC_PER_MSEC 1000000L
389#define ACPI_NSEC_PER_SEC 1000000000L
390
376/* Owner IDs are used to track namespace nodes for selective deletion */ 391/* Owner IDs are used to track namespace nodes for selective deletion */
377 392
378typedef u8 acpi_owner_id; 393typedef u8 acpi_owner_id;
@@ -390,10 +405,6 @@ typedef u8 acpi_owner_id;
390#define ACPI_MAX16_DECIMAL_DIGITS 5 405#define ACPI_MAX16_DECIMAL_DIGITS 5
391#define ACPI_MAX8_DECIMAL_DIGITS 3 406#define ACPI_MAX8_DECIMAL_DIGITS 3
392 407
393/* PM Timer ticks per second (HZ) */
394
395#define PM_TIMER_FREQUENCY 3579545
396
397/* 408/*
398 * Constants with special meanings 409 * Constants with special meanings
399 */ 410 */
@@ -474,6 +485,7 @@ typedef u64 acpi_integer;
474 */ 485 */
475#define ACPI_FULL_INITIALIZATION 0x00 486#define ACPI_FULL_INITIALIZATION 0x00
476#define ACPI_NO_ADDRESS_SPACE_INIT 0x01 487#define ACPI_NO_ADDRESS_SPACE_INIT 0x01
488#define ACPI_NO_HARDWARE_INIT 0x02
477#define ACPI_NO_EVENT_INIT 0x04 489#define ACPI_NO_EVENT_INIT 0x04
478#define ACPI_NO_HANDLER_INIT 0x08 490#define ACPI_NO_HANDLER_INIT 0x08
479#define ACPI_NO_ACPI_ENABLE 0x10 491#define ACPI_NO_ACPI_ENABLE 0x10
@@ -595,7 +607,7 @@ typedef u32 acpi_object_type;
595 607
596/* 608/*
597 * These are special object types that never appear in 609 * These are special object types that never appear in
598 * a Namespace node, only in a union acpi_operand_object 610 * a Namespace node, only in an object of union acpi_operand_object
599 */ 611 */
600#define ACPI_TYPE_LOCAL_EXTRA 0x1C 612#define ACPI_TYPE_LOCAL_EXTRA 0x1C
601#define ACPI_TYPE_LOCAL_DATA 0x1D 613#define ACPI_TYPE_LOCAL_DATA 0x1D
@@ -662,7 +674,7 @@ typedef u32 acpi_event_status;
662#define ACPI_GPE_MAX 0xFF 674#define ACPI_GPE_MAX 0xFF
663#define ACPI_NUM_GPE 256 675#define ACPI_NUM_GPE 256
664 676
665/* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */ 677/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
666 678
667#define ACPI_GPE_ENABLE 0 679#define ACPI_GPE_ENABLE 0
668#define ACPI_GPE_DISABLE 1 680#define ACPI_GPE_DISABLE 1
@@ -880,6 +892,10 @@ struct acpi_buffer {
880 void *pointer; /* pointer to buffer */ 892 void *pointer; /* pointer to buffer */
881}; 893};
882 894
895/* Free a buffer created in an struct acpi_buffer via ACPI_ALLOCATE_LOCAL_BUFFER */
896
897#define ACPI_FREE_BUFFER(b) ACPI_FREE(b.pointer)
898
883/* 899/*
884 * name_type for acpi_get_name 900 * name_type for acpi_get_name
885 */ 901 */
@@ -968,7 +984,11 @@ acpi_status(*acpi_exception_handler) (acpi_status aml_status,
968/* Table Event handler (Load, load_table, etc.) and types */ 984/* Table Event handler (Load, load_table, etc.) and types */
969 985
970typedef 986typedef
971acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); 987acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
988
989#define ACPI_TABLE_LOAD 0x0
990#define ACPI_TABLE_UNLOAD 0x1
991#define ACPI_NUM_TABLE_EVENTS 2
972 992
973/* Address Spaces (For Operation Regions) */ 993/* Address Spaces (For Operation Regions) */
974 994
diff --git a/include/acpi/container.h b/include/acpi/container.h
deleted file mode 100644
index a703f14e049e..000000000000
--- a/include/acpi/container.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __ACPI_CONTAINER_H
2#define __ACPI_CONTAINER_H
3
4#include <linux/kernel.h>
5
6struct acpi_container {
7 acpi_handle handle;
8 unsigned long sun;
9 int state;
10};
11
12#endif /* __ACPI_CONTAINER_H */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 89cee88dd2a5..ef04b36ca6ed 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -1,11 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Name: acenv.h - Generation environment specific items 3 * Name: acenv.h - Host and compiler configuration
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,12 @@
44#ifndef __ACENV_H__ 44#ifndef __ACENV_H__
45#define __ACENV_H__ 45#define __ACENV_H__
46 46
47/*
48 * Environment configuration. The purpose of this file is to interface ACPICA
49 * to the local environment. This includes compiler-specific, OS-specific,
50 * and machine-specific configuration.
51 */
52
47/* Types for ACPI_MUTEX_TYPE */ 53/* Types for ACPI_MUTEX_TYPE */
48 54
49#define ACPI_BINARY_SEMAPHORE 0 55#define ACPI_BINARY_SEMAPHORE 0
@@ -60,139 +66,170 @@
60 * 66 *
61 *****************************************************************************/ 67 *****************************************************************************/
62 68
63#ifdef ACPI_LIBRARY 69/* iASL configuration */
64/*
65 * Note: The non-debug version of the acpi_library does not contain any
66 * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG
67 */
68#define ACPI_USE_LOCAL_CACHE
69#endif
70 70
71#ifdef ACPI_ASL_COMPILER 71#ifdef ACPI_ASL_COMPILER
72#define ACPI_DEBUG_OUTPUT
73#define ACPI_APPLICATION 72#define ACPI_APPLICATION
74#define ACPI_DISASSEMBLER 73#define ACPI_DISASSEMBLER
74#define ACPI_DEBUG_OUTPUT
75#define ACPI_CONSTANT_EVAL_ONLY 75#define ACPI_CONSTANT_EVAL_ONLY
76#define ACPI_LARGE_NAMESPACE_NODE 76#define ACPI_LARGE_NAMESPACE_NODE
77#define ACPI_DATA_TABLE_DISASSEMBLY 77#define ACPI_DATA_TABLE_DISASSEMBLY
78#define ACPI_SINGLE_THREADED
78#endif 79#endif
79 80
81/* acpi_exec configuration. Multithreaded with full AML debugger */
82
80#ifdef ACPI_EXEC_APP 83#ifdef ACPI_EXEC_APP
81#undef DEBUGGER_THREADING
82#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
83#define ACPI_FULL_DEBUG
84#define ACPI_APPLICATION 84#define ACPI_APPLICATION
85#define ACPI_DEBUGGER 85#define ACPI_FULL_DEBUG
86#define ACPI_MUTEX_DEBUG 86#define ACPI_MUTEX_DEBUG
87#define ACPI_DBG_TRACK_ALLOCATIONS 87#define ACPI_DBG_TRACK_ALLOCATIONS
88#endif 88#endif
89 89
90/* acpi_names configuration. Single threaded with debugger output enabled. */
91
92#ifdef ACPI_NAMES_APP
93#define ACPI_DEBUGGER
94#define ACPI_APPLICATION
95#define ACPI_SINGLE_THREADED
96#endif
97
98/*
99 * acpi_bin/acpi_help/acpi_src configuration. All single threaded, with
100 * no debug output.
101 */
102#if (defined ACPI_BIN_APP) || \
103 (defined ACPI_SRC_APP) || \
104 (defined ACPI_XTRACT_APP)
105#define ACPI_APPLICATION
106#define ACPI_SINGLE_THREADED
107#endif
108
109#ifdef ACPI_HELP_APP
110#define ACPI_APPLICATION
111#define ACPI_SINGLE_THREADED
112#define ACPI_NO_ERROR_MESSAGES
113#endif
114
115/* Linkable ACPICA library */
116
117#ifdef ACPI_LIBRARY
118#define ACPI_USE_LOCAL_CACHE
119#define ACPI_FUTURE_USAGE
120#endif
121
122/* Common for all ACPICA applications */
123
90#ifdef ACPI_APPLICATION 124#ifdef ACPI_APPLICATION
91#define ACPI_USE_SYSTEM_CLIBRARY 125#define ACPI_USE_SYSTEM_CLIBRARY
92#define ACPI_USE_LOCAL_CACHE 126#define ACPI_USE_LOCAL_CACHE
93#endif 127#endif
94 128
129/* Common debug support */
130
95#ifdef ACPI_FULL_DEBUG 131#ifdef ACPI_FULL_DEBUG
96#define ACPI_DEBUGGER 132#define ACPI_DEBUGGER
97#define ACPI_DEBUG_OUTPUT 133#define ACPI_DEBUG_OUTPUT
98#define ACPI_DISASSEMBLER 134#define ACPI_DISASSEMBLER
99#endif 135#endif
100 136
101/*
102 * Environment configuration. The purpose of this file is to interface to the
103 * local generation environment.
104 *
105 * 1) ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library.
106 * Otherwise, local versions of string/memory functions will be used.
107 * 2) ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
108 * the standard header files may be used.
109 *
110 * The ACPI subsystem only uses low level C library functions that do not call
111 * operating system services and may therefore be inlined in the code.
112 *
113 * It may be necessary to tailor these include files to the target
114 * generation environment.
115 *
116 *
117 * Functions and constants used from each header:
118 *
119 * string.h: memcpy
120 * memset
121 * strcat
122 * strcmp
123 * strcpy
124 * strlen
125 * strncmp
126 * strncat
127 * strncpy
128 *
129 * stdlib.h: strtoul
130 *
131 * stdarg.h: va_list
132 * va_arg
133 * va_start
134 * va_end
135 *
136 */
137 137
138/*! [Begin] no source code translation */ 138/*! [Begin] no source code translation */
139 139
140/******************************************************************************
141 *
142 * Host configuration files. The compiler configuration files are included
143 * by the host files.
144 *
145 *****************************************************************************/
146
140#if defined(_LINUX) || defined(__linux__) 147#if defined(_LINUX) || defined(__linux__)
141#include <acpi/platform/aclinux.h> 148#include <acpi/platform/aclinux.h>
142 149
143#elif defined(_AED_EFI)
144#include "acefi.h"
145
146#elif defined(WIN32)
147#include "acwin.h"
148
149#elif defined(WIN64)
150#include "acwin64.h"
151
152#elif defined(MSDOS) /* Must appear after WIN32 and WIN64 check */
153#include "acdos16.h"
154
155#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 150#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
156#include "acfreebsd.h" 151#include "acfreebsd.h"
157 152
158#elif defined(__NetBSD__) 153#elif defined(__NetBSD__)
159#include "acnetbsd.h" 154#include "acnetbsd.h"
160 155
156#elif defined(__sun)
157#include "acsolaris.h"
158
161#elif defined(MODESTO) 159#elif defined(MODESTO)
162#include "acmodesto.h" 160#include "acmodesto.h"
163 161
164#elif defined(NETWARE) 162#elif defined(NETWARE)
165#include "acnetware.h" 163#include "acnetware.h"
166 164
167#elif defined(__sun) 165#elif defined(_CYGWIN)
168#include "acsolaris.h" 166#include "accygwin.h"
169 167
170#else 168#elif defined(WIN32)
169#include "acwin.h"
170
171#elif defined(WIN64)
172#include "acwin64.h"
171 173
172/* All other environments */ 174#elif defined(_WRS_LIB_BUILD)
175#include "acvxworks.h"
173 176
174#define ACPI_USE_STANDARD_HEADERS 177#elif defined(__OS2__)
178#include "acos2.h"
175 179
176#define COMPILER_DEPENDENT_INT64 long long 180#elif defined(_AED_EFI)
177#define COMPILER_DEPENDENT_UINT64 unsigned long long 181#include "acefi.h"
182
183#elif defined(__HAIKU__)
184#include "achaiku.h"
178 185
186#else
187
188/* Unknown environment */
189
190#error Unknown target environment
179#endif 191#endif
180 192
181/*! [End] no source code translation !*/ 193/*! [End] no source code translation !*/
182 194
183/****************************************************************************** 195/******************************************************************************
184 * 196 *
185 * Miscellaneous configuration 197 * Setup defaults for the required symbols that were not defined in one of
198 * the host/compiler files above.
186 * 199 *
187 *****************************************************************************/ 200 *****************************************************************************/
188 201
189/* 202/* 64-bit data types */
190 * Are mutexes supported by the host? default is no, use binary semaphores. 203
191 */ 204#ifndef COMPILER_DEPENDENT_INT64
205#define COMPILER_DEPENDENT_INT64 long long
206#endif
207
208#ifndef COMPILER_DEPENDENT_UINT64
209#define COMPILER_DEPENDENT_UINT64 unsigned long long
210#endif
211
212/* Type of mutex supported by host. Default is binary semaphores. */
192#ifndef ACPI_MUTEX_TYPE 213#ifndef ACPI_MUTEX_TYPE
193#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE 214#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
194#endif 215#endif
195 216
217/* Global Lock acquire/release */
218
219#ifndef ACPI_ACQUIRE_GLOBAL_LOCK
220#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acquired) acquired = 1
221#endif
222
223#ifndef ACPI_RELEASE_GLOBAL_LOCK
224#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
225#endif
226
227/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
228
229#ifndef ACPI_FLUSH_CPU_CACHE
230#define ACPI_FLUSH_CPU_CACHE()
231#endif
232
196/* "inline" keywords - configurable since inline is not standardized */ 233/* "inline" keywords - configurable since inline is not standardized */
197 234
198#ifndef ACPI_INLINE 235#ifndef ACPI_INLINE
@@ -200,6 +237,30 @@
200#endif 237#endif
201 238
202/* 239/*
240 * Configurable calling conventions:
241 *
242 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
243 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
244 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
245 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
246 */
247#ifndef ACPI_SYSTEM_XFACE
248#define ACPI_SYSTEM_XFACE
249#endif
250
251#ifndef ACPI_EXTERNAL_XFACE
252#define ACPI_EXTERNAL_XFACE
253#endif
254
255#ifndef ACPI_INTERNAL_XFACE
256#define ACPI_INTERNAL_XFACE
257#endif
258
259#ifndef ACPI_INTERNAL_VAR_XFACE
260#define ACPI_INTERNAL_VAR_XFACE
261#endif
262
263/*
203 * Debugger threading model 264 * Debugger threading model
204 * Use single threaded if the entire subsystem is contained in an application 265 * Use single threaded if the entire subsystem is contained in an application
205 * Use multiple threaded when the subsystem is running in the kernel. 266 * Use multiple threaded when the subsystem is running in the kernel.
@@ -222,17 +283,26 @@
222 * 283 *
223 *****************************************************************************/ 284 *****************************************************************************/
224 285
225#define ACPI_IS_ASCII(c) ((c) < 0x80)
226
227#ifdef ACPI_USE_SYSTEM_CLIBRARY
228/* 286/*
229 * Use the standard C library headers. 287 * ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library.
230 * We want to keep these to a minimum. 288 * Otherwise, local versions of string/memory functions will be used.
289 * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
290 * the standard header files may be used.
291 *
292 * The ACPICA subsystem only uses low level C library functions that do not call
293 * operating system services and may therefore be inlined in the code.
294 *
295 * It may be necessary to tailor these include files to the target
296 * generation environment.
231 */ 297 */
298#ifdef ACPI_USE_SYSTEM_CLIBRARY
299
300/* Use the standard C library headers. We want to keep these to a minimum. */
301
232#ifdef ACPI_USE_STANDARD_HEADERS 302#ifdef ACPI_USE_STANDARD_HEADERS
233/* 303
234 * Use the standard headers from the standard locations 304/* Use the standard headers from the standard locations */
235 */ 305
236#include <stdarg.h> 306#include <stdarg.h>
237#include <stdlib.h> 307#include <stdlib.h>
238#include <string.h> 308#include <string.h>
@@ -240,9 +310,8 @@
240 310
241#endif /* ACPI_USE_STANDARD_HEADERS */ 311#endif /* ACPI_USE_STANDARD_HEADERS */
242 312
243/* 313/* We will be linking to the standard Clib functions */
244 * We will be linking to the standard Clib functions 314
245 */
246#define ACPI_STRSTR(s1,s2) strstr((s1), (s2)) 315#define ACPI_STRSTR(s1,s2) strstr((s1), (s2))
247#define ACPI_STRCHR(s1,c) strchr((s1), (c)) 316#define ACPI_STRCHR(s1,c) strchr((s1), (c))
248#define ACPI_STRLEN(s) (acpi_size) strlen((s)) 317#define ACPI_STRLEN(s) (acpi_size) strlen((s))
@@ -274,13 +343,12 @@
274 * 343 *
275 *****************************************************************************/ 344 *****************************************************************************/
276 345
277 /* 346/*
278 * Use local definitions of C library macros and functions 347 * Use local definitions of C library macros and functions. These function
279 * NOTE: The function implementations may not be as efficient 348 * implementations may not be as efficient as an inline or assembly code
280 * as an inline or assembly code implementation provided by a 349 * implementation provided by a native C library, but they are functionally
281 * native C library. 350 * equivalent.
282 */ 351 */
283
284#ifndef va_arg 352#ifndef va_arg
285 353
286#ifndef _VALIST 354#ifndef _VALIST
@@ -288,22 +356,22 @@
288typedef char *va_list; 356typedef char *va_list;
289#endif /* _VALIST */ 357#endif /* _VALIST */
290 358
291/* 359/* Storage alignment properties */
292 * Storage alignment properties 360
293 */
294#define _AUPBND (sizeof (acpi_native_int) - 1) 361#define _AUPBND (sizeof (acpi_native_int) - 1)
295#define _ADNBND (sizeof (acpi_native_int) - 1) 362#define _ADNBND (sizeof (acpi_native_int) - 1)
296 363
297/* 364/* Variable argument list macro definitions */
298 * Variable argument list macro definitions 365
299 */
300#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd))) 366#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd)))
301#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) 367#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND))))
302#define va_end(ap) (void) 0 368#define va_end(ap) (ap = (va_list) NULL)
303#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND)))) 369#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND))))
304 370
305#endif /* va_arg */ 371#endif /* va_arg */
306 372
373/* Use the local (ACPICA) definitions of the clib functions */
374
307#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2)) 375#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2))
308#define ACPI_STRCHR(s1,c) acpi_ut_strchr ((s1), (c)) 376#define ACPI_STRCHR(s1,c) acpi_ut_strchr ((s1), (c))
309#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s)) 377#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s))
@@ -322,59 +390,4 @@ typedef char *va_list;
322 390
323#endif /* ACPI_USE_SYSTEM_CLIBRARY */ 391#endif /* ACPI_USE_SYSTEM_CLIBRARY */
324 392
325/******************************************************************************
326 *
327 * Assembly code macros
328 *
329 *****************************************************************************/
330
331/*
332 * Handle platform- and compiler-specific assembly language differences.
333 * These should already have been defined by the platform includes above.
334 *
335 * Notes:
336 * 1) Interrupt 3 is used to break into a debugger
337 * 2) Interrupts are turned off during ACPI register setup
338 */
339
340/* Unrecognized compiler, use defaults */
341
342#ifndef ACPI_ASM_MACROS
343
344/*
345 * Calling conventions:
346 *
347 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
348 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
349 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
350 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
351 */
352#define ACPI_SYSTEM_XFACE
353#define ACPI_EXTERNAL_XFACE
354#define ACPI_INTERNAL_XFACE
355#define ACPI_INTERNAL_VAR_XFACE
356
357#define ACPI_ASM_MACROS
358#define BREAKPOINT3
359#define ACPI_DISABLE_IRQS()
360#define ACPI_ENABLE_IRQS()
361#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acq)
362#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, acq)
363
364#endif /* ACPI_ASM_MACROS */
365
366#ifdef ACPI_APPLICATION
367
368/* Don't want software interrupts within a ring3 application */
369
370#undef BREAKPOINT3
371#define BREAKPOINT3
372#endif
373
374/******************************************************************************
375 *
376 * Compiler-specific information is contained in the compiler-specific
377 * headers.
378 *
379 *****************************************************************************/
380#endif /* __ACENV_H__ */ 393#endif /* __ACENV_H__ */
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 72553b0c9f33..e077ce6c38ca 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -64,8 +64,4 @@
64 */ 64 */
65#define ACPI_UNUSED_VAR __attribute__ ((unused)) 65#define ACPI_UNUSED_VAR __attribute__ ((unused))
66 66
67#ifdef _ANSI
68#define inline
69#endif
70
71#endif /* __ACGCC_H__ */ 67#endif /* __ACGCC_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 85d5d8f38452..68534ef86ec8 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2012, Intel Corp. 8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,6 @@
108 108
109#include <acpi/platform/acgcc.h> 109#include <acpi/platform/acgcc.h>
110 110
111
112#ifdef __KERNEL__ 111#ifdef __KERNEL__
113#include <acpi/actypes.h> 112#include <acpi/actypes.h>
114/* 113/*
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 9a62937c56ca..51969436b8b8 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,66 +4,12 @@
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/jiffies.h> 5#include <linux/jiffies.h>
6 6
7typedef unsigned long __nocast cputime_t; 7#ifndef CONFIG_VIRT_CPU_ACCOUNTING
8 8# include <asm-generic/cputime_jiffies.h>
9#define cputime_one_jiffy jiffies_to_cputime(1) 9#endif
10#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
11#define cputime_to_scaled(__ct) (__ct)
12#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
13
14typedef u64 __nocast cputime64_t;
15
16#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
17#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
18
19#define nsecs_to_cputime64(__ct) \
20 jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
21
22
23/*
24 * Convert cputime to microseconds and back.
25 */
26#define cputime_to_usecs(__ct) \
27 jiffies_to_usecs(cputime_to_jiffies(__ct))
28#define usecs_to_cputime(__usec) \
29 jiffies_to_cputime(usecs_to_jiffies(__usec))
30#define usecs_to_cputime64(__usec) \
31 jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
32
33/*
34 * Convert cputime to seconds and back.
35 */
36#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
37#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
38
39/*
40 * Convert cputime to timespec and back.
41 */
42#define timespec_to_cputime(__val) \
43 jiffies_to_cputime(timespec_to_jiffies(__val))
44#define cputime_to_timespec(__ct,__val) \
45 jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
46
47/*
48 * Convert cputime to timeval and back.
49 */
50#define timeval_to_cputime(__val) \
51 jiffies_to_cputime(timeval_to_jiffies(__val))
52#define cputime_to_timeval(__ct,__val) \
53 jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
54
55/*
56 * Convert cputime to clock and back.
57 */
58#define cputime_to_clock_t(__ct) \
59 jiffies_to_clock_t(cputime_to_jiffies(__ct))
60#define clock_t_to_cputime(__x) \
61 jiffies_to_cputime(clock_t_to_jiffies(__x))
62 10
63/* 11#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
64 * Convert cputime64 to clock. 12# include <asm-generic/cputime_nsecs.h>
65 */ 13#endif
66#define cputime64_to_clock_t(__ct) \
67 jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
68 14
69#endif 15#endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
new file mode 100644
index 000000000000..272ecba9f588
--- /dev/null
+++ b/include/asm-generic/cputime_jiffies.h
@@ -0,0 +1,72 @@
1#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
2#define _ASM_GENERIC_CPUTIME_JIFFIES_H
3
4typedef unsigned long __nocast cputime_t;
5
6#define cputime_one_jiffy jiffies_to_cputime(1)
7#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
8#define cputime_to_scaled(__ct) (__ct)
9#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
10
11typedef u64 __nocast cputime64_t;
12
13#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
14#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
15
16
17/*
18 * Convert nanoseconds to cputime
19 */
20#define nsecs_to_cputime64(__nsec) \
21 jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
22#define nsecs_to_cputime(__nsec) \
23 jiffies_to_cputime(nsecs_to_jiffies(__nsec))
24
25
26/*
27 * Convert cputime to microseconds and back.
28 */
29#define cputime_to_usecs(__ct) \
30 jiffies_to_usecs(cputime_to_jiffies(__ct))
31#define usecs_to_cputime(__usec) \
32 jiffies_to_cputime(usecs_to_jiffies(__usec))
33#define usecs_to_cputime64(__usec) \
34 jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
35
36/*
37 * Convert cputime to seconds and back.
38 */
39#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
40#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
41
42/*
43 * Convert cputime to timespec and back.
44 */
45#define timespec_to_cputime(__val) \
46 jiffies_to_cputime(timespec_to_jiffies(__val))
47#define cputime_to_timespec(__ct,__val) \
48 jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
49
50/*
51 * Convert cputime to timeval and back.
52 */
53#define timeval_to_cputime(__val) \
54 jiffies_to_cputime(timeval_to_jiffies(__val))
55#define cputime_to_timeval(__ct,__val) \
56 jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
57
58/*
59 * Convert cputime to clock and back.
60 */
61#define cputime_to_clock_t(__ct) \
62 jiffies_to_clock_t(cputime_to_jiffies(__ct))
63#define clock_t_to_cputime(__x) \
64 jiffies_to_cputime(clock_t_to_jiffies(__x))
65
66/*
67 * Convert cputime64 to clock.
68 */
69#define cputime64_to_clock_t(__ct) \
70 jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
71
72#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
new file mode 100644
index 000000000000..b6485cafb7bd
--- /dev/null
+++ b/include/asm-generic/cputime_nsecs.h
@@ -0,0 +1,104 @@
1/*
2 * Definitions for measuring cputime in nsecs resolution.
3 *
4 * Based on <arch/ia64/include/asm/cputime.h>
5 *
6 * Copyright (C) 2007 FUJITSU LIMITED
7 * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
17#define _ASM_GENERIC_CPUTIME_NSECS_H
18
19typedef u64 __nocast cputime_t;
20typedef u64 __nocast cputime64_t;
21
22#define cputime_one_jiffy jiffies_to_cputime(1)
23
24/*
25 * Convert cputime <-> jiffies (HZ)
26 */
27#define cputime_to_jiffies(__ct) \
28 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
29#define cputime_to_scaled(__ct) (__ct)
30#define jiffies_to_cputime(__jif) \
31 (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
32#define cputime64_to_jiffies64(__ct) \
33 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
34#define jiffies64_to_cputime64(__jif) \
35 (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
36
37
38/*
39 * Convert cputime <-> nanoseconds
40 */
41#define nsecs_to_cputime(__nsecs) ((__force u64)(__nsecs))
42
43
44/*
45 * Convert cputime <-> microseconds
46 */
47#define cputime_to_usecs(__ct) \
48 ((__force u64)(__ct) / NSEC_PER_USEC)
49#define usecs_to_cputime(__usecs) \
50 (__force cputime_t)((__usecs) * NSEC_PER_USEC)
51#define usecs_to_cputime64(__usecs) \
52 (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
53
54/*
55 * Convert cputime <-> seconds
56 */
57#define cputime_to_secs(__ct) \
58 ((__force u64)(__ct) / NSEC_PER_SEC)
59#define secs_to_cputime(__secs) \
60 (__force cputime_t)((__secs) * NSEC_PER_SEC)
61
62/*
63 * Convert cputime <-> timespec (nsec)
64 */
65static inline cputime_t timespec_to_cputime(const struct timespec *val)
66{
67 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
68 return (__force cputime_t) ret;
69}
70static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
71{
72 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
73 val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
74}
75
76/*
77 * Convert cputime <-> timeval (msec)
78 */
79static inline cputime_t timeval_to_cputime(struct timeval *val)
80{
81 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
82 return (__force cputime_t) ret;
83}
84static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
85{
86 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
87 val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
88}
89
90/*
91 * Convert cputime <-> clock (USER_HZ)
92 */
93#define cputime_to_clock_t(__ct) \
94 ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
95#define clock_t_to_cputime(__x) \
96 (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
97
98/*
99 * Convert cputime64 to clock.
100 */
101#define cputime64_to_clock_t(__ct) \
102 cputime_to_clock_t((__force cputime_t)__ct)
103
104#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index ccf7b4f34a3c..6c32af918c2f 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -16,6 +16,22 @@ extern void
16dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 16dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
17 dma_addr_t dma_handle); 17 dma_addr_t dma_handle);
18 18
19static inline void *dma_alloc_attrs(struct device *dev, size_t size,
20 dma_addr_t *dma_handle, gfp_t flag,
21 struct dma_attrs *attrs)
22{
23 /* attrs is not supported and ignored */
24 return dma_alloc_coherent(dev, size, dma_handle, flag);
25}
26
27static inline void dma_free_attrs(struct device *dev, size_t size,
28 void *cpu_addr, dma_addr_t dma_handle,
29 struct dma_attrs *attrs)
30{
31 /* attrs is not supported and ignored */
32 dma_free_coherent(dev, size, cpu_addr, dma_handle);
33}
34
19#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 35#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
20#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 36#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
21 37
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 701beab27aab..5cf680a98f9b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)
461 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); 461 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
462} 462}
463 463
464static inline unsigned long my_zero_pfn(unsigned long addr) 464#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
465{ 465
466 return page_to_pfn(ZERO_PAGE(addr));
467}
468#else 466#else
469static inline int is_zero_pfn(unsigned long pfn) 467static inline int is_zero_pfn(unsigned long pfn)
470{ 468{
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index 58f466ff00d3..1db51b8524e9 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long fd, off_t pgoff); 21 unsigned long fd, off_t pgoff);
22#endif 22#endif
23 23
24#ifndef CONFIG_GENERIC_SIGALTSTACK
24#ifndef sys_sigaltstack 25#ifndef sys_sigaltstack
25asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 26asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
26 struct pt_regs *); 27 struct pt_regs *);
27#endif 28#endif
29#endif
28 30
29#ifndef sys_rt_sigreturn 31#ifndef sys_rt_sigreturn
30asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); 32asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d1ea7ce0b4cb..c1fe60ad1540 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -150,6 +150,15 @@
150#endif 150#endif
151 151
152 152
153#ifdef CONFIG_COMMON_CLK
154#define CLK_OF_TABLES() . = ALIGN(8); \
155 VMLINUX_SYMBOL(__clk_of_table) = .; \
156 *(__clk_of_table) \
157 *(__clk_of_table_end)
158#else
159#define CLK_OF_TABLES()
160#endif
161
153#define KERNEL_DTB() \ 162#define KERNEL_DTB() \
154 STRUCT_ALIGN(); \ 163 STRUCT_ALIGN(); \
155 VMLINUX_SYMBOL(__dtb_start) = .; \ 164 VMLINUX_SYMBOL(__dtb_start) = .; \
@@ -493,6 +502,7 @@
493 DEV_DISCARD(init.rodata) \ 502 DEV_DISCARD(init.rodata) \
494 CPU_DISCARD(init.rodata) \ 503 CPU_DISCARD(init.rodata) \
495 MEM_DISCARD(init.rodata) \ 504 MEM_DISCARD(init.rodata) \
505 CLK_OF_TABLES() \
496 KERNEL_DTB() 506 KERNEL_DTB()
497 507
498#define INIT_TEXT \ 508#define INIT_TEXT \
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3994d7790b23..bcbdd7484e58 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -74,9 +74,10 @@ enum acpi_address_range_id {
74 74
75/* Table Handlers */ 75/* Table Handlers */
76 76
77typedef int (*acpi_table_handler) (struct acpi_table_header *table); 77typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
78 78
79typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end); 79typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header,
80 const unsigned long end);
80 81
81#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE 82#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
82void acpi_initrd_override(void *data, size_t size); 83void acpi_initrd_override(void *data, size_t size);
@@ -95,10 +96,14 @@ int acpi_mps_check (void);
95int acpi_numa_init (void); 96int acpi_numa_init (void);
96 97
97int acpi_table_init (void); 98int acpi_table_init (void);
98int acpi_table_parse (char *id, acpi_table_handler handler); 99int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
99int __init acpi_table_parse_entries(char *id, unsigned long table_size, 100int __init acpi_table_parse_entries(char *id, unsigned long table_size,
100 int entry_id, acpi_table_entry_handler handler, unsigned int max_entries); 101 int entry_id,
101int acpi_table_parse_madt (enum acpi_madt_type id, acpi_table_entry_handler handler, unsigned int max_entries); 102 acpi_tbl_entry_handler handler,
103 unsigned int max_entries);
104int acpi_table_parse_madt(enum acpi_madt_type id,
105 acpi_tbl_entry_handler handler,
106 unsigned int max_entries);
102int acpi_parse_mcfg (struct acpi_table_header *header); 107int acpi_parse_mcfg (struct acpi_table_header *header);
103void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); 108void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
104 109
@@ -358,8 +363,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
358#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ 363#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
359 (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \ 364 (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
360 defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) && \ 365 defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) && \
361 (defined(CONFIG_ACPI_CONTAINER) || \ 366 defined(CONFIG_ACPI_CONTAINER)
362 defined(CONFIG_ACPI_CONTAINER_MODULE))
363#define ACPI_HOTPLUG_OST 367#define ACPI_HOTPLUG_OST
364#endif 368#endif
365 369
@@ -511,7 +515,7 @@ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
511static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } 515static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
512#endif 516#endif
513 517
514#ifdef CONFIG_ACPI_SLEEP 518#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
515int acpi_dev_suspend_late(struct device *dev); 519int acpi_dev_suspend_late(struct device *dev);
516int acpi_dev_resume_early(struct device *dev); 520int acpi_dev_resume_early(struct device *dev);
517int acpi_subsys_prepare(struct device *dev); 521int acpi_subsys_prepare(struct device *dev);
@@ -526,9 +530,14 @@ static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
526#endif 530#endif
527 531
528#if defined(CONFIG_ACPI) && defined(CONFIG_PM) 532#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
533struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
529int acpi_dev_pm_attach(struct device *dev, bool power_on); 534int acpi_dev_pm_attach(struct device *dev, bool power_on);
530void acpi_dev_pm_detach(struct device *dev, bool power_off); 535void acpi_dev_pm_detach(struct device *dev, bool power_off);
531#else 536#else
537static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
538{
539 return NULL;
540}
532static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) 541static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
533{ 542{
534 return -ENODEV; 543 return -ENODEV;
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 544abdb2238c..ec10e1b24c1c 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -49,8 +49,8 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
49} 49}
50#endif 50#endif
51 51
52extern void cper_print_aer(const char *prefix, int cper_severity, 52extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
53 struct aer_capability_regs *aer); 53 int cper_severity, struct aer_capability_regs *aer);
54extern int cper_severity_to_aer(int cper_severity); 54extern int cper_severity_to_aer(int cper_severity);
55extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, 55extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
56 int severity); 56 int severity);
diff --git a/include/linux/async.h b/include/linux/async.h
index 7a24fe9b44b4..a2e3f18b2ad6 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -19,8 +19,7 @@ typedef u64 async_cookie_t;
19typedef void (async_func_ptr) (void *data, async_cookie_t cookie); 19typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
20struct async_domain { 20struct async_domain {
21 struct list_head node; 21 struct list_head node;
22 struct list_head domain; 22 struct list_head pending;
23 int count;
24 unsigned registered:1; 23 unsigned registered:1;
25}; 24};
26 25
@@ -29,8 +28,7 @@ struct async_domain {
29 */ 28 */
30#define ASYNC_DOMAIN(_name) \ 29#define ASYNC_DOMAIN(_name) \
31 struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ 30 struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
32 .domain = LIST_HEAD_INIT(_name.domain), \ 31 .pending = LIST_HEAD_INIT(_name.pending), \
33 .count = 0, \
34 .registered = 1 } 32 .registered = 1 }
35 33
36/* 34/*
@@ -39,8 +37,7 @@ struct async_domain {
39 */ 37 */
40#define ASYNC_DOMAIN_EXCLUSIVE(_name) \ 38#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
41 struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ 39 struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
42 .domain = LIST_HEAD_INIT(_name.domain), \ 40 .pending = LIST_HEAD_INIT(_name.pending), \
43 .count = 0, \
44 .registered = 0 } 41 .registered = 0 }
45 42
46extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); 43extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
@@ -52,4 +49,5 @@ extern void async_synchronize_full_domain(struct async_domain *domain);
52extern void async_synchronize_cookie(async_cookie_t cookie); 49extern void async_synchronize_cookie(async_cookie_t cookie);
53extern void async_synchronize_cookie_domain(async_cookie_t cookie, 50extern void async_synchronize_cookie_domain(async_cookie_t cookie,
54 struct async_domain *domain); 51 struct async_domain *domain);
52extern bool current_is_async(void);
55#endif 53#endif
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 408da9502177..8f7a3d68371a 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -297,10 +297,12 @@ enum {
297 ATA_LOG_SATA_NCQ = 0x10, 297 ATA_LOG_SATA_NCQ = 0x10,
298 ATA_LOG_SATA_ID_DEV_DATA = 0x30, 298 ATA_LOG_SATA_ID_DEV_DATA = 0x30,
299 ATA_LOG_SATA_SETTINGS = 0x08, 299 ATA_LOG_SATA_SETTINGS = 0x08,
300 ATA_LOG_DEVSLP_MDAT = 0x30, 300 ATA_LOG_DEVSLP_OFFSET = 0x30,
301 ATA_LOG_DEVSLP_SIZE = 0x08,
302 ATA_LOG_DEVSLP_MDAT = 0x00,
301 ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, 303 ATA_LOG_DEVSLP_MDAT_MASK = 0x1F,
302 ATA_LOG_DEVSLP_DETO = 0x31, 304 ATA_LOG_DEVSLP_DETO = 0x01,
303 ATA_LOG_DEVSLP_VALID = 0x37, 305 ATA_LOG_DEVSLP_VALID = 0x07,
304 ATA_LOG_DEVSLP_VALID_MASK = 0x80, 306 ATA_LOG_DEVSLP_VALID_MASK = 0x80,
305 307
306 /* READ/WRITE LONG (obsolete) */ 308 /* READ/WRITE LONG (obsolete) */
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 7911fda23bb4..97ade7cdc870 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -22,6 +22,18 @@
22 22
23#define BMA150_DRIVER "bma150" 23#define BMA150_DRIVER "bma150"
24 24
25#define BMA150_RANGE_2G 0
26#define BMA150_RANGE_4G 1
27#define BMA150_RANGE_8G 2
28
29#define BMA150_BW_25HZ 0
30#define BMA150_BW_50HZ 1
31#define BMA150_BW_100HZ 2
32#define BMA150_BW_190HZ 3
33#define BMA150_BW_375HZ 4
34#define BMA150_BW_750HZ 5
35#define BMA150_BW_1500HZ 6
36
25struct bma150_cfg { 37struct bma150_cfg {
26 bool any_motion_int; /* Set to enable any-motion interrupt */ 38 bool any_motion_int; /* Set to enable any-motion interrupt */
27 bool hg_int; /* Set to enable high-G interrupt */ 39 bool hg_int; /* Set to enable high-G interrupt */
@@ -34,8 +46,8 @@ struct bma150_cfg {
34 unsigned char lg_hyst; /* Low-G hysterisis */ 46 unsigned char lg_hyst; /* Low-G hysterisis */
35 unsigned char lg_dur; /* Low-G duration */ 47 unsigned char lg_dur; /* Low-G duration */
36 unsigned char lg_thres; /* Low-G threshold */ 48 unsigned char lg_thres; /* Low-G threshold */
37 unsigned char range; /* BMA0150_RANGE_xxx (in G) */ 49 unsigned char range; /* one of BMA0150_RANGE_xxx */
38 unsigned char bandwidth; /* BMA0150_BW_xxx (in Hz) */ 50 unsigned char bandwidth; /* one of BMA0150_BW_xxx */
39}; 51};
40 52
41struct bma150_platform_data { 53struct bma150_platform_data {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 7d73905dcba2..900af5964f55 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -203,6 +203,7 @@ struct cgroup {
203 203
204 /* For RCU-protected deletion */ 204 /* For RCU-protected deletion */
205 struct rcu_head rcu_head; 205 struct rcu_head rcu_head;
206 struct work_struct free_work;
206 207
207 /* List of events which userspace want to receive */ 208 /* List of events which userspace want to receive */
208 struct list_head event_list; 209 struct list_head event_list;
@@ -558,6 +559,7 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
558 559
559struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, 560struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
560 struct cgroup *cgroup); 561 struct cgroup *cgroup);
562struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
561 563
562/** 564/**
563 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants 565 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
@@ -706,7 +708,6 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
706static inline int cgroup_init_early(void) { return 0; } 708static inline int cgroup_init_early(void) { return 0; }
707static inline int cgroup_init(void) { return 0; } 709static inline int cgroup_init(void) { return 0; }
708static inline void cgroup_fork(struct task_struct *p) {} 710static inline void cgroup_fork(struct task_struct *p) {}
709static inline void cgroup_fork_callbacks(struct task_struct *p) {}
710static inline void cgroup_post_fork(struct task_struct *p) {} 711static inline void cgroup_post_fork(struct task_struct *p) {}
711static inline void cgroup_exit(struct task_struct *p, int callbacks) {} 712static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
712 713
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4989b8a7bed1..7f197d7addb0 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -379,7 +379,13 @@ struct clk_onecell_data {
379}; 379};
380struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); 380struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
381const char *of_clk_get_parent_name(struct device_node *np, int index); 381const char *of_clk_get_parent_name(struct device_node *np, int index);
382
382void of_clk_init(const struct of_device_id *matches); 383void of_clk_init(const struct of_device_id *matches);
383 384
385#define CLK_OF_DECLARE(name, compat, fn) \
386 static const struct of_device_id __clk_of_table_##name \
387 __used __section(__clk_of_table) \
388 = { .compatible = compat, .data = fn };
389
384#endif /* CONFIG_COMMON_CLK */ 390#endif /* CONFIG_COMMON_CLK */
385#endif /* CLK_PROVIDER_H */ 391#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644
index e074fdd5a236..000000000000
--- a/include/linux/clk/sunxi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright 2012 Maxime Ripard
3 *
4 * Maxime Ripard <maxime.ripard@free-electrons.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_CLK_SUNXI_H_
18#define __LINUX_CLK_SUNXI_H_
19
20void __init sunxi_init_clocks(void);
21
22#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8a7096fcb01e..66346521cb65 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -161,6 +161,15 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
161extern void clockevents_suspend(void); 161extern void clockevents_suspend(void);
162extern void clockevents_resume(void); 162extern void clockevents_resume(void);
163 163
164#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
165#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
166extern void tick_broadcast(const struct cpumask *mask);
167#else
168#define tick_broadcast NULL
169#endif
170extern int tick_receive_broadcast(void);
171#endif
172
164#ifdef CONFIG_GENERIC_CLOCKEVENTS 173#ifdef CONFIG_GENERIC_CLOCKEVENTS
165extern void clockevents_notify(unsigned long reason, void *arg); 174extern void clockevents_notify(unsigned long reason, void *arg);
166#else 175#else
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index e24339ccb7f0..b28d161c1091 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -3,12 +3,40 @@
3 3
4#ifdef CONFIG_CONTEXT_TRACKING 4#ifdef CONFIG_CONTEXT_TRACKING
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <linux/percpu.h>
7
8struct context_tracking {
9 /*
10 * When active is false, probes are unset in order
11 * to minimize overhead: TIF flags are cleared
12 * and calls to user_enter/exit are ignored. This
13 * may be further optimized using static keys.
14 */
15 bool active;
16 enum {
17 IN_KERNEL = 0,
18 IN_USER,
19 } state;
20};
21
22DECLARE_PER_CPU(struct context_tracking, context_tracking);
23
24static inline bool context_tracking_in_user(void)
25{
26 return __this_cpu_read(context_tracking.state) == IN_USER;
27}
28
29static inline bool context_tracking_active(void)
30{
31 return __this_cpu_read(context_tracking.active);
32}
6 33
7extern void user_enter(void); 34extern void user_enter(void);
8extern void user_exit(void); 35extern void user_exit(void);
9extern void context_tracking_task_switch(struct task_struct *prev, 36extern void context_tracking_task_switch(struct task_struct *prev,
10 struct task_struct *next); 37 struct task_struct *next);
11#else 38#else
39static inline bool context_tracking_in_user(void) { return false; }
12static inline void user_enter(void) { } 40static inline void user_enter(void) { }
13static inline void user_exit(void) { } 41static inline void user_exit(void) { }
14static inline void context_tracking_task_switch(struct task_struct *prev, 42static inline void context_tracking_task_switch(struct task_struct *prev,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a55b88eaf96a..a22944ca0526 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -89,11 +89,15 @@ struct cpufreq_real_policy {
89}; 89};
90 90
91struct cpufreq_policy { 91struct cpufreq_policy {
92 cpumask_var_t cpus; /* CPUs requiring sw coordination */ 92 /* CPUs sharing clock, require sw coordination */
93 cpumask_var_t related_cpus; /* CPUs with any coordination */ 93 cpumask_var_t cpus; /* Online CPUs only */
94 unsigned int shared_type; /* ANY or ALL affected CPUs 94 cpumask_var_t related_cpus; /* Online + Offline CPUs */
95
96 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
95 should set cpufreq */ 97 should set cpufreq */
96 unsigned int cpu; /* cpu nr of registered CPU */ 98 unsigned int cpu; /* cpu nr of CPU managing this policy */
99 unsigned int last_cpu; /* cpu nr of previous CPU that managed
100 * this policy */
97 struct cpufreq_cpuinfo cpuinfo;/* see above */ 101 struct cpufreq_cpuinfo cpuinfo;/* see above */
98 102
99 unsigned int min; /* in kHz */ 103 unsigned int min; /* in kHz */
@@ -112,16 +116,23 @@ struct cpufreq_policy {
112 struct completion kobj_unregister; 116 struct completion kobj_unregister;
113}; 117};
114 118
115#define CPUFREQ_ADJUST (0) 119#define CPUFREQ_ADJUST (0)
116#define CPUFREQ_INCOMPATIBLE (1) 120#define CPUFREQ_INCOMPATIBLE (1)
117#define CPUFREQ_NOTIFY (2) 121#define CPUFREQ_NOTIFY (2)
118#define CPUFREQ_START (3) 122#define CPUFREQ_START (3)
123#define CPUFREQ_UPDATE_POLICY_CPU (4)
119 124
125/* Only for ACPI */
120#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ 126#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
121#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ 127#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
122#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ 128#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
123#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ 129#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
124 130
131static inline bool policy_is_shared(struct cpufreq_policy *policy)
132{
133 return cpumask_weight(policy->cpus) > 1;
134}
135
125/******************** cpufreq transition notifiers *******************/ 136/******************** cpufreq transition notifiers *******************/
126 137
127#define CPUFREQ_PRECHANGE (0) 138#define CPUFREQ_PRECHANGE (0)
@@ -173,6 +184,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu
173 184
174struct cpufreq_governor { 185struct cpufreq_governor {
175 char name[CPUFREQ_NAME_LEN]; 186 char name[CPUFREQ_NAME_LEN];
187 int initialized;
176 int (*governor) (struct cpufreq_policy *policy, 188 int (*governor) (struct cpufreq_policy *policy,
177 unsigned int event); 189 unsigned int event);
178 ssize_t (*show_setspeed) (struct cpufreq_policy *policy, 190 ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
@@ -308,6 +320,9 @@ __ATTR(_name, 0444, show_##_name, NULL)
308static struct global_attr _name = \ 320static struct global_attr _name = \
309__ATTR(_name, 0644, show_##_name, store_##_name) 321__ATTR(_name, 0644, show_##_name, store_##_name)
310 322
323struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
324void cpufreq_cpu_put(struct cpufreq_policy *data);
325const char *cpufreq_get_current_driver(void);
311 326
312/********************************************************************* 327/*********************************************************************
313 * CPUFREQ 2.6. INTERFACE * 328 * CPUFREQ 2.6. INTERFACE *
@@ -397,14 +412,13 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
397 412
398/* the following 3 funtions are for cpufreq core use only */ 413/* the following 3 funtions are for cpufreq core use only */
399struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); 414struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
400struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
401void cpufreq_cpu_put(struct cpufreq_policy *data);
402 415
403/* the following are really really optional */ 416/* the following are really really optional */
404extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 417extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
405 418
406void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, 419void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
407 unsigned int cpu); 420 unsigned int cpu);
421void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
408 422
409void cpufreq_frequency_table_put_attr(unsigned int cpu); 423void cpufreq_frequency_table_put_attr(unsigned int cpu);
410#endif /* _LINUX_CPUFREQ_H */ 424#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 24cd1037b6d6..480c14dc1ddd 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -32,8 +32,6 @@ struct cpuidle_driver;
32 ****************************/ 32 ****************************/
33 33
34struct cpuidle_state_usage { 34struct cpuidle_state_usage {
35 void *driver_data;
36
37 unsigned long long disable; 35 unsigned long long disable;
38 unsigned long long usage; 36 unsigned long long usage;
39 unsigned long long time; /* in US */ 37 unsigned long long time; /* in US */
@@ -62,26 +60,6 @@ struct cpuidle_state {
62 60
63#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 61#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
64 62
65/**
66 * cpuidle_get_statedata - retrieves private driver state data
67 * @st_usage: the state usage statistics
68 */
69static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
70{
71 return st_usage->driver_data;
72}
73
74/**
75 * cpuidle_set_statedata - stores private driver state data
76 * @st_usage: the state usage statistics
77 * @data: the private data
78 */
79static inline void
80cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
81{
82 st_usage->driver_data = data;
83}
84
85struct cpuidle_device { 63struct cpuidle_device {
86 unsigned int registered:1; 64 unsigned int registered:1;
87 unsigned int enabled:1; 65 unsigned int enabled:1;
diff --git a/include/linux/device.h b/include/linux/device.h
index 43dcda937ddf..001f6637aa47 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -21,6 +21,7 @@
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/pinctrl/devinfo.h>
24#include <linux/pm.h> 25#include <linux/pm.h>
25#include <linux/atomic.h> 26#include <linux/atomic.h>
26#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
@@ -620,6 +621,8 @@ struct acpi_dev_node {
620 * @pm_domain: Provide callbacks that are executed during system suspend, 621 * @pm_domain: Provide callbacks that are executed during system suspend,
621 * hibernation, system resume and during runtime PM transitions 622 * hibernation, system resume and during runtime PM transitions
622 * along with subsystem-level and driver-level callbacks. 623 * along with subsystem-level and driver-level callbacks.
624 * @pins: For device pin management.
625 * See Documentation/pinctrl.txt for details.
623 * @numa_node: NUMA node this device is close to. 626 * @numa_node: NUMA node this device is close to.
624 * @dma_mask: Dma mask (if dma'ble device). 627 * @dma_mask: Dma mask (if dma'ble device).
625 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 628 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -672,6 +675,10 @@ struct device {
672 struct dev_pm_info power; 675 struct dev_pm_info power;
673 struct dev_pm_domain *pm_domain; 676 struct dev_pm_domain *pm_domain;
674 677
678#ifdef CONFIG_PINCTRL
679 struct dev_pin_info *pins;
680#endif
681
675#ifdef CONFIG_NUMA 682#ifdef CONFIG_NUMA
676 int numa_node; /* NUMA node this device is close to */ 683 int numa_node; /* NUMA node this device is close to */
677#endif 684#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8b84916dc671..7a9498ab3c2d 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -618,18 +618,30 @@ extern int __init efi_setup_pcdp_console(char *);
618#endif 618#endif
619 619
620/* 620/*
621 * We play games with efi_enabled so that the compiler will, if possible, remove 621 * We play games with efi_enabled so that the compiler will, if
622 * EFI-related code altogether. 622 * possible, remove EFI-related code altogether.
623 */ 623 */
624#define EFI_BOOT 0 /* Were we booted from EFI? */
625#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
626#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
627#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
628#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
629#define EFI_64BIT 5 /* Is the firmware 64-bit? */
630
624#ifdef CONFIG_EFI 631#ifdef CONFIG_EFI
625# ifdef CONFIG_X86 632# ifdef CONFIG_X86
626 extern int efi_enabled; 633extern int efi_enabled(int facility);
627 extern bool efi_64bit;
628# else 634# else
629# define efi_enabled 1 635static inline int efi_enabled(int facility)
636{
637 return 1;
638}
630# endif 639# endif
631#else 640#else
632# define efi_enabled 0 641static inline int efi_enabled(int facility)
642{
643 return 0;
644}
633#endif 645#endif
634 646
635/* 647/*
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c03af7687bb4..186620631750 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -138,6 +138,7 @@ extern void elv_drain_elevator(struct request_queue *);
138/* 138/*
139 * io scheduler registration 139 * io scheduler registration
140 */ 140 */
141extern void __init load_default_elevator_module(void);
141extern int elv_register(struct elevator_type *); 142extern int elv_register(struct elevator_type *);
142extern void elv_unregister(struct elevator_type *); 143extern void elv_unregister(struct elevator_type *);
143 144
@@ -206,5 +207,9 @@ enum {
206 INIT_LIST_HEAD(&(rq)->csd.list); \ 207 INIT_LIST_HEAD(&(rq)->csd.list); \
207 } while (0) 208 } while (0)
208 209
210#else /* CONFIG_BLOCK */
211
212static inline void load_default_elevator_module(void) { }
213
209#endif /* CONFIG_BLOCK */ 214#endif /* CONFIG_BLOCK */
210#endif 215#endif
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e4238ceaa4d6..e70df40d84f6 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -13,6 +13,11 @@ extern bool pm_freezing; /* PM freezing in effect */
13extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ 13extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
14 14
15/* 15/*
16 * Timeout for stopping processes
17 */
18extern unsigned int freeze_timeout_msecs;
19
20/*
16 * Check if a process has been frozen 21 * Check if a process has been frozen
17 */ 22 */
18static inline bool frozen(struct task_struct *p) 23static inline bool frozen(struct task_struct *p)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 92691d85c320..e5ca8ef50e9b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -74,7 +74,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
74 * SAVE_REGS - The ftrace_ops wants regs saved at each function called 74 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
75 * and passed to the callback. If this flag is set, but the 75 * and passed to the callback. If this flag is set, but the
76 * architecture does not support passing regs 76 * architecture does not support passing regs
77 * (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the 77 * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
78 * ftrace_ops will fail to register, unless the next flag 78 * ftrace_ops will fail to register, unless the next flag
79 * is set. 79 * is set.
80 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the 80 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
@@ -418,7 +418,7 @@ void ftrace_modify_all_code(int command);
418#endif 418#endif
419 419
420#ifndef FTRACE_REGS_ADDR 420#ifndef FTRACE_REGS_ADDR
421#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS 421#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
422# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) 422# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
423#else 423#else
424# define FTRACE_REGS_ADDR FTRACE_ADDR 424# define FTRACE_REGS_ADDR FTRACE_ADDR
@@ -480,7 +480,7 @@ extern int ftrace_make_nop(struct module *mod,
480 */ 480 */
481extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 481extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
482 482
483#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS 483#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
484/** 484/**
485 * ftrace_modify_call - convert from one addr to another (no nop) 485 * ftrace_modify_call - convert from one addr to another (no nop)
486 * @rec: the mcount call site record 486 * @rec: the mcount call site record
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index a3d489531d83..13a54d0bdfa8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -49,7 +49,6 @@ struct trace_entry {
49 unsigned char flags; 49 unsigned char flags;
50 unsigned char preempt_count; 50 unsigned char preempt_count;
51 int pid; 51 int pid;
52 int padding;
53}; 52};
54 53
55#define FTRACE_MAX_EVENT \ 54#define FTRACE_MAX_EVENT \
@@ -84,6 +83,9 @@ struct trace_iterator {
84 long idx; 83 long idx;
85 84
86 cpumask_var_t started; 85 cpumask_var_t started;
86
87 /* it's true when current open file is snapshot */
88 bool snapshot;
87}; 89};
88 90
89enum trace_iter_flags { 91enum trace_iter_flags {
@@ -272,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
272extern int trace_add_event_call(struct ftrace_event_call *call); 274extern int trace_add_event_call(struct ftrace_event_call *call);
273extern void trace_remove_event_call(struct ftrace_event_call *call); 275extern void trace_remove_event_call(struct ftrace_event_call *call);
274 276
275#define is_signed_type(type) (((type)(-1)) < 0) 277#define is_signed_type(type) (((type)(-1)) < (type)0)
276 278
277int trace_set_clr_event(const char *system, const char *event, int set); 279int trace_set_clr_event(const char *system, const char *event, int set);
278 280
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 624ef3f45c8e..29eb805ea4a6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -153,7 +153,7 @@ extern void rcu_nmi_exit(void);
153 */ 153 */
154#define __irq_enter() \ 154#define __irq_enter() \
155 do { \ 155 do { \
156 vtime_account_irq_enter(current); \ 156 account_irq_enter_time(current); \
157 add_preempt_count(HARDIRQ_OFFSET); \ 157 add_preempt_count(HARDIRQ_OFFSET); \
158 trace_hardirq_enter(); \ 158 trace_hardirq_enter(); \
159 } while (0) 159 } while (0)
@@ -169,7 +169,7 @@ extern void irq_enter(void);
169#define __irq_exit() \ 169#define __irq_exit() \
170 do { \ 170 do { \
171 trace_hardirq_exit(); \ 171 trace_hardirq_exit(); \
172 vtime_account_irq_exit(current); \ 172 account_irq_exit_time(current); \
173 sub_preempt_count(HARDIRQ_OFFSET); \ 173 sub_preempt_count(HARDIRQ_OFFSET); \
174 } while (0) 174 } while (0)
175 175
@@ -180,10 +180,10 @@ extern void irq_exit(void);
180 180
181#define nmi_enter() \ 181#define nmi_enter() \
182 do { \ 182 do { \
183 lockdep_off(); \
183 ftrace_nmi_enter(); \ 184 ftrace_nmi_enter(); \
184 BUG_ON(in_nmi()); \ 185 BUG_ON(in_nmi()); \
185 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ 186 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
186 lockdep_off(); \
187 rcu_nmi_enter(); \ 187 rcu_nmi_enter(); \
188 trace_hardirq_enter(); \ 188 trace_hardirq_enter(); \
189 } while (0) 189 } while (0)
@@ -192,10 +192,10 @@ extern void irq_exit(void);
192 do { \ 192 do { \
193 trace_hardirq_exit(); \ 193 trace_hardirq_exit(); \
194 rcu_nmi_exit(); \ 194 rcu_nmi_exit(); \
195 lockdep_on(); \
196 BUG_ON(!in_nmi()); \ 195 BUG_ON(!in_nmi()); \
197 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ 196 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
198 ftrace_nmi_exit(); \ 197 ftrace_nmi_exit(); \
198 lockdep_on(); \
199 } while (0) 199 } while (0)
200 200
201#endif /* LINUX_HARDIRQ_H */ 201#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 82b29ae6ebb0..b2514f70d591 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -20,16 +20,4 @@ struct device *hwmon_device_register(struct device *dev);
20 20
21void hwmon_device_unregister(struct device *dev); 21void hwmon_device_unregister(struct device *dev);
22 22
23/* Scale user input to sensible values */
24static inline int SENSORS_LIMIT(long value, long low, long high)
25{
26 if (value < low)
27 return low;
28 else if (value > high)
29 return high;
30 else
31 return value;
32}
33
34#endif 23#endif
35
diff --git a/include/linux/init.h b/include/linux/init.h
index 10ed4f436458..861814710d52 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -153,6 +153,7 @@ extern unsigned int reset_devices;
153/* used by init/main.c */ 153/* used by init/main.c */
154void setup_arch(char **); 154void setup_arch(char **);
155void prepare_namespace(void); 155void prepare_namespace(void);
156void __init load_default_modules(void);
156 157
157extern void (*late_time_init)(void); 158extern void (*late_time_init)(void);
158 159
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6d087c5f57f7..5cd0f0949927 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -10,7 +10,9 @@
10#include <linux/pid_namespace.h> 10#include <linux/pid_namespace.h>
11#include <linux/user_namespace.h> 11#include <linux/user_namespace.h>
12#include <linux/securebits.h> 12#include <linux/securebits.h>
13#include <linux/seqlock.h>
13#include <net/net_namespace.h> 14#include <net/net_namespace.h>
15#include <linux/sched/rt.h>
14 16
15#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
16# define INIT_PUSHABLE_TASKS(tsk) \ 18# define INIT_PUSHABLE_TASKS(tsk) \
@@ -141,6 +143,15 @@ extern struct task_group root_task_group;
141# define INIT_PERF_EVENTS(tsk) 143# define INIT_PERF_EVENTS(tsk)
142#endif 144#endif
143 145
146#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
147# define INIT_VTIME(tsk) \
148 .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
149 .vtime_snap = 0, \
150 .vtime_snap_whence = VTIME_SYS,
151#else
152# define INIT_VTIME(tsk)
153#endif
154
144#define INIT_TASK_COMM "swapper" 155#define INIT_TASK_COMM "swapper"
145 156
146/* 157/*
@@ -210,6 +221,7 @@ extern struct task_group root_task_group;
210 INIT_TRACE_RECURSION \ 221 INIT_TRACE_RECURSION \
211 INIT_TASK_RCU_PREEMPT(tsk) \ 222 INIT_TASK_RCU_PREEMPT(tsk) \
212 INIT_CPUSET_SEQ \ 223 INIT_CPUSET_SEQ \
224 INIT_VTIME(tsk) \
213} 225}
214 226
215 227
diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h
index 57e01a7cb006..010d98175efa 100644
--- a/include/linux/input/adxl34x.h
+++ b/include/linux/input/adxl34x.h
@@ -13,6 +13,8 @@
13#ifndef __LINUX_INPUT_ADXL34X_H__ 13#ifndef __LINUX_INPUT_ADXL34X_H__
14#define __LINUX_INPUT_ADXL34X_H__ 14#define __LINUX_INPUT_ADXL34X_H__
15 15
16#include <linux/input.h>
17
16struct adxl34x_platform_data { 18struct adxl34x_platform_data {
17 19
18 /* 20 /*
diff --git a/include/linux/input/tegra_kbc.h b/include/linux/input/tegra_kbc.h
deleted file mode 100644
index a13025612939..000000000000
--- a/include/linux/input/tegra_kbc.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Platform definitions for tegra-kbc keyboard input driver
3 *
4 * Copyright (c) 2010-2011, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef ASMARM_ARCH_TEGRA_KBC_H
22#define ASMARM_ARCH_TEGRA_KBC_H
23
24#include <linux/types.h>
25#include <linux/input/matrix_keypad.h>
26
27#define KBC_MAX_GPIO 24
28#define KBC_MAX_KPENT 8
29
30#define KBC_MAX_ROW 16
31#define KBC_MAX_COL 8
32#define KBC_MAX_KEY (KBC_MAX_ROW * KBC_MAX_COL)
33
34enum tegra_pin_type {
35 PIN_CFG_IGNORE,
36 PIN_CFG_COL,
37 PIN_CFG_ROW,
38};
39
40struct tegra_kbc_pin_cfg {
41 enum tegra_pin_type type;
42 unsigned char num;
43};
44
45struct tegra_kbc_wake_key {
46 u8 row:4;
47 u8 col:4;
48};
49
50struct tegra_kbc_platform_data {
51 unsigned int debounce_cnt;
52 unsigned int repeat_cnt;
53
54 struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
55 const struct matrix_keymap_data *keymap_data;
56
57 u32 wakeup_key;
58 bool wakeup;
59 bool use_fn_map;
60 bool use_ghost_filter;
61};
62#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fdf2c4a238cc..bc4e06611958 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -509,8 +509,11 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
509 509
510/* Handle dynamic irq creation and destruction */ 510/* Handle dynamic irq creation and destruction */
511extern unsigned int create_irq_nr(unsigned int irq_want, int node); 511extern unsigned int create_irq_nr(unsigned int irq_want, int node);
512extern unsigned int __create_irqs(unsigned int from, unsigned int count,
513 int node);
512extern int create_irq(void); 514extern int create_irq(void);
513extern void destroy_irq(unsigned int irq); 515extern void destroy_irq(unsigned int irq);
516extern void destroy_irqs(unsigned int irq, unsigned int count);
514 517
515/* 518/*
516 * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and 519 * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
@@ -528,6 +531,8 @@ extern int irq_set_handler_data(unsigned int irq, void *data);
528extern int irq_set_chip_data(unsigned int irq, void *data); 531extern int irq_set_chip_data(unsigned int irq, void *data);
529extern int irq_set_irq_type(unsigned int irq, unsigned int type); 532extern int irq_set_irq_type(unsigned int irq, unsigned int type);
530extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); 533extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
534extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
535 struct msi_desc *entry);
531extern struct irq_data *irq_get_irq_data(unsigned int irq); 536extern struct irq_data *irq_get_irq_data(unsigned int irq);
532 537
533static inline struct irq_chip *irq_get_chip(unsigned int irq) 538static inline struct irq_chip *irq_get_chip(unsigned int irq)
@@ -590,6 +595,9 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
590#define irq_alloc_desc_from(from, node) \ 595#define irq_alloc_desc_from(from, node) \
591 irq_alloc_descs(-1, from, 1, node) 596 irq_alloc_descs(-1, from, 1, node)
592 597
598#define irq_alloc_descs_from(from, cnt, node) \
599 irq_alloc_descs(-1, from, cnt, node)
600
593void irq_free_descs(unsigned int irq, unsigned int cnt); 601void irq_free_descs(unsigned int irq, unsigned int cnt);
594int irq_reserve_irqs(unsigned int from, unsigned int cnt); 602int irq_reserve_irqs(unsigned int from, unsigned int cnt);
595 603
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6a9e8f5399e2..f5dbce50466e 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -3,6 +3,20 @@
3 3
4#include <linux/llist.h> 4#include <linux/llist.h>
5 5
6/*
7 * An entry can be in one of four states:
8 *
9 * free NULL, 0 -> {claimed} : free to be used
10 * claimed NULL, 3 -> {pending} : claimed to be enqueued
11 * pending next, 3 -> {busy} : queued, pending callback
12 * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
13 */
14
15#define IRQ_WORK_PENDING 1UL
16#define IRQ_WORK_BUSY 2UL
17#define IRQ_WORK_FLAGS 3UL
18#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
19
6struct irq_work { 20struct irq_work {
7 unsigned long flags; 21 unsigned long flags;
8 struct llist_node llnode; 22 struct llist_node llnode;
@@ -16,8 +30,14 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
16 work->func = func; 30 work->func = func;
17} 31}
18 32
19bool irq_work_queue(struct irq_work *work); 33void irq_work_queue(struct irq_work *work);
20void irq_work_run(void); 34void irq_work_run(void);
21void irq_work_sync(struct irq_work *work); 35void irq_work_sync(struct irq_work *work);
22 36
37#ifdef CONFIG_IRQ_WORK
38bool irq_work_needs_cpu(void);
39#else
40static bool irq_work_needs_cpu(void) { return false; }
41#endif
42
23#endif /* _LINUX_IRQ_WORK_H */ 43#endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 66b70780e910..ed5f6ed6eb77 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -127,7 +127,7 @@ extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t)
127extern void account_steal_time(cputime_t); 127extern void account_steal_time(cputime_t);
128extern void account_idle_time(cputime_t); 128extern void account_idle_time(cputime_t);
129 129
130#ifdef CONFIG_VIRT_CPU_ACCOUNTING 130#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
131static inline void account_process_tick(struct task_struct *tsk, int user) 131static inline void account_process_tick(struct task_struct *tsk, int user)
132{ 132{
133 vtime_account_user(tsk); 133 vtime_account_user(tsk);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 23755ba42abc..4b6ef4d33cc2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -49,16 +49,6 @@
49#define KPROBE_REENTER 0x00000004 49#define KPROBE_REENTER 0x00000004
50#define KPROBE_HIT_SSDONE 0x00000008 50#define KPROBE_HIT_SSDONE 0x00000008
51 51
52/*
53 * If function tracer is enabled and the arch supports full
54 * passing of pt_regs to function tracing, then kprobes can
55 * optimize on top of function tracing.
56 */
57#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
58 && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
59# define KPROBES_CAN_USE_FTRACE
60#endif
61
62/* Attach to insert probes on any functions which should be ignored*/ 52/* Attach to insert probes on any functions which should be ignored*/
63#define __kprobes __attribute__((__section__(".kprobes.text"))) 53#define __kprobes __attribute__((__section__(".kprobes.text")))
64 54
@@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
316#endif 306#endif
317 307
318#endif /* CONFIG_OPTPROBES */ 308#endif /* CONFIG_OPTPROBES */
319#ifdef KPROBES_CAN_USE_FTRACE 309#ifdef CONFIG_KPROBES_ON_FTRACE
320extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 310extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
321 struct ftrace_ops *ops, struct pt_regs *regs); 311 struct ftrace_ops *ops, struct pt_regs *regs);
322extern int arch_prepare_kprobe_ftrace(struct kprobe *p); 312extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c497ab0d03d..b7996a768eb2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -22,6 +22,7 @@
22#include <linux/rcupdate.h> 22#include <linux/rcupdate.h>
23#include <linux/ratelimit.h> 23#include <linux/ratelimit.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/irqflags.h>
25#include <asm/signal.h> 26#include <asm/signal.h>
26 27
27#include <linux/kvm.h> 28#include <linux/kvm.h>
@@ -740,15 +741,52 @@ static inline int kvm_deassign_device(struct kvm *kvm,
740} 741}
741#endif /* CONFIG_IOMMU_API */ 742#endif /* CONFIG_IOMMU_API */
742 743
743static inline void kvm_guest_enter(void) 744static inline void __guest_enter(void)
744{ 745{
745 BUG_ON(preemptible());
746 /* 746 /*
747 * This is running in ioctl context so we can avoid 747 * This is running in ioctl context so we can avoid
748 * the call to vtime_account() with its unnecessary idle check. 748 * the call to vtime_account() with its unnecessary idle check.
749 */ 749 */
750 vtime_account_system_irqsafe(current); 750 vtime_account_system(current);
751 current->flags |= PF_VCPU; 751 current->flags |= PF_VCPU;
752}
753
754static inline void __guest_exit(void)
755{
756 /*
757 * This is running in ioctl context so we can avoid
758 * the call to vtime_account() with its unnecessary idle check.
759 */
760 vtime_account_system(current);
761 current->flags &= ~PF_VCPU;
762}
763
764#ifdef CONFIG_CONTEXT_TRACKING
765extern void guest_enter(void);
766extern void guest_exit(void);
767
768#else /* !CONFIG_CONTEXT_TRACKING */
769static inline void guest_enter(void)
770{
771 __guest_enter();
772}
773
774static inline void guest_exit(void)
775{
776 __guest_exit();
777}
778#endif /* !CONFIG_CONTEXT_TRACKING */
779
780static inline void kvm_guest_enter(void)
781{
782 unsigned long flags;
783
784 BUG_ON(preemptible());
785
786 local_irq_save(flags);
787 guest_enter();
788 local_irq_restore(flags);
789
752 /* KVM does not hold any references to rcu protected data when it 790 /* KVM does not hold any references to rcu protected data when it
753 * switches CPU into a guest mode. In fact switching to a guest mode 791 * switches CPU into a guest mode. In fact switching to a guest mode
754 * is very similar to exiting to userspase from rcu point of view. In 792 * is very similar to exiting to userspase from rcu point of view. In
@@ -761,12 +799,11 @@ static inline void kvm_guest_enter(void)
761 799
762static inline void kvm_guest_exit(void) 800static inline void kvm_guest_exit(void)
763{ 801{
764 /* 802 unsigned long flags;
765 * This is running in ioctl context so we can avoid 803
766 * the call to vtime_account() with its unnecessary idle check. 804 local_irq_save(flags);
767 */ 805 guest_exit();
768 vtime_account_system_irqsafe(current); 806 local_irq_restore(flags);
769 current->flags &= ~PF_VCPU;
770} 807}
771 808
772/* 809/*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83ba0ab2c915..649e5f86b5f0 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -652,8 +652,8 @@ struct ata_device {
652 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ 652 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
653 }; 653 };
654 654
655 /* Identify Device Data Log (30h), SATA Settings (page 08h) */ 655 /* DEVSLP Timing Variables from Identify Device Data Log */
656 u8 sata_settings[ATA_SECT_SIZE]; 656 u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
657 657
658 /* error history */ 658 /* error history */
659 int spdn_cnt; 659 int spdn_cnt;
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 79603a6c356f..4ad06e824f76 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -36,7 +36,7 @@ struct ps2dev {
36 wait_queue_head_t wait; 36 wait_queue_head_t wait;
37 37
38 unsigned long flags; 38 unsigned long flags;
39 unsigned char cmdbuf[6]; 39 unsigned char cmdbuf[8];
40 unsigned char cmdcnt; 40 unsigned char cmdcnt;
41 unsigned char nak; 41 unsigned char nak;
42}; 42};
diff --git a/include/linux/llist.h b/include/linux/llist.h
index a5199f6d0e82..d0ab98f73d38 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list)
125 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) 125 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
126 126
127/** 127/**
128 * llist_for_each_entry_safe - iterate safely against remove over some entries
129 * of lock-less list of given type.
130 * @pos: the type * to use as a loop cursor.
131 * @n: another type * to use as a temporary storage.
132 * @node: the fist entry of deleted list entries.
133 * @member: the name of the llist_node with the struct.
134 *
135 * In general, some entries of the lock-less list can be traversed
136 * safely only after being removed from list, so start with an entry
137 * instead of list head. This variant allows removal of entries
138 * as we iterate.
139 *
140 * If being used on entries deleted from lock-less list directly, the
141 * traverse order is from the newest to the oldest added entry. If
142 * you want to traverse from the oldest to the newest, you must
143 * reverse the order by yourself before traversing.
144 */
145#define llist_for_each_entry_safe(pos, n, node, member) \
146 for ((pos) = llist_entry((node), typeof(*(pos)), member), \
147 (n) = (pos)->member.next; \
148 &(pos)->member != NULL; \
149 (pos) = llist_entry(n, typeof(*(pos)), member), \
150 (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
151
152/**
128 * llist_empty - tests whether a lock-less list is empty 153 * llist_empty - tests whether a lock-less list is empty
129 * @head: the list to test 154 * @head: the list to test
130 * 155 *
diff --git a/include/linux/mailbox.h b/include/linux/mailbox.h
new file mode 100644
index 000000000000..5161f63ec1c8
--- /dev/null
+++ b/include/linux/mailbox.h
@@ -0,0 +1,17 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms and conditions of the GNU General Public License,
4 * version 2, as published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9 * more details.
10 *
11 * You should have received a copy of the GNU General Public License along with
12 * this program. If not, see <http://www.gnu.org/licenses/>.
13 */
14
15int pl320_ipc_transmit(u32 *data);
16int pl320_ipc_register_notifier(struct notifier_block *nb);
17int pl320_ipc_unregister_notifier(struct notifier_block *nb);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0108a56f814e..28bd5fa2ff2e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size;
429 * the slab_mutex must be held when looping through those caches 429 * the slab_mutex must be held when looping through those caches
430 */ 430 */
431#define for_each_memcg_cache_index(_idx) \ 431#define for_each_memcg_cache_index(_idx) \
432 for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) 432 for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
433 433
434static inline bool memcg_kmem_enabled(void) 434static inline bool memcg_kmem_enabled(void)
435{ 435{
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 2138bd33021a..80e3b8683a84 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -131,7 +131,7 @@ struct abx500_maxim_parameters {
131 * @nominal_voltage: Nominal voltage of the battery in mV 131 * @nominal_voltage: Nominal voltage of the battery in mV
132 * @termination_vol: max voltage upto which battery can be charged 132 * @termination_vol: max voltage upto which battery can be charged
133 * @termination_curr battery charging termination current in mA 133 * @termination_curr battery charging termination current in mA
134 * @recharge_vol battery voltage limit that will trigger a new 134 * @recharge_cap battery capacity limit that will trigger a new
135 * full charging cycle in the case where maintenan- 135 * full charging cycle in the case where maintenan-
136 * -ce charging has been disabled 136 * -ce charging has been disabled
137 * @normal_cur_lvl: charger current in normal state in mA 137 * @normal_cur_lvl: charger current in normal state in mA
@@ -160,7 +160,7 @@ struct abx500_battery_type {
160 int nominal_voltage; 160 int nominal_voltage;
161 int termination_vol; 161 int termination_vol;
162 int termination_curr; 162 int termination_curr;
163 int recharge_vol; 163 int recharge_cap;
164 int normal_cur_lvl; 164 int normal_cur_lvl;
165 int normal_vol_lvl; 165 int normal_vol_lvl;
166 int maint_a_cur_lvl; 166 int maint_a_cur_lvl;
@@ -224,6 +224,7 @@ struct abx500_bm_charger_parameters {
224 * @bkup_bat_v voltage which we charge the backup battery with 224 * @bkup_bat_v voltage which we charge the backup battery with
225 * @bkup_bat_i current which we charge the backup battery with 225 * @bkup_bat_i current which we charge the backup battery with
226 * @no_maintenance indicates that maintenance charging is disabled 226 * @no_maintenance indicates that maintenance charging is disabled
227 * @capacity_scaling indicates whether capacity scaling is to be used
227 * @abx500_adc_therm placement of thermistor, batctrl or battemp adc 228 * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
228 * @chg_unknown_bat flag to enable charging of unknown batteries 229 * @chg_unknown_bat flag to enable charging of unknown batteries
229 * @enable_overshoot flag to enable VBAT overshoot control 230 * @enable_overshoot flag to enable VBAT overshoot control
@@ -253,7 +254,11 @@ struct abx500_bm_data {
253 int usb_safety_tmr_h; 254 int usb_safety_tmr_h;
254 int bkup_bat_v; 255 int bkup_bat_v;
255 int bkup_bat_i; 256 int bkup_bat_i;
257 bool autopower_cfg;
258 bool ac_enabled;
259 bool usb_enabled;
256 bool no_maintenance; 260 bool no_maintenance;
261 bool capacity_scaling;
257 bool chg_unknown_bat; 262 bool chg_unknown_bat;
258 bool enable_overshoot; 263 bool enable_overshoot;
259 bool auto_trig; 264 bool auto_trig;
@@ -272,16 +277,14 @@ struct abx500_bm_data {
272 const struct abx500_fg_parameters *fg_params; 277 const struct abx500_fg_parameters *fg_params;
273}; 278};
274 279
275extern struct abx500_bm_data ab8500_bm_data;
276
277enum { 280enum {
278 NTC_EXTERNAL = 0, 281 NTC_EXTERNAL = 0,
279 NTC_INTERNAL, 282 NTC_INTERNAL,
280}; 283};
281 284
282int bmdevs_of_probe(struct device *dev, 285int ab8500_bm_of_probe(struct device *dev,
283 struct device_node *np, 286 struct device_node *np,
284 struct abx500_bm_data **battery); 287 struct abx500_bm_data *bm);
285 288
286int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, 289int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
287 u8 value); 290 u8 value);
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index 44310c98ee6e..8d35bfe164c8 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -23,6 +23,7 @@
23 * Bank : 0x5 23 * Bank : 0x5
24 */ 24 */
25#define AB8500_USB_LINE_STAT_REG 0x80 25#define AB8500_USB_LINE_STAT_REG 0x80
26#define AB8500_USB_LINK1_STAT_REG 0x94
26 27
27/* 28/*
28 * Charger / status register offfsets 29 * Charger / status register offfsets
@@ -225,6 +226,8 @@
225/* BatCtrl Current Source Constants */ 226/* BatCtrl Current Source Constants */
226#define BAT_CTRL_7U_ENA 0x01 227#define BAT_CTRL_7U_ENA 0x01
227#define BAT_CTRL_20U_ENA 0x02 228#define BAT_CTRL_20U_ENA 0x02
229#define BAT_CTRL_18U_ENA 0x01
230#define BAT_CTRL_16U_ENA 0x02
228#define BAT_CTRL_CMP_ENA 0x04 231#define BAT_CTRL_CMP_ENA 0x04
229#define FORCE_BAT_CTRL_CMP_HIGH 0x08 232#define FORCE_BAT_CTRL_CMP_HIGH 0x08
230#define BAT_CTRL_PULL_UP_ENA 0x10 233#define BAT_CTRL_PULL_UP_ENA 0x10
@@ -355,6 +358,7 @@ struct ab8500_bm_charger_parameters {
355 * @bkup_bat_v voltage which we charge the backup battery with 358 * @bkup_bat_v voltage which we charge the backup battery with
356 * @bkup_bat_i current which we charge the backup battery with 359 * @bkup_bat_i current which we charge the backup battery with
357 * @no_maintenance indicates that maintenance charging is disabled 360 * @no_maintenance indicates that maintenance charging is disabled
361 * @capacity_scaling indicates whether capacity scaling is to be used
358 * @adc_therm placement of thermistor, batctrl or battemp adc 362 * @adc_therm placement of thermistor, batctrl or battemp adc
359 * @chg_unknown_bat flag to enable charging of unknown batteries 363 * @chg_unknown_bat flag to enable charging of unknown batteries
360 * @enable_overshoot flag to enable VBAT overshoot control 364 * @enable_overshoot flag to enable VBAT overshoot control
@@ -383,6 +387,7 @@ struct ab8500_bm_data {
383 int bkup_bat_v; 387 int bkup_bat_v;
384 int bkup_bat_i; 388 int bkup_bat_i;
385 bool no_maintenance; 389 bool no_maintenance;
390 bool capacity_scaling;
386 bool chg_unknown_bat; 391 bool chg_unknown_bat;
387 bool enable_overshoot; 392 bool enable_overshoot;
388 enum abx500_adc_therm adc_therm; 393 enum abx500_adc_therm adc_therm;
@@ -399,30 +404,13 @@ struct ab8500_bm_data {
399 const struct ab8500_fg_parameters *fg_params; 404 const struct ab8500_fg_parameters *fg_params;
400}; 405};
401 406
402struct ab8500_charger_platform_data {
403 char **supplied_to;
404 size_t num_supplicants;
405 bool autopower_cfg;
406};
407
408struct ab8500_btemp_platform_data {
409 char **supplied_to;
410 size_t num_supplicants;
411};
412
413struct ab8500_fg_platform_data {
414 char **supplied_to;
415 size_t num_supplicants;
416};
417
418struct ab8500_chargalg_platform_data {
419 char **supplied_to;
420 size_t num_supplicants;
421};
422struct ab8500_btemp; 407struct ab8500_btemp;
423struct ab8500_gpadc; 408struct ab8500_gpadc;
424struct ab8500_fg; 409struct ab8500_fg;
410
425#ifdef CONFIG_AB8500_BM 411#ifdef CONFIG_AB8500_BM
412extern struct abx500_bm_data ab8500_bm_data;
413
426void ab8500_fg_reinit(void); 414void ab8500_fg_reinit(void);
427void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); 415void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
428struct ab8500_btemp *ab8500_btemp_get(void); 416struct ab8500_btemp *ab8500_btemp_get(void);
@@ -431,44 +419,10 @@ struct ab8500_fg *ab8500_fg_get(void);
431int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev); 419int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
432int ab8500_fg_inst_curr_start(struct ab8500_fg *di); 420int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
433int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res); 421int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
422int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
434int ab8500_fg_inst_curr_done(struct ab8500_fg *di); 423int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
435 424
436#else 425#else
437int ab8500_fg_inst_curr_done(struct ab8500_fg *di) 426static struct abx500_bm_data ab8500_bm_data;
438{
439}
440static void ab8500_fg_reinit(void)
441{
442}
443static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
444{
445}
446static struct ab8500_btemp *ab8500_btemp_get(void)
447{
448 return NULL;
449}
450static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
451{
452 return 0;
453}
454struct ab8500_fg *ab8500_fg_get(void)
455{
456 return NULL;
457}
458static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
459{
460 return -ENODEV;
461}
462
463static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
464{
465 return -ENODEV;
466}
467
468static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
469{
470 return -ENODEV;
471}
472
473#endif 427#endif
474#endif /* _AB8500_BM_H */ 428#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ab8500-gpio.h b/include/linux/mfd/abx500/ab8500-gpio.h
index 2387c207ea86..172b2f201ae0 100644
--- a/include/linux/mfd/abx500/ab8500-gpio.h
+++ b/include/linux/mfd/abx500/ab8500-gpio.h
@@ -14,10 +14,20 @@
14 * registers. 14 * registers.
15 */ 15 */
16 16
17struct ab8500_gpio_platform_data { 17struct abx500_gpio_platform_data {
18 int gpio_base; 18 int gpio_base;
19 u32 irq_base; 19};
20 u8 config_reg[8]; 20
21enum abx500_gpio_pull_updown {
22 ABX500_GPIO_PULL_DOWN = 0x0,
23 ABX500_GPIO_PULL_NONE = 0x1,
24 ABX500_GPIO_PULL_UP = 0x3,
25};
26
27enum abx500_gpio_vinsel {
28 ABX500_GPIO_VINSEL_VBAT = 0x0,
29 ABX500_GPIO_VINSEL_VIN_1V8 = 0x1,
30 ABX500_GPIO_VINSEL_VDD_BIF = 0x2,
21}; 31};
22 32
23#endif /* _AB8500_GPIO_H */ 33#endif /* _AB8500_GPIO_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 1cb5698b4d76..fc0534483c72 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -24,7 +24,7 @@ enum ab8500_version {
24 AB8500_VERSION_AB8500 = 0x0, 24 AB8500_VERSION_AB8500 = 0x0,
25 AB8500_VERSION_AB8505 = 0x1, 25 AB8500_VERSION_AB8505 = 0x1,
26 AB8500_VERSION_AB9540 = 0x2, 26 AB8500_VERSION_AB9540 = 0x2,
27 AB8500_VERSION_AB8540 = 0x3, 27 AB8500_VERSION_AB8540 = 0x4,
28 AB8500_VERSION_UNDEFINED, 28 AB8500_VERSION_UNDEFINED,
29}; 29};
30 30
@@ -32,6 +32,7 @@ enum ab8500_version {
32#define AB8500_CUTEARLY 0x00 32#define AB8500_CUTEARLY 0x00
33#define AB8500_CUT1P0 0x10 33#define AB8500_CUT1P0 0x10
34#define AB8500_CUT1P1 0x11 34#define AB8500_CUT1P1 0x11
35#define AB8500_CUT1P2 0x12 /* Only valid for AB8540 */
35#define AB8500_CUT2P0 0x20 36#define AB8500_CUT2P0 0x20
36#define AB8500_CUT3P0 0x30 37#define AB8500_CUT3P0 0x30
37#define AB8500_CUT3P3 0x33 38#define AB8500_CUT3P3 0x33
@@ -39,6 +40,7 @@ enum ab8500_version {
39/* 40/*
40 * AB8500 bank addresses 41 * AB8500 bank addresses
41 */ 42 */
43#define AB8500_M_FSM_RANK 0x0
42#define AB8500_SYS_CTRL1_BLOCK 0x1 44#define AB8500_SYS_CTRL1_BLOCK 0x1
43#define AB8500_SYS_CTRL2_BLOCK 0x2 45#define AB8500_SYS_CTRL2_BLOCK 0x2
44#define AB8500_REGU_CTRL1 0x3 46#define AB8500_REGU_CTRL1 0x3
@@ -58,6 +60,7 @@ enum ab8500_version {
58#define AB8500_DEVELOPMENT 0x11 60#define AB8500_DEVELOPMENT 0x11
59#define AB8500_DEBUG 0x12 61#define AB8500_DEBUG 0x12
60#define AB8500_PROD_TEST 0x13 62#define AB8500_PROD_TEST 0x13
63#define AB8500_STE_TEST 0x14
61#define AB8500_OTP_EMUL 0x15 64#define AB8500_OTP_EMUL 0x15
62 65
63/* 66/*
@@ -65,11 +68,11 @@ enum ab8500_version {
65 * Values used to index into array ab8500_irq_regoffset[] defined in 68 * Values used to index into array ab8500_irq_regoffset[] defined in
66 * drivers/mdf/ab8500-core.c 69 * drivers/mdf/ab8500-core.c
67 */ 70 */
68/* Definitions for AB8500 and AB9540 */ 71/* Definitions for AB8500, AB9540 and AB8540 */
69/* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */ 72/* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */
70#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 /* not 8505/9540 */ 73#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 /* not 8505/9540 */
71#define AB8500_INT_UN_PLUG_TV_DET 1 /* not 8505/9540 */ 74#define AB8500_INT_UN_PLUG_TV_DET 1 /* not 8505/9540/8540 */
72#define AB8500_INT_PLUG_TV_DET 2 /* not 8505/9540 */ 75#define AB8500_INT_PLUG_TV_DET 2 /* not 8505/9540/8540 */
73#define AB8500_INT_TEMP_WARM 3 76#define AB8500_INT_TEMP_WARM 3
74#define AB8500_INT_PON_KEY2DB_F 4 77#define AB8500_INT_PON_KEY2DB_F 4
75#define AB8500_INT_PON_KEY2DB_R 5 78#define AB8500_INT_PON_KEY2DB_R 5
@@ -77,18 +80,19 @@ enum ab8500_version {
77#define AB8500_INT_PON_KEY1DB_R 7 80#define AB8500_INT_PON_KEY1DB_R 7
78/* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */ 81/* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */
79#define AB8500_INT_BATT_OVV 8 82#define AB8500_INT_BATT_OVV 8
80#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 /* not 8505 */ 83#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 /* not 8505/8540 */
81#define AB8500_INT_MAIN_CH_PLUG_DET 11 /* not 8505 */ 84#define AB8500_INT_MAIN_CH_PLUG_DET 11 /* not 8505/8540 */
82#define AB8500_INT_VBUS_DET_F 14 85#define AB8500_INT_VBUS_DET_F 14
83#define AB8500_INT_VBUS_DET_R 15 86#define AB8500_INT_VBUS_DET_R 15
84/* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */ 87/* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */
85#define AB8500_INT_VBUS_CH_DROP_END 16 88#define AB8500_INT_VBUS_CH_DROP_END 16
86#define AB8500_INT_RTC_60S 17 89#define AB8500_INT_RTC_60S 17
87#define AB8500_INT_RTC_ALARM 18 90#define AB8500_INT_RTC_ALARM 18
91#define AB8540_INT_BIF_INT 19
88#define AB8500_INT_BAT_CTRL_INDB 20 92#define AB8500_INT_BAT_CTRL_INDB 20
89#define AB8500_INT_CH_WD_EXP 21 93#define AB8500_INT_CH_WD_EXP 21
90#define AB8500_INT_VBUS_OVV 22 94#define AB8500_INT_VBUS_OVV 22
91#define AB8500_INT_MAIN_CH_DROP_END 23 /* not 8505/9540 */ 95#define AB8500_INT_MAIN_CH_DROP_END 23 /* not 8505/9540/8540 */
92/* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */ 96/* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */
93#define AB8500_INT_CCN_CONV_ACC 24 97#define AB8500_INT_CCN_CONV_ACC 24
94#define AB8500_INT_INT_AUD 25 98#define AB8500_INT_INT_AUD 25
@@ -99,7 +103,7 @@ enum ab8500_version {
99#define AB8500_INT_BUP_CHG_NOT_OK 30 103#define AB8500_INT_BUP_CHG_NOT_OK 30
100#define AB8500_INT_BUP_CHG_OK 31 104#define AB8500_INT_BUP_CHG_OK 31
101/* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */ 105/* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */
102#define AB8500_INT_GP_HW_ADC_CONV_END 32 /* not 8505 */ 106#define AB8500_INT_GP_HW_ADC_CONV_END 32 /* not 8505/8540 */
103#define AB8500_INT_ACC_DETECT_1DB_F 33 107#define AB8500_INT_ACC_DETECT_1DB_F 33
104#define AB8500_INT_ACC_DETECT_1DB_R 34 108#define AB8500_INT_ACC_DETECT_1DB_R 34
105#define AB8500_INT_ACC_DETECT_22DB_F 35 109#define AB8500_INT_ACC_DETECT_22DB_F 35
@@ -108,23 +112,23 @@ enum ab8500_version {
108#define AB8500_INT_ACC_DETECT_21DB_R 38 112#define AB8500_INT_ACC_DETECT_21DB_R 38
109#define AB8500_INT_GP_SW_ADC_CONV_END 39 113#define AB8500_INT_GP_SW_ADC_CONV_END 39
110/* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */ 114/* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */
111#define AB8500_INT_GPIO6R 40 /* not 8505/9540 */ 115#define AB8500_INT_GPIO6R 40 /* not 8505/9540/8540 */
112#define AB8500_INT_GPIO7R 41 /* not 8505/9540 */ 116#define AB8500_INT_GPIO7R 41 /* not 8505/9540/8540 */
113#define AB8500_INT_GPIO8R 42 /* not 8505/9540 */ 117#define AB8500_INT_GPIO8R 42 /* not 8505/9540/8540 */
114#define AB8500_INT_GPIO9R 43 /* not 8505/9540 */ 118#define AB8500_INT_GPIO9R 43 /* not 8505/9540/8540 */
115#define AB8500_INT_GPIO10R 44 119#define AB8500_INT_GPIO10R 44 /* not 8540 */
116#define AB8500_INT_GPIO11R 45 120#define AB8500_INT_GPIO11R 45 /* not 8540 */
117#define AB8500_INT_GPIO12R 46 /* not 8505 */ 121#define AB8500_INT_GPIO12R 46 /* not 8505/8540 */
118#define AB8500_INT_GPIO13R 47 122#define AB8500_INT_GPIO13R 47 /* not 8540 */
119/* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */ 123/* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */
120#define AB8500_INT_GPIO24R 48 /* not 8505 */ 124#define AB8500_INT_GPIO24R 48 /* not 8505/8540 */
121#define AB8500_INT_GPIO25R 49 /* not 8505 */ 125#define AB8500_INT_GPIO25R 49 /* not 8505/8540 */
122#define AB8500_INT_GPIO36R 50 /* not 8505/9540 */ 126#define AB8500_INT_GPIO36R 50 /* not 8505/9540/8540 */
123#define AB8500_INT_GPIO37R 51 /* not 8505/9540 */ 127#define AB8500_INT_GPIO37R 51 /* not 8505/9540/8540 */
124#define AB8500_INT_GPIO38R 52 /* not 8505/9540 */ 128#define AB8500_INT_GPIO38R 52 /* not 8505/9540/8540 */
125#define AB8500_INT_GPIO39R 53 /* not 8505/9540 */ 129#define AB8500_INT_GPIO39R 53 /* not 8505/9540/8540 */
126#define AB8500_INT_GPIO40R 54 130#define AB8500_INT_GPIO40R 54 /* not 8540 */
127#define AB8500_INT_GPIO41R 55 131#define AB8500_INT_GPIO41R 55 /* not 8540 */
128/* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */ 132/* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */
129#define AB8500_INT_GPIO6F 56 /* not 8505/9540 */ 133#define AB8500_INT_GPIO6F 56 /* not 8505/9540 */
130#define AB8500_INT_GPIO7F 57 /* not 8505/9540 */ 134#define AB8500_INT_GPIO7F 57 /* not 8505/9540 */
@@ -135,14 +139,14 @@ enum ab8500_version {
135#define AB8500_INT_GPIO12F 62 /* not 8505 */ 139#define AB8500_INT_GPIO12F 62 /* not 8505 */
136#define AB8500_INT_GPIO13F 63 140#define AB8500_INT_GPIO13F 63
137/* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */ 141/* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */
138#define AB8500_INT_GPIO24F 64 /* not 8505 */ 142#define AB8500_INT_GPIO24F 64 /* not 8505/8540 */
139#define AB8500_INT_GPIO25F 65 /* not 8505 */ 143#define AB8500_INT_GPIO25F 65 /* not 8505/8540 */
140#define AB8500_INT_GPIO36F 66 /* not 8505/9540 */ 144#define AB8500_INT_GPIO36F 66 /* not 8505/9540/8540 */
141#define AB8500_INT_GPIO37F 67 /* not 8505/9540 */ 145#define AB8500_INT_GPIO37F 67 /* not 8505/9540/8540 */
142#define AB8500_INT_GPIO38F 68 /* not 8505/9540 */ 146#define AB8500_INT_GPIO38F 68 /* not 8505/9540/8540 */
143#define AB8500_INT_GPIO39F 69 /* not 8505/9540 */ 147#define AB8500_INT_GPIO39F 69 /* not 8505/9540/8540 */
144#define AB8500_INT_GPIO40F 70 148#define AB8500_INT_GPIO40F 70 /* not 8540 */
145#define AB8500_INT_GPIO41F 71 149#define AB8500_INT_GPIO41F 71 /* not 8540 */
146/* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */ 150/* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */
147#define AB8500_INT_ADP_SOURCE_ERROR 72 151#define AB8500_INT_ADP_SOURCE_ERROR 72
148#define AB8500_INT_ADP_SINK_ERROR 73 152#define AB8500_INT_ADP_SINK_ERROR 73
@@ -160,42 +164,44 @@ enum ab8500_version {
160#define AB8500_INT_SRP_DETECT 88 164#define AB8500_INT_SRP_DETECT 88
161#define AB8500_INT_USB_CHARGER_NOT_OKR 89 165#define AB8500_INT_USB_CHARGER_NOT_OKR 89
162#define AB8500_INT_ID_WAKEUP_R 90 166#define AB8500_INT_ID_WAKEUP_R 90
167#define AB8500_INT_ID_DET_PLUGR 91 /* 8505/9540 cut2.0 */
163#define AB8500_INT_ID_DET_R1R 92 168#define AB8500_INT_ID_DET_R1R 92
164#define AB8500_INT_ID_DET_R2R 93 169#define AB8500_INT_ID_DET_R2R 93
165#define AB8500_INT_ID_DET_R3R 94 170#define AB8500_INT_ID_DET_R3R 94
166#define AB8500_INT_ID_DET_R4R 95 171#define AB8500_INT_ID_DET_R4R 95
167/* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */ 172/* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */
168#define AB8500_INT_ID_WAKEUP_F 96 173#define AB8500_INT_ID_WAKEUP_F 96 /* not 8505/9540 */
169#define AB8500_INT_ID_DET_R1F 98 174#define AB8500_INT_ID_DET_PLUGF 97 /* 8505/9540 cut2.0 */
170#define AB8500_INT_ID_DET_R2F 99 175#define AB8500_INT_ID_DET_R1F 98 /* not 8505/9540 */
171#define AB8500_INT_ID_DET_R3F 100 176#define AB8500_INT_ID_DET_R2F 99 /* not 8505/9540 */
172#define AB8500_INT_ID_DET_R4F 101 177#define AB8500_INT_ID_DET_R3F 100 /* not 8505/9540 */
173#define AB8500_INT_CHAUTORESTARTAFTSEC 102 178#define AB8500_INT_ID_DET_R4F 101 /* not 8505/9540 */
179#define AB8500_INT_CHAUTORESTARTAFTSEC 102 /* not 8505/9540 */
174#define AB8500_INT_CHSTOPBYSEC 103 180#define AB8500_INT_CHSTOPBYSEC 103
175/* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */ 181/* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */
176#define AB8500_INT_USB_CH_TH_PROT_F 104 182#define AB8500_INT_USB_CH_TH_PROT_F 104
177#define AB8500_INT_USB_CH_TH_PROT_R 105 183#define AB8500_INT_USB_CH_TH_PROT_R 105
178#define AB8500_INT_MAIN_CH_TH_PROT_F 106 /* not 8505/9540 */ 184#define AB8500_INT_MAIN_CH_TH_PROT_F 106 /* not 8505/9540 */
179#define AB8500_INT_MAIN_CH_TH_PROT_R 107 /* not 8505/9540 */ 185#define AB8500_INT_MAIN_CH_TH_PROT_R 107 /* not 8505/9540 */
180#define AB8500_INT_CHCURLIMNOHSCHIRP 109 186#define AB8500_INT_CHCURLIMNOHSCHIRP 109
181#define AB8500_INT_CHCURLIMHSCHIRP 110 187#define AB8500_INT_CHCURLIMHSCHIRP 110
182#define AB8500_INT_XTAL32K_KO 111 188#define AB8500_INT_XTAL32K_KO 111
183 189
184/* Definitions for AB9540 */ 190/* Definitions for AB9540 / AB8505 */
185/* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */ 191/* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */
186#define AB9540_INT_GPIO50R 113 192#define AB9540_INT_GPIO50R 113 /* not 8540 */
187#define AB9540_INT_GPIO51R 114 /* not 8505 */ 193#define AB9540_INT_GPIO51R 114 /* not 8505/8540 */
188#define AB9540_INT_GPIO52R 115 194#define AB9540_INT_GPIO52R 115 /* not 8540 */
189#define AB9540_INT_GPIO53R 116 195#define AB9540_INT_GPIO53R 116 /* not 8540 */
190#define AB9540_INT_GPIO54R 117 /* not 8505 */ 196#define AB9540_INT_GPIO54R 117 /* not 8505/8540 */
191#define AB9540_INT_IEXT_CH_RF_BFN_R 118 197#define AB9540_INT_IEXT_CH_RF_BFN_R 118
192#define AB9540_INT_IEXT_CH_RF_BFN_F 119
193/* ab8500_irq_regoffset[15] -> IT[Source|Latch|Mask]14 */ 198/* ab8500_irq_regoffset[15] -> IT[Source|Latch|Mask]14 */
194#define AB9540_INT_GPIO50F 121 199#define AB9540_INT_GPIO50F 121 /* not 8540 */
195#define AB9540_INT_GPIO51F 122 /* not 8505 */ 200#define AB9540_INT_GPIO51F 122 /* not 8505/8540 */
196#define AB9540_INT_GPIO52F 123 201#define AB9540_INT_GPIO52F 123 /* not 8540 */
197#define AB9540_INT_GPIO53F 124 202#define AB9540_INT_GPIO53F 124 /* not 8540 */
198#define AB9540_INT_GPIO54F 125 /* not 8505 */ 203#define AB9540_INT_GPIO54F 125 /* not 8505/8540 */
204#define AB9540_INT_IEXT_CH_RF_BFN_F 126
199/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */ 205/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
200#define AB8505_INT_KEYSTUCK 128 206#define AB8505_INT_KEYSTUCK 128
201#define AB8505_INT_IKR 129 207#define AB8505_INT_IKR 129
@@ -204,6 +210,87 @@ enum ab8500_version {
204#define AB8505_INT_KEYDEGLITCH 132 210#define AB8505_INT_KEYDEGLITCH 132
205#define AB8505_INT_MODPWRSTATUSF 134 211#define AB8505_INT_MODPWRSTATUSF 134
206#define AB8505_INT_MODPWRSTATUSR 135 212#define AB8505_INT_MODPWRSTATUSR 135
213/* ab8500_irq_regoffset[17] -> IT[Source|Latch|Mask]6 */
214#define AB8500_INT_HOOK_DET_NEG_F 138
215#define AB8500_INT_HOOK_DET_NEG_R 139
216#define AB8500_INT_HOOK_DET_POS_F 140
217#define AB8500_INT_HOOK_DET_POS_R 141
218#define AB8500_INT_PLUG_DET_COMP_F 142
219#define AB8500_INT_PLUG_DET_COMP_R 143
220/* ab8500_irq_regoffset[18] -> IT[Source|Latch|Mask]23 */
221#define AB8505_INT_COLL 144
222#define AB8505_INT_RESERR 145
223#define AB8505_INT_FRAERR 146
224#define AB8505_INT_COMERR 147
225#define AB8505_INT_SPDSET 148
226#define AB8505_INT_DSENT 149
227#define AB8505_INT_DREC 150
228#define AB8505_INT_ACC_INT 151
229/* ab8500_irq_regoffset[19] -> IT[Source|Latch|Mask]24 */
230#define AB8505_INT_NOPINT 152
231/* ab8540_irq_regoffset[20] -> IT[Source|Latch|Mask]26 */
232#define AB8540_INT_IDPLUGDETCOMPF 160
233#define AB8540_INT_IDPLUGDETCOMPR 161
234#define AB8540_INT_FMDETCOMPLOF 162
235#define AB8540_INT_FMDETCOMPLOR 163
236#define AB8540_INT_FMDETCOMPHIF 164
237#define AB8540_INT_FMDETCOMPHIR 165
238#define AB8540_INT_ID5VDETCOMPF 166
239#define AB8540_INT_ID5VDETCOMPR 167
240/* ab8540_irq_regoffset[21] -> IT[Source|Latch|Mask]27 */
241#define AB8540_INT_GPIO43F 168
242#define AB8540_INT_GPIO43R 169
243#define AB8540_INT_GPIO44F 170
244#define AB8540_INT_GPIO44R 171
245#define AB8540_INT_KEYPOSDETCOMPF 172
246#define AB8540_INT_KEYPOSDETCOMPR 173
247#define AB8540_INT_KEYNEGDETCOMPF 174
248#define AB8540_INT_KEYNEGDETCOMPR 175
249/* ab8540_irq_regoffset[22] -> IT[Source|Latch|Mask]28 */
250#define AB8540_INT_GPIO1VBATF 176
251#define AB8540_INT_GPIO1VBATR 177
252#define AB8540_INT_GPIO2VBATF 178
253#define AB8540_INT_GPIO2VBATR 179
254#define AB8540_INT_GPIO3VBATF 180
255#define AB8540_INT_GPIO3VBATR 181
256#define AB8540_INT_GPIO4VBATF 182
257#define AB8540_INT_GPIO4VBATR 183
258/* ab8540_irq_regoffset[23] -> IT[Source|Latch|Mask]29 */
259#define AB8540_INT_SYSCLKREQ2F 184
260#define AB8540_INT_SYSCLKREQ2R 185
261#define AB8540_INT_SYSCLKREQ3F 186
262#define AB8540_INT_SYSCLKREQ3R 187
263#define AB8540_INT_SYSCLKREQ4F 188
264#define AB8540_INT_SYSCLKREQ4R 189
265#define AB8540_INT_SYSCLKREQ5F 190
266#define AB8540_INT_SYSCLKREQ5R 191
267/* ab8540_irq_regoffset[24] -> IT[Source|Latch|Mask]30 */
268#define AB8540_INT_PWMOUT1F 192
269#define AB8540_INT_PWMOUT1R 193
270#define AB8540_INT_PWMCTRL0F 194
271#define AB8540_INT_PWMCTRL0R 195
272#define AB8540_INT_PWMCTRL1F 196
273#define AB8540_INT_PWMCTRL1R 197
274#define AB8540_INT_SYSCLKREQ6F 198
275#define AB8540_INT_SYSCLKREQ6R 199
276/* ab8540_irq_regoffset[25] -> IT[Source|Latch|Mask]31 */
277#define AB8540_INT_PWMEXTVIBRA1F 200
278#define AB8540_INT_PWMEXTVIBRA1R 201
279#define AB8540_INT_PWMEXTVIBRA2F 202
280#define AB8540_INT_PWMEXTVIBRA2R 203
281#define AB8540_INT_PWMOUT2F 204
282#define AB8540_INT_PWMOUT2R 205
283#define AB8540_INT_PWMOUT3F 206
284#define AB8540_INT_PWMOUT3R 207
285/* ab8540_irq_regoffset[26] -> IT[Source|Latch|Mask]32 */
286#define AB8540_INT_ADDATA2F 208
287#define AB8540_INT_ADDATA2R 209
288#define AB8540_INT_DADATA2F 210
289#define AB8540_INT_DADATA2R 211
290#define AB8540_INT_FSYNC2F 212
291#define AB8540_INT_FSYNC2R 213
292#define AB8540_INT_BITCLK2F 214
293#define AB8540_INT_BITCLK2R 215
207 294
208/* 295/*
209 * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the 296 * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -213,13 +300,24 @@ enum ab8500_version {
213 * which is larger. 300 * which is larger.
214 */ 301 */
215#define AB8500_NR_IRQS 112 302#define AB8500_NR_IRQS 112
216#define AB8505_NR_IRQS 136 303#define AB8505_NR_IRQS 153
217#define AB9540_NR_IRQS 136 304#define AB9540_NR_IRQS 153
305#define AB8540_NR_IRQS 216
218/* This is set to the roof of any AB8500 chip variant IRQ counts */ 306/* This is set to the roof of any AB8500 chip variant IRQ counts */
219#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS 307#define AB8500_MAX_NR_IRQS AB8540_NR_IRQS
220 308
221#define AB8500_NUM_IRQ_REGS 14 309#define AB8500_NUM_IRQ_REGS 14
222#define AB9540_NUM_IRQ_REGS 17 310#define AB9540_NUM_IRQ_REGS 20
311#define AB8540_NUM_IRQ_REGS 27
312
313/* Turn On Status Event */
314#define AB8500_POR_ON_VBAT 0x01
315#define AB8500_POW_KEY_1_ON 0x02
316#define AB8500_POW_KEY_2_ON 0x04
317#define AB8500_RTC_ALARM 0x08
318#define AB8500_MAIN_CH_DET 0x10
319#define AB8500_VBUS_DET 0x20
320#define AB8500_USB_ID_DET 0x40
223 321
224/** 322/**
225 * struct ab8500 - ab8500 internal structure 323 * struct ab8500 - ab8500 internal structure
@@ -287,7 +385,7 @@ struct ab8500_platform_data {
287 struct ab8500_regulator_reg_init *regulator_reg_init; 385 struct ab8500_regulator_reg_init *regulator_reg_init;
288 int num_regulator; 386 int num_regulator;
289 struct regulator_init_data *regulator; 387 struct regulator_init_data *regulator;
290 struct ab8500_gpio_platform_data *gpio; 388 struct abx500_gpio_platform_data *gpio;
291 struct ab8500_codec_platform_data *codec; 389 struct ab8500_codec_platform_data *codec;
292}; 390};
293 391
@@ -335,10 +433,79 @@ static inline int is_ab8500_2p0_or_earlier(struct ab8500 *ab)
335 return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0)); 433 return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0));
336} 434}
337 435
436static inline int is_ab8500_3p3_or_earlier(struct ab8500 *ab)
437{
438 return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT3P3));
439}
440
338/* exclude also ab8505, ab9540... */ 441/* exclude also ab8505, ab9540... */
339static inline int is_ab8500_2p0(struct ab8500 *ab) 442static inline int is_ab8500_2p0(struct ab8500 *ab)
340{ 443{
341 return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0)); 444 return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0));
342} 445}
343 446
447static inline int is_ab8505_1p0_or_earlier(struct ab8500 *ab)
448{
449 return (is_ab8505(ab) && (ab->chip_id <= AB8500_CUT1P0));
450}
451
452static inline int is_ab8505_2p0(struct ab8500 *ab)
453{
454 return (is_ab8505(ab) && (ab->chip_id == AB8500_CUT2P0));
455}
456
457static inline int is_ab9540_1p0_or_earlier(struct ab8500 *ab)
458{
459 return (is_ab9540(ab) && (ab->chip_id <= AB8500_CUT1P0));
460}
461
462static inline int is_ab9540_2p0(struct ab8500 *ab)
463{
464 return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT2P0));
465}
466
467/*
468 * Be careful, the marketing name for this chip is 2.1
469 * but the value read from the chip is 3.0 (0x30)
470 */
471static inline int is_ab9540_3p0(struct ab8500 *ab)
472{
473 return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT3P0));
474}
475
476static inline int is_ab8540_1p0_or_earlier(struct ab8500 *ab)
477{
478 return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P0);
479}
480
481static inline int is_ab8540_1p1_or_earlier(struct ab8500 *ab)
482{
483 return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P1);
484}
485
486static inline int is_ab8540_1p2_or_earlier(struct ab8500 *ab)
487{
488 return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P2);
489}
490
491static inline int is_ab8540_2p0_or_earlier(struct ab8500 *ab)
492{
493 return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT2P0);
494}
495
496static inline int is_ab8540_2p0(struct ab8500 *ab)
497{
498 return is_ab8540(ab) && (ab->chip_id == AB8500_CUT2P0);
499}
500
501static inline int is_ab8505_2p0_earlier(struct ab8500 *ab)
502{
503 return (is_ab8505(ab) && (ab->chip_id < AB8500_CUT2P0));
504}
505
506static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
507{
508 return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0));
509}
510
344#endif /* MFD_AB8500_H */ 511#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index 9b07725750c9..d43ac0f35526 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -27,12 +27,17 @@ struct ux500_charger_ops {
27 * @ops ux500 charger operations 27 * @ops ux500 charger operations
28 * @max_out_volt maximum output charger voltage in mV 28 * @max_out_volt maximum output charger voltage in mV
29 * @max_out_curr maximum output charger current in mA 29 * @max_out_curr maximum output charger current in mA
30 * @enabled indicates if this charger is used or not
31 * @external external charger unit (pm2xxx)
30 */ 32 */
31struct ux500_charger { 33struct ux500_charger {
32 struct power_supply psy; 34 struct power_supply psy;
33 struct ux500_charger_ops ops; 35 struct ux500_charger_ops ops;
34 int max_out_volt; 36 int max_out_volt;
35 int max_out_curr; 37 int max_out_curr;
38 int wdt_refresh;
39 bool enabled;
40 bool external;
36}; 41};
37 42
38#endif 43#endif
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 86dd93de6ff2..786d02eb79d2 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -99,6 +99,9 @@ struct da9052 {
99 u8 chip_id; 99 u8 chip_id;
100 100
101 int chip_irq; 101 int chip_irq;
102
103 /* SOC I/O transfer related fixes for DA9052/53 */
104 int (*fix_io) (struct da9052 *da9052, unsigned char reg);
102}; 105};
103 106
104/* ADC API */ 107/* ADC API */
@@ -113,32 +116,87 @@ static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
113 ret = regmap_read(da9052->regmap, reg, &val); 116 ret = regmap_read(da9052->regmap, reg, &val);
114 if (ret < 0) 117 if (ret < 0)
115 return ret; 118 return ret;
119
120 if (da9052->fix_io) {
121 ret = da9052->fix_io(da9052, reg);
122 if (ret < 0)
123 return ret;
124 }
125
116 return val; 126 return val;
117} 127}
118 128
119static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, 129static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,
120 unsigned char val) 130 unsigned char val)
121{ 131{
122 return regmap_write(da9052->regmap, reg, val); 132 int ret;
133
134 ret = regmap_write(da9052->regmap, reg, val);
135 if (ret < 0)
136 return ret;
137
138 if (da9052->fix_io) {
139 ret = da9052->fix_io(da9052, reg);
140 if (ret < 0)
141 return ret;
142 }
143
144 return ret;
123} 145}
124 146
125static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, 147static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
126 unsigned reg_cnt, unsigned char *val) 148 unsigned reg_cnt, unsigned char *val)
127{ 149{
128 return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); 150 int ret;
151
152 ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
153 if (ret < 0)
154 return ret;
155
156 if (da9052->fix_io) {
157 ret = da9052->fix_io(da9052, reg);
158 if (ret < 0)
159 return ret;
160 }
161
162 return ret;
129} 163}
130 164
131static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, 165static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
132 unsigned reg_cnt, unsigned char *val) 166 unsigned reg_cnt, unsigned char *val)
133{ 167{
134 return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); 168 int ret;
169
170 ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
171 if (ret < 0)
172 return ret;
173
174 if (da9052->fix_io) {
175 ret = da9052->fix_io(da9052, reg);
176 if (ret < 0)
177 return ret;
178 }
179
180 return ret;
135} 181}
136 182
137static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, 183static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
138 unsigned char bit_mask, 184 unsigned char bit_mask,
139 unsigned char reg_val) 185 unsigned char reg_val)
140{ 186{
141 return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); 187 int ret;
188
189 ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
190 if (ret < 0)
191 return ret;
192
193 if (da9052->fix_io) {
194 ret = da9052->fix_io(da9052, reg);
195 if (ret < 0)
196 return ret;
197 }
198
199 return ret;
142} 200}
143 201
144int da9052_device_init(struct da9052 *da9052, u8 chip_id); 202int da9052_device_init(struct da9052 *da9052, u8 chip_id);
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index b97f7309d7f6..c4dd3a8add21 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -34,6 +34,9 @@
34#define DA9052_STATUS_C_REG 3 34#define DA9052_STATUS_C_REG 3
35#define DA9052_STATUS_D_REG 4 35#define DA9052_STATUS_D_REG 4
36 36
37/* PARK REGISTER */
38#define DA9052_PARK_REGISTER DA9052_STATUS_D_REG
39
37/* EVENT REGISTERS */ 40/* EVENT REGISTERS */
38#define DA9052_EVENT_A_REG 5 41#define DA9052_EVENT_A_REG 5
39#define DA9052_EVENT_B_REG 6 42#define DA9052_EVENT_B_REG 6
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h
index a8d393e3066b..2b13970596f5 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/mfd/rtsx_common.h
@@ -38,6 +38,9 @@
38#define RTSX_SD_CARD 0 38#define RTSX_SD_CARD 0
39#define RTSX_MS_CARD 1 39#define RTSX_MS_CARD 1
40 40
41#define CLK_TO_DIV_N 0
42#define DIV_N_TO_CLK 1
43
41struct platform_device; 44struct platform_device;
42 45
43struct rtsx_slot { 46struct rtsx_slot {
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 060b721fcbfb..4b117a3f54d4 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -158,10 +158,9 @@
158#define SG_TRANS_DATA (0x02 << 4) 158#define SG_TRANS_DATA (0x02 << 4)
159#define SG_LINK_DESC (0x03 << 4) 159#define SG_LINK_DESC (0x03 << 4)
160 160
161/* SD bank voltage */ 161/* Output voltage */
162#define SD_IO_3V3 0 162#define OUTPUT_3V3 0
163#define SD_IO_1V8 1 163#define OUTPUT_1V8 1
164
165 164
166/* Card Clock Enable Register */ 165/* Card Clock Enable Register */
167#define SD_CLK_EN 0x04 166#define SD_CLK_EN 0x04
@@ -201,6 +200,20 @@
201#define CHANGE_CLK 0x01 200#define CHANGE_CLK 0x01
202 201
203/* LDO_CTL */ 202/* LDO_CTL */
203#define BPP_ASIC_1V7 0x00
204#define BPP_ASIC_1V8 0x01
205#define BPP_ASIC_1V9 0x02
206#define BPP_ASIC_2V0 0x03
207#define BPP_ASIC_2V7 0x04
208#define BPP_ASIC_2V8 0x05
209#define BPP_ASIC_3V2 0x06
210#define BPP_ASIC_3V3 0x07
211#define BPP_REG_TUNED18 0x07
212#define BPP_TUNED18_SHIFT_8402 5
213#define BPP_TUNED18_SHIFT_8411 4
214#define BPP_PAD_MASK 0x04
215#define BPP_PAD_3V3 0x04
216#define BPP_PAD_1V8 0x00
204#define BPP_LDO_POWB 0x03 217#define BPP_LDO_POWB 0x03
205#define BPP_LDO_ON 0x00 218#define BPP_LDO_ON 0x00
206#define BPP_LDO_SUSPEND 0x02 219#define BPP_LDO_SUSPEND 0x02
@@ -688,7 +701,10 @@ struct pcr_ops {
688 int (*disable_auto_blink)(struct rtsx_pcr *pcr); 701 int (*disable_auto_blink)(struct rtsx_pcr *pcr);
689 int (*card_power_on)(struct rtsx_pcr *pcr, int card); 702 int (*card_power_on)(struct rtsx_pcr *pcr, int card);
690 int (*card_power_off)(struct rtsx_pcr *pcr, int card); 703 int (*card_power_off)(struct rtsx_pcr *pcr, int card);
704 int (*switch_output_voltage)(struct rtsx_pcr *pcr,
705 u8 voltage);
691 unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); 706 unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
707 int (*conv_clk_and_div_n)(int clk, int dir);
692}; 708};
693 709
694enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; 710enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -783,6 +799,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
783 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); 799 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
784int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); 800int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
785int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); 801int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
802int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
786unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); 803unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
787void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); 804void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
788 805
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index b50c38f8bc48..f0f4de3b4ccc 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -26,6 +26,7 @@ enum sec_device_type {
26/** 26/**
27 * struct sec_pmic_dev - s5m87xx master device for sub-drivers 27 * struct sec_pmic_dev - s5m87xx master device for sub-drivers
28 * @dev: master device of the chip (can be used to access platform data) 28 * @dev: master device of the chip (can be used to access platform data)
29 * @pdata: pointer to private data used to pass platform data to child
29 * @i2c: i2c client private data for regulator 30 * @i2c: i2c client private data for regulator
30 * @rtc: i2c client private data for rtc 31 * @rtc: i2c client private data for rtc
31 * @iolock: mutex for serializing io access 32 * @iolock: mutex for serializing io access
@@ -39,6 +40,7 @@ enum sec_device_type {
39 */ 40 */
40struct sec_pmic_dev { 41struct sec_pmic_dev {
41 struct device *dev; 42 struct device *dev;
43 struct sec_platform_data *pdata;
42 struct regmap *regmap; 44 struct regmap *regmap;
43 struct i2c_client *i2c; 45 struct i2c_client *i2c;
44 struct i2c_client *rtc; 46 struct i2c_client *rtc;
@@ -82,11 +84,11 @@ struct sec_platform_data {
82 84
83 int buck_gpios[3]; 85 int buck_gpios[3];
84 int buck_ds[3]; 86 int buck_ds[3];
85 int buck2_voltage[8]; 87 unsigned int buck2_voltage[8];
86 bool buck2_gpiodvs; 88 bool buck2_gpiodvs;
87 int buck3_voltage[8]; 89 unsigned int buck3_voltage[8];
88 bool buck3_gpiodvs; 90 bool buck3_gpiodvs;
89 int buck4_voltage[8]; 91 unsigned int buck4_voltage[8];
90 bool buck4_gpiodvs; 92 bool buck4_gpiodvs;
91 93
92 int buck_set1; 94 int buck_set1;
@@ -127,6 +129,7 @@ struct sec_platform_data {
127struct sec_regulator_data { 129struct sec_regulator_data {
128 int id; 130 int id;
129 struct regulator_init_data *initdata; 131 struct regulator_init_data *initdata;
132 struct device_node *reg_node;
130}; 133};
131 134
132/* 135/*
@@ -136,7 +139,7 @@ struct sec_regulator_data {
136 */ 139 */
137struct sec_opmode_data { 140struct sec_opmode_data {
138 int id; 141 int id;
139 int mode; 142 unsigned int mode;
140}; 143};
141 144
142/* 145/*
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index bc823c4c028b..deca87452528 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -151,7 +151,7 @@ struct mmu_notifier_ops {
151 * Therefore notifier chains can only be traversed when either 151 * Therefore notifier chains can only be traversed when either
152 * 152 *
153 * 1. mmap_sem is held. 153 * 1. mmap_sem is held.
154 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). 154 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
155 * 3. No other concurrent thread can access the list (release) 155 * 3. No other concurrent thread can access the list (release)
156 */ 156 */
157struct mmu_notifier { 157struct mmu_notifier {
diff --git a/include/linux/module.h b/include/linux/module.h
index 7760c6d344a3..1375ee3f03aa 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -199,11 +199,11 @@ struct module_use {
199 struct module *source, *target; 199 struct module *source, *target;
200}; 200};
201 201
202enum module_state 202enum module_state {
203{ 203 MODULE_STATE_LIVE, /* Normal state. */
204 MODULE_STATE_LIVE, 204 MODULE_STATE_COMING, /* Full formed, running module_init. */
205 MODULE_STATE_COMING, 205 MODULE_STATE_GOING, /* Going away. */
206 MODULE_STATE_GOING, 206 MODULE_STATE_UNFORMED, /* Still setting it up. */
207}; 207};
208 208
209/** 209/**
diff --git a/include/linux/of.h b/include/linux/of.h
index 5ebcc5c8e423..a0f129284948 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -92,7 +92,7 @@ static inline void of_node_put(struct device_node *node) { }
92extern struct device_node *of_allnodes; 92extern struct device_node *of_allnodes;
93extern struct device_node *of_chosen; 93extern struct device_node *of_chosen;
94extern struct device_node *of_aliases; 94extern struct device_node *of_aliases;
95extern rwlock_t devtree_lock; 95extern raw_spinlock_t devtree_lock;
96 96
97static inline bool of_have_populated_dt(void) 97static inline bool of_have_populated_dt(void)
98{ 98{
@@ -160,7 +160,7 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
160 160
161#define OF_BAD_ADDR ((u64)-1) 161#define OF_BAD_ADDR ((u64)-1)
162 162
163static inline const char* of_node_full_name(struct device_node *np) 163static inline const char *of_node_full_name(const struct device_node *np)
164{ 164{
165 return np ? np->full_name : "<no-node>"; 165 return np ? np->full_name : "<no-node>";
166} 166}
@@ -277,6 +277,8 @@ extern struct device_node *of_parse_phandle(const struct device_node *np,
277extern int of_parse_phandle_with_args(const struct device_node *np, 277extern int of_parse_phandle_with_args(const struct device_node *np,
278 const char *list_name, const char *cells_name, int index, 278 const char *list_name, const char *cells_name, int index,
279 struct of_phandle_args *out_args); 279 struct of_phandle_args *out_args);
280extern int of_count_phandle_with_args(const struct device_node *np,
281 const char *list_name, const char *cells_name);
280 282
281extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); 283extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
282extern int of_alias_get_id(struct device_node *np, const char *stem); 284extern int of_alias_get_id(struct device_node *np, const char *stem);
@@ -467,6 +469,13 @@ static inline int of_parse_phandle_with_args(struct device_node *np,
467 return -ENOSYS; 469 return -ENOSYS;
468} 470}
469 471
472static inline int of_count_phandle_with_args(struct device_node *np,
473 const char *list_name,
474 const char *cells_name)
475{
476 return -ENOSYS;
477}
478
470static inline int of_alias_get_id(struct device_node *np, const char *stem) 479static inline int of_alias_get_id(struct device_node *np, const char *stem)
471{ 480{
472 return -ENOSYS; 481 return -ENOSYS;
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index c454f5796747..a83dc6f5008e 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -50,9 +50,6 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
50extern int of_get_named_gpio_flags(struct device_node *np, 50extern int of_get_named_gpio_flags(struct device_node *np,
51 const char *list_name, int index, enum of_gpio_flags *flags); 51 const char *list_name, int index, enum of_gpio_flags *flags);
52 52
53extern unsigned int of_gpio_named_count(struct device_node *np,
54 const char* propname);
55
56extern int of_mm_gpiochip_add(struct device_node *np, 53extern int of_mm_gpiochip_add(struct device_node *np,
57 struct of_mm_gpio_chip *mm_gc); 54 struct of_mm_gpio_chip *mm_gc);
58 55
@@ -71,12 +68,6 @@ static inline int of_get_named_gpio_flags(struct device_node *np,
71 return -ENOSYS; 68 return -ENOSYS;
72} 69}
73 70
74static inline unsigned int of_gpio_named_count(struct device_node *np,
75 const char* propname)
76{
77 return 0;
78}
79
80static inline int of_gpio_simple_xlate(struct gpio_chip *gc, 71static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
81 const struct of_phandle_args *gpiospec, 72 const struct of_phandle_args *gpiospec,
82 u32 *flags) 73 u32 *flags)
@@ -90,22 +81,37 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
90#endif /* CONFIG_OF_GPIO */ 81#endif /* CONFIG_OF_GPIO */
91 82
92/** 83/**
93 * of_gpio_count - Count GPIOs for a device 84 * of_gpio_named_count() - Count GPIOs for a device
94 * @np: device node to count GPIOs for 85 * @np: device node to count GPIOs for
86 * @propname: property name containing gpio specifier(s)
95 * 87 *
96 * The function returns the count of GPIOs specified for a node. 88 * The function returns the count of GPIOs specified for a node.
89 * Note that the empty GPIO specifiers count too. Returns either
90 * Number of gpios defined in property,
91 * -EINVAL for an incorrectly formed gpios property, or
92 * -ENOENT for a missing gpios property
97 * 93 *
98 * Note that the empty GPIO specifiers counts too. For example, 94 * Example:
99 *
100 * gpios = <0 95 * gpios = <0
101 * &pio1 1 2 96 * &gpio1 1 2
102 * 0 97 * 0
103 * &pio2 3 4>; 98 * &gpio2 3 4>;
99 *
100 * The above example defines four GPIOs, two of which are not specified.
101 * This function will return '4'
102 */
103static inline int of_gpio_named_count(struct device_node *np, const char* propname)
104{
105 return of_count_phandle_with_args(np, propname, "#gpio-cells");
106}
107
108/**
109 * of_gpio_count() - Count GPIOs for a device
110 * @np: device node to count GPIOs for
104 * 111 *
105 * defines four GPIOs (so this function will return 4), two of which 112 * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
106 * are not specified.
107 */ 113 */
108static inline unsigned int of_gpio_count(struct device_node *np) 114static inline int of_gpio_count(struct device_node *np)
109{ 115{
110 return of_gpio_named_count(np, "gpios"); 116 return of_gpio_named_count(np, "gpios");
111} 117}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 15472d691ee6..6fa4dd2a3b9e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1101,6 +1101,12 @@ static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
1101 return -1; 1101 return -1;
1102} 1102}
1103 1103
1104static inline int
1105pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
1106{
1107 return -1;
1108}
1109
1104static inline void pci_msi_shutdown(struct pci_dev *dev) 1110static inline void pci_msi_shutdown(struct pci_dev *dev)
1105{ } 1111{ }
1106static inline void pci_disable_msi(struct pci_dev *dev) 1112static inline void pci_disable_msi(struct pci_dev *dev)
@@ -1132,6 +1138,7 @@ static inline int pci_msi_enabled(void)
1132} 1138}
1133#else 1139#else
1134extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); 1140extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
1141extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
1135extern void pci_msi_shutdown(struct pci_dev *dev); 1142extern void pci_msi_shutdown(struct pci_dev *dev);
1136extern void pci_disable_msi(struct pci_dev *dev); 1143extern void pci_disable_msi(struct pci_dev *dev);
1137extern int pci_msix_table_size(struct pci_dev *dev); 1144extern int pci_msix_table_size(struct pci_dev *dev);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6bfb2faa0b19..e47ee462c2f2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -135,16 +135,21 @@ struct hw_perf_event {
135 struct { /* software */ 135 struct { /* software */
136 struct hrtimer hrtimer; 136 struct hrtimer hrtimer;
137 }; 137 };
138 struct { /* tracepoint */
139 struct task_struct *tp_target;
140 /* for tp_event->class */
141 struct list_head tp_list;
142 };
138#ifdef CONFIG_HAVE_HW_BREAKPOINT 143#ifdef CONFIG_HAVE_HW_BREAKPOINT
139 struct { /* breakpoint */ 144 struct { /* breakpoint */
140 struct arch_hw_breakpoint info;
141 struct list_head bp_list;
142 /* 145 /*
143 * Crufty hack to avoid the chicken and egg 146 * Crufty hack to avoid the chicken and egg
144 * problem hw_breakpoint has with context 147 * problem hw_breakpoint has with context
145 * creation and event initalization. 148 * creation and event initalization.
146 */ 149 */
147 struct task_struct *bp_target; 150 struct task_struct *bp_target;
151 struct arch_hw_breakpoint info;
152 struct list_head bp_list;
148 }; 153 };
149#endif 154#endif
150 }; 155 };
@@ -817,6 +822,17 @@ do { \
817} while (0) 822} while (0)
818 823
819 824
825struct perf_pmu_events_attr {
826 struct device_attribute attr;
827 u64 id;
828};
829
830#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
831static struct perf_pmu_events_attr _var = { \
832 .attr = __ATTR(_name, 0444, _show, NULL), \
833 .id = _id, \
834};
835
820#define PMU_FORMAT_ATTR(_name, _format) \ 836#define PMU_FORMAT_ATTR(_name, _format) \
821static ssize_t \ 837static ssize_t \
822_name##_show(struct device *dev, \ 838_name##_show(struct device *dev, \
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
new file mode 100644
index 000000000000..6e5f8a985ea7
--- /dev/null
+++ b/include/linux/pinctrl/devinfo.h
@@ -0,0 +1,45 @@
1/*
2 * Per-device information from the pin control system.
3 * This is the stuff that get included into the device
4 * core.
5 *
6 * Copyright (C) 2012 ST-Ericsson SA
7 * Written on behalf of Linaro for ST-Ericsson
8 * This interface is used in the core to keep track of pins.
9 *
10 * Author: Linus Walleij <linus.walleij@linaro.org>
11 *
12 * License terms: GNU General Public License (GPL) version 2
13 */
14
15#ifndef PINCTRL_DEVINFO_H
16#define PINCTRL_DEVINFO_H
17
18#ifdef CONFIG_PINCTRL
19
20/* The device core acts as a consumer toward pinctrl */
21#include <linux/pinctrl/consumer.h>
22
23/**
24 * struct dev_pin_info - pin state container for devices
25 * @p: pinctrl handle for the containing device
26 * @default_state: the default state for the handle, if found
27 */
28struct dev_pin_info {
29 struct pinctrl *p;
30 struct pinctrl_state *default_state;
31};
32
33extern int pinctrl_bind_pins(struct device *dev);
34
35#else
36
37/* Stubs if we're not using pinctrl */
38
39static inline int pinctrl_bind_pins(struct device *dev)
40{
41 return 0;
42}
43
44#endif /* CONFIG_PINCTRL */
45#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 47a1bdd88878..72474e18f1e0 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -46,7 +46,11 @@
46 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source 46 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
47 * (open emitter). Sending this config will enabale open drain mode, the 47 * (open emitter). Sending this config will enabale open drain mode, the
48 * argument is ignored. 48 * argument is ignored.
49 * @PIN_CONFIG_INPUT_SCHMITT_DISABLE: disable schmitt-trigger mode on the pin. 49 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
50 * argument. The argument is in mA.
51 * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
52 * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
53 * schmitt-trigger mode is disabled.
50 * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in 54 * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
51 * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, 55 * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
52 * the threshold value is given on a custom format as argument when 56 * the threshold value is given on a custom format as argument when
@@ -58,10 +62,15 @@
58 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power 62 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
59 * supplies, the argument to this parameter (on a custom format) tells 63 * supplies, the argument to this parameter (on a custom format) tells
60 * the driver which alternative power source to use. 64 * the driver which alternative power source to use.
65 * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
66 * this parameter (on a custom format) tells the driver which alternative
67 * slew rate to use.
61 * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power 68 * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
62 * operation, if several modes of operation are supported these can be 69 * operation, if several modes of operation are supported these can be
63 * passed in the argument on a custom form, else just use argument 1 70 * passed in the argument on a custom form, else just use argument 1
64 * to indicate low power mode, argument 0 turns low power mode off. 71 * to indicate low power mode, argument 0 turns low power mode off.
72 * @PIN_CONFIG_OUTPUT: this will configure the pin in output, use argument
73 * 1 to indicate high level, argument 0 to indicate low level.
65 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if 74 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
66 * you need to pass in custom configurations to the pin controller, use 75 * you need to pass in custom configurations to the pin controller, use
67 * PIN_CONFIG_END+1 as the base offset. 76 * PIN_CONFIG_END+1 as the base offset.
@@ -74,11 +83,14 @@ enum pin_config_param {
74 PIN_CONFIG_DRIVE_PUSH_PULL, 83 PIN_CONFIG_DRIVE_PUSH_PULL,
75 PIN_CONFIG_DRIVE_OPEN_DRAIN, 84 PIN_CONFIG_DRIVE_OPEN_DRAIN,
76 PIN_CONFIG_DRIVE_OPEN_SOURCE, 85 PIN_CONFIG_DRIVE_OPEN_SOURCE,
77 PIN_CONFIG_INPUT_SCHMITT_DISABLE, 86 PIN_CONFIG_DRIVE_STRENGTH,
87 PIN_CONFIG_INPUT_SCHMITT_ENABLE,
78 PIN_CONFIG_INPUT_SCHMITT, 88 PIN_CONFIG_INPUT_SCHMITT,
79 PIN_CONFIG_INPUT_DEBOUNCE, 89 PIN_CONFIG_INPUT_DEBOUNCE,
80 PIN_CONFIG_POWER_SOURCE, 90 PIN_CONFIG_POWER_SOURCE,
91 PIN_CONFIG_SLEW_RATE,
81 PIN_CONFIG_LOW_POWER_MODE, 92 PIN_CONFIG_LOW_POWER_MODE,
93 PIN_CONFIG_OUTPUT,
82 PIN_CONFIG_END = 0x7FFF, 94 PIN_CONFIG_END = 0x7FFF,
83}; 95};
84 96
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 04d6700d99af..778804df293f 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -154,6 +154,7 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
154#endif /* CONFIG_OF */ 154#endif /* CONFIG_OF */
155 155
156extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); 156extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
157extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev);
157extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); 158extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
158#else 159#else
159 160
diff --git a/arch/arm/mach-imx/iram.h b/include/linux/platform_data/imx-iram.h
index 022690c33702..022690c33702 100644
--- a/arch/arm/mach-imx/iram.h
+++ b/include/linux/platform_data/imx-iram.h
diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h
new file mode 100644
index 000000000000..a7fd0776c9bf
--- /dev/null
+++ b/include/linux/platform_data/lp8755.h
@@ -0,0 +1,71 @@
1/*
2 * LP8755 High Performance Power Management Unit Driver:System Interface Driver
3 *
4 * Copyright (C) 2012 Texas Instruments
5 *
6 * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
7 * G.Shark Jeong <gshark.jeong@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#ifndef _LP8755_H
16#define _LP8755_H
17
18#include <linux/regulator/consumer.h>
19
20#define LP8755_NAME "lp8755-regulator"
21/*
22 *PWR FAULT : power fault detected
23 *OCP : over current protect activated
24 *OVP : over voltage protect activated
25 *TEMP_WARN : thermal warning
26 *TEMP_SHDN : thermal shutdonw detected
27 *I_LOAD : current measured
28 */
29#define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL
30#define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT
31#define LP8755_EVENT_OVP 0x10000
32#define LP8755_EVENT_TEMP_WARN 0x2000
33#define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP
34#define LP8755_EVENT_I_LOAD 0x40000
35
36enum lp8755_bucks {
37 LP8755_BUCK0 = 0,
38 LP8755_BUCK1,
39 LP8755_BUCK2,
40 LP8755_BUCK3,
41 LP8755_BUCK4,
42 LP8755_BUCK5,
43 LP8755_BUCK_MAX,
44};
45
46/**
47 * multiphase configuration options
48 */
49enum lp8755_mphase_config {
50 MPHASE_CONF0,
51 MPHASE_CONF1,
52 MPHASE_CONF2,
53 MPHASE_CONF3,
54 MPHASE_CONF4,
55 MPHASE_CONF5,
56 MPHASE_CONF6,
57 MPHASE_CONF7,
58 MPHASE_CONF8,
59 MPHASE_CONF_MAX
60};
61
62/**
63 * struct lp8755_platform_data
64 * @mphase_type : Multiphase Switcher Configurations.
65 * @buck_data : buck0~6 init voltage in uV
66 */
67struct lp8755_platform_data {
68 int mphase;
69 struct regulator_init_data *buck_data[LP8755_BUCK_MAX];
70};
71#endif
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
new file mode 100644
index 000000000000..ed9d3b3daf02
--- /dev/null
+++ b/include/linux/platform_data/max6697.h
@@ -0,0 +1,36 @@
1/*
2 * max6697.h
3 * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef MAX6697_H
11#define MAX6697_H
12
13#include <linux/types.h>
14
15/*
16 * For all bit masks:
17 * bit 0: local temperature
18 * bit 1..7: remote temperatures
19 */
20struct max6697_platform_data {
21 bool smbus_timeout_disable; /* set to disable SMBus timeouts */
22 bool extended_range_enable; /* set to enable extended temp range */
23 bool beta_compensation; /* set to enable beta compensation */
24 u8 alert_mask; /* set bit to 1 to disable alert */
25 u8 over_temperature_mask; /* set bit to 1 to disable */
26 u8 resistance_cancellation; /* set bit to 0 to disable
27 * bit mask for MAX6581,
28 * boolean for other chips
29 */
30 u8 ideality_mask; /* set bit to 0 to disable */
31 u8 ideality_value; /* transistor ideality as per
32 * MAX6581 datasheet
33 */
34};
35
36#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index a65572d53211..c100456eab17 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -22,6 +22,9 @@ struct omap2_mcspi_dev_attr {
22 22
23struct omap2_mcspi_device_config { 23struct omap2_mcspi_device_config {
24 unsigned turbo_mode:1; 24 unsigned turbo_mode:1;
25
26 /* toggle chip select after every word */
27 unsigned cs_per_word:1;
25}; 28};
26 29
27#endif 30#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 03d7bb145311..97bcf23e045a 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -31,7 +31,6 @@
31/* 31/*
32 * Callbacks for platform drivers to implement. 32 * Callbacks for platform drivers to implement.
33 */ 33 */
34extern void (*pm_idle)(void);
35extern void (*pm_power_off)(void); 34extern void (*pm_power_off)(void);
36extern void (*pm_power_off_prepare)(void); 35extern void (*pm_power_off_prepare)(void);
37 36
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
new file mode 100644
index 000000000000..fc3f026922ae
--- /dev/null
+++ b/include/linux/pm2301_charger.h
@@ -0,0 +1,61 @@
1/*
2 * PM2301 charger driver.
3 *
4 * Copyright (C) 2012 ST Ericsson Corporation
5 *
6 * Contact: Olivier LAUNAY (olivier.launay@stericsson.com
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_PM2301_H
24#define __LINUX_PM2301_H
25
26/**
27 * struct pm2xxx_bm_charger_parameters - Charger specific parameters
28 * @ac_volt_max: maximum allowed AC charger voltage in mV
29 * @ac_curr_max: maximum allowed AC charger current in mA
30 */
31struct pm2xxx_bm_charger_parameters {
32 int ac_volt_max;
33 int ac_curr_max;
34};
35
36/**
37 * struct pm2xxx_bm_data - pm2xxx battery management data
38 * @enable_overshoot flag to enable VBAT overshoot control
39 * @chg_params charger parameters
40 */
41struct pm2xxx_bm_data {
42 bool enable_overshoot;
43 const struct pm2xxx_bm_charger_parameters *chg_params;
44};
45
46struct pm2xxx_charger_platform_data {
47 char **supplied_to;
48 size_t num_supplicants;
49 int i2c_bus;
50 const char *label;
51 int irq_number;
52 unsigned int lpn_gpio;
53 int irq_type;
54};
55
56struct pm2xxx_platform_data {
57 struct pm2xxx_charger_platform_data *wall_charger;
58 struct pm2xxx_bm_data *battery;
59};
60
61#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index f271860c78d5..c785c215abfc 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -80,6 +80,12 @@ static inline bool pm_runtime_suspended(struct device *dev)
80 && !dev->power.disable_depth; 80 && !dev->power.disable_depth;
81} 81}
82 82
83static inline bool pm_runtime_active(struct device *dev)
84{
85 return dev->power.runtime_status == RPM_ACTIVE
86 || dev->power.disable_depth;
87}
88
83static inline bool pm_runtime_status_suspended(struct device *dev) 89static inline bool pm_runtime_status_suspended(struct device *dev)
84{ 90{
85 return dev->power.runtime_status == RPM_SUSPENDED; 91 return dev->power.runtime_status == RPM_SUSPENDED;
@@ -132,6 +138,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
132static inline bool device_run_wake(struct device *dev) { return false; } 138static inline bool device_run_wake(struct device *dev) { return false; }
133static inline void device_set_run_wake(struct device *dev, bool enable) {} 139static inline void device_set_run_wake(struct device *dev, bool enable) {}
134static inline bool pm_runtime_suspended(struct device *dev) { return false; } 140static inline bool pm_runtime_suspended(struct device *dev) { return false; }
141static inline bool pm_runtime_active(struct device *dev) { return true; }
135static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } 142static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
136static inline bool pm_runtime_enabled(struct device *dev) { return false; } 143static inline bool pm_runtime_enabled(struct device *dev) { return false; }
137 144
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
index 97a1665eaeaf..8dcc0f46fc0a 100644
--- a/include/linux/power/bq2415x_charger.h
+++ b/include/linux/power/bq2415x_charger.h
@@ -75,7 +75,8 @@
75 75
76/* Supported modes with maximal current limit */ 76/* Supported modes with maximal current limit */
77enum bq2415x_mode { 77enum bq2415x_mode {
78 BQ2415X_MODE_NONE, /* unknown or no charger (100mA) */ 78 BQ2415X_MODE_OFF, /* offline mode (charger disabled) */
79 BQ2415X_MODE_NONE, /* unknown charger (100mA) */
79 BQ2415X_MODE_HOST_CHARGER, /* usb host/hub charger (500mA) */ 80 BQ2415X_MODE_HOST_CHARGER, /* usb host/hub charger (500mA) */
80 BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */ 81 BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */
81 BQ2415X_MODE_BOOST, /* boost mode (charging disabled) */ 82 BQ2415X_MODE_BOOST, /* boost mode (charging disabled) */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 1f0ab90aff00..25c0982eb9b1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -54,6 +54,8 @@ enum {
54 POWER_SUPPLY_HEALTH_OVERVOLTAGE, 54 POWER_SUPPLY_HEALTH_OVERVOLTAGE,
55 POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, 55 POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
56 POWER_SUPPLY_HEALTH_COLD, 56 POWER_SUPPLY_HEALTH_COLD,
57 POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
58 POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
57}; 59};
58 60
59enum { 61enum {
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 9afc01e5a0a6..86c4b6294713 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -98,9 +98,6 @@ int no_printk(const char *fmt, ...)
98extern asmlinkage __printf(1, 2) 98extern asmlinkage __printf(1, 2)
99void early_printk(const char *fmt, ...); 99void early_printk(const char *fmt, ...);
100 100
101extern int printk_needs_cpu(int cpu);
102extern void printk_tick(void);
103
104#ifdef CONFIG_PRINTK 101#ifdef CONFIG_PRINTK
105asmlinkage __printf(5, 0) 102asmlinkage __printf(5, 0)
106int vprintk_emit(int facility, int level, 103int vprintk_emit(int facility, int level,
diff --git a/include/linux/profile.h b/include/linux/profile.h
index a0fc32279fc0..21123902366d 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -82,9 +82,6 @@ int task_handoff_unregister(struct notifier_block * n);
82int profile_event_register(enum profile_type, struct notifier_block * n); 82int profile_event_register(enum profile_type, struct notifier_block * n);
83int profile_event_unregister(enum profile_type, struct notifier_block * n); 83int profile_event_unregister(enum profile_type, struct notifier_block * n);
84 84
85int register_timer_hook(int (*hook)(struct pt_regs *));
86void unregister_timer_hook(int (*hook)(struct pt_regs *));
87
88struct pt_regs; 85struct pt_regs;
89 86
90#else 87#else
@@ -135,16 +132,6 @@ static inline int profile_event_unregister(enum profile_type t, struct notifier_
135#define profile_handoff_task(a) (0) 132#define profile_handoff_task(a) (0)
136#define profile_munmap(a) do { } while (0) 133#define profile_munmap(a) do { } while (0)
137 134
138static inline int register_timer_hook(int (*hook)(struct pt_regs *))
139{
140 return -ENOSYS;
141}
142
143static inline void unregister_timer_hook(int (*hook)(struct pt_regs *))
144{
145 return;
146}
147
148#endif /* CONFIG_PROFILING */ 135#endif /* CONFIG_PROFILING */
149 136
150#endif /* _LINUX_PROFILE_H */ 137#endif /* _LINUX_PROFILE_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 1693775ecfe8..89573a33ab3c 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request,
45extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 45extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
46extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 46extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
47extern void ptrace_disable(struct task_struct *); 47extern void ptrace_disable(struct task_struct *);
48extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
49extern int ptrace_request(struct task_struct *child, long request, 48extern int ptrace_request(struct task_struct *child, long request,
50 unsigned long addr, unsigned long data); 49 unsigned long addr, unsigned long data);
51extern void ptrace_notify(int exit_code); 50extern void ptrace_notify(int exit_code);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index f36632061c66..467cc6307b62 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -155,6 +155,14 @@
155#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ 155#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
156#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ 156#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
157 157
158/* LPSS SSP */
159#define SSITF 0x44 /* TX FIFO trigger level */
160#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
161#define SSITF_TxHiThresh(x) ((x) - 1)
162
163#define SSIRF 0x48 /* RX FIFO trigger level */
164#define SSIRF_RxThresh(x) ((x) - 1)
165
158enum pxa_ssp_type { 166enum pxa_ssp_type {
159 SSP_UNDEFINED = 0, 167 SSP_UNDEFINED = 0,
160 PXA25x_SSP, /* pxa 210, 250, 255, 26x */ 168 PXA25x_SSP, /* pxa 210, 250, 255, 26x */
@@ -164,6 +172,7 @@ enum pxa_ssp_type {
164 PXA168_SSP, 172 PXA168_SSP,
165 PXA910_SSP, 173 PXA910_SSP,
166 CE4100_SSP, 174 CE4100_SSP,
175 LPSS_SSP,
167}; 176};
168 177
169struct ssp_device { 178struct ssp_device {
@@ -206,6 +215,15 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
206 return __raw_readl(dev->mmio_base + reg); 215 return __raw_readl(dev->mmio_base + reg);
207} 216}
208 217
218#ifdef CONFIG_ARCH_PXA
209struct ssp_device *pxa_ssp_request(int port, const char *label); 219struct ssp_device *pxa_ssp_request(int port, const char *label);
210void pxa_ssp_free(struct ssp_device *); 220void pxa_ssp_free(struct ssp_device *);
221#else
222static inline struct ssp_device *pxa_ssp_request(int port, const char *label)
223{
224 return NULL;
225}
226static inline void pxa_ssp_free(struct ssp_device *ssp) {}
227#endif
228
211#endif 229#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 275aa3f1062d..b758ce17b309 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -53,7 +53,10 @@ extern int rcutorture_runnable; /* for sysctl */
53extern void rcutorture_record_test_transition(void); 53extern void rcutorture_record_test_transition(void);
54extern void rcutorture_record_progress(unsigned long vernum); 54extern void rcutorture_record_progress(unsigned long vernum);
55extern void do_trace_rcu_torture_read(char *rcutorturename, 55extern void do_trace_rcu_torture_read(char *rcutorturename,
56 struct rcu_head *rhp); 56 struct rcu_head *rhp,
57 unsigned long secs,
58 unsigned long c_old,
59 unsigned long c);
57#else 60#else
58static inline void rcutorture_record_test_transition(void) 61static inline void rcutorture_record_test_transition(void)
59{ 62{
@@ -63,9 +66,13 @@ static inline void rcutorture_record_progress(unsigned long vernum)
63} 66}
64#ifdef CONFIG_RCU_TRACE 67#ifdef CONFIG_RCU_TRACE
65extern void do_trace_rcu_torture_read(char *rcutorturename, 68extern void do_trace_rcu_torture_read(char *rcutorturename,
66 struct rcu_head *rhp); 69 struct rcu_head *rhp,
70 unsigned long secs,
71 unsigned long c_old,
72 unsigned long c);
67#else 73#else
68#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) 74#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
75 do { } while (0)
69#endif 76#endif
70#endif 77#endif
71 78
@@ -749,7 +756,7 @@ static inline void rcu_preempt_sleep_check(void)
749 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) 756 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
750 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may 757 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
751 * be preempted, but explicit blocking is illegal. Finally, in preemptible 758 * be preempted, but explicit blocking is illegal. Finally, in preemptible
752 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, 759 * RCU implementations in real-time (with -rt patchset) kernel builds,
753 * RCU read-side critical sections may be preempted and they may also 760 * RCU read-side critical sections may be preempted and they may also
754 * block, but only when acquiring spinlocks that are subject to priority 761 * block, but only when acquiring spinlocks that are subject to priority
755 * inheritance. 762 * inheritance.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index b7e95bf942c9..bf77dfdabef9 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -28,7 +28,8 @@ struct regmap_range_cfg;
28enum regcache_type { 28enum regcache_type {
29 REGCACHE_NONE, 29 REGCACHE_NONE,
30 REGCACHE_RBTREE, 30 REGCACHE_RBTREE,
31 REGCACHE_COMPRESSED 31 REGCACHE_COMPRESSED,
32 REGCACHE_FLAT,
32}; 33};
33 34
34/** 35/**
@@ -127,7 +128,18 @@ typedef void (*regmap_unlock)(void *);
127 * @lock_arg: this field is passed as the only argument of lock/unlock 128 * @lock_arg: this field is passed as the only argument of lock/unlock
128 * functions (ignored in case regular lock/unlock functions 129 * functions (ignored in case regular lock/unlock functions
129 * are not overridden). 130 * are not overridden).
130 * 131 * @reg_read: Optional callback that if filled will be used to perform
132 * all the reads from the registers. Should only be provided for
133 * devices whos read operation cannot be represented as a simple read
134 * operation on a bus such as SPI, I2C, etc. Most of the devices do
135 * not need this.
136 * @reg_write: Same as above for writing.
137 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
138 * to perform locking. This field is ignored if custom lock/unlock
139 * functions are used (see fields lock/unlock of struct regmap_config).
140 * This field is a duplicate of a similar file in
141 * 'struct regmap_bus' and serves exact same purpose.
142 * Use it only for "no-bus" cases.
131 * @max_register: Optional, specifies the maximum valid register index. 143 * @max_register: Optional, specifies the maximum valid register index.
132 * @wr_table: Optional, points to a struct regmap_access_table specifying 144 * @wr_table: Optional, points to a struct regmap_access_table specifying
133 * valid ranges for write access. 145 * valid ranges for write access.
@@ -177,6 +189,11 @@ struct regmap_config {
177 regmap_unlock unlock; 189 regmap_unlock unlock;
178 void *lock_arg; 190 void *lock_arg;
179 191
192 int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
193 int (*reg_write)(void *context, unsigned int reg, unsigned int val);
194
195 bool fast_io;
196
180 unsigned int max_register; 197 unsigned int max_register;
181 const struct regmap_access_table *wr_table; 198 const struct regmap_access_table *wr_table;
182 const struct regmap_access_table *rd_table; 199 const struct regmap_access_table *rd_table;
@@ -235,14 +252,21 @@ struct regmap_range_cfg {
235 unsigned int window_len; 252 unsigned int window_len;
236}; 253};
237 254
255struct regmap_async;
256
238typedef int (*regmap_hw_write)(void *context, const void *data, 257typedef int (*regmap_hw_write)(void *context, const void *data,
239 size_t count); 258 size_t count);
240typedef int (*regmap_hw_gather_write)(void *context, 259typedef int (*regmap_hw_gather_write)(void *context,
241 const void *reg, size_t reg_len, 260 const void *reg, size_t reg_len,
242 const void *val, size_t val_len); 261 const void *val, size_t val_len);
262typedef int (*regmap_hw_async_write)(void *context,
263 const void *reg, size_t reg_len,
264 const void *val, size_t val_len,
265 struct regmap_async *async);
243typedef int (*regmap_hw_read)(void *context, 266typedef int (*regmap_hw_read)(void *context,
244 const void *reg_buf, size_t reg_size, 267 const void *reg_buf, size_t reg_size,
245 void *val_buf, size_t val_size); 268 void *val_buf, size_t val_size);
269typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
246typedef void (*regmap_hw_free_context)(void *context); 270typedef void (*regmap_hw_free_context)(void *context);
247 271
248/** 272/**
@@ -255,8 +279,11 @@ typedef void (*regmap_hw_free_context)(void *context);
255 * @write: Write operation. 279 * @write: Write operation.
256 * @gather_write: Write operation with split register/value, return -ENOTSUPP 280 * @gather_write: Write operation with split register/value, return -ENOTSUPP
257 * if not implemented on a given device. 281 * if not implemented on a given device.
282 * @async_write: Write operation which completes asynchronously, optional and
283 * must serialise with respect to non-async I/O.
258 * @read: Read operation. Data is returned in the buffer used to transmit 284 * @read: Read operation. Data is returned in the buffer used to transmit
259 * data. 285 * data.
286 * @async_alloc: Allocate a regmap_async() structure.
260 * @read_flag_mask: Mask to be set in the top byte of the register when doing 287 * @read_flag_mask: Mask to be set in the top byte of the register when doing
261 * a read. 288 * a read.
262 * @reg_format_endian_default: Default endianness for formatted register 289 * @reg_format_endian_default: Default endianness for formatted register
@@ -265,13 +292,16 @@ typedef void (*regmap_hw_free_context)(void *context);
265 * @val_format_endian_default: Default endianness for formatted register 292 * @val_format_endian_default: Default endianness for formatted register
266 * values. Used when the regmap_config specifies DEFAULT. If this is 293 * values. Used when the regmap_config specifies DEFAULT. If this is
267 * DEFAULT, BIG is assumed. 294 * DEFAULT, BIG is assumed.
295 * @async_size: Size of struct used for async work.
268 */ 296 */
269struct regmap_bus { 297struct regmap_bus {
270 bool fast_io; 298 bool fast_io;
271 regmap_hw_write write; 299 regmap_hw_write write;
272 regmap_hw_gather_write gather_write; 300 regmap_hw_gather_write gather_write;
301 regmap_hw_async_write async_write;
273 regmap_hw_read read; 302 regmap_hw_read read;
274 regmap_hw_free_context free_context; 303 regmap_hw_free_context free_context;
304 regmap_hw_async_alloc async_alloc;
275 u8 read_flag_mask; 305 u8 read_flag_mask;
276 enum regmap_endian reg_format_endian_default; 306 enum regmap_endian reg_format_endian_default;
277 enum regmap_endian val_format_endian_default; 307 enum regmap_endian val_format_endian_default;
@@ -285,9 +315,9 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
285 const struct regmap_config *config); 315 const struct regmap_config *config);
286struct regmap *regmap_init_spi(struct spi_device *dev, 316struct regmap *regmap_init_spi(struct spi_device *dev,
287 const struct regmap_config *config); 317 const struct regmap_config *config);
288struct regmap *regmap_init_mmio(struct device *dev, 318struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
289 void __iomem *regs, 319 void __iomem *regs,
290 const struct regmap_config *config); 320 const struct regmap_config *config);
291 321
292struct regmap *devm_regmap_init(struct device *dev, 322struct regmap *devm_regmap_init(struct device *dev,
293 const struct regmap_bus *bus, 323 const struct regmap_bus *bus,
@@ -297,9 +327,44 @@ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
297 const struct regmap_config *config); 327 const struct regmap_config *config);
298struct regmap *devm_regmap_init_spi(struct spi_device *dev, 328struct regmap *devm_regmap_init_spi(struct spi_device *dev,
299 const struct regmap_config *config); 329 const struct regmap_config *config);
300struct regmap *devm_regmap_init_mmio(struct device *dev, 330struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
301 void __iomem *regs, 331 void __iomem *regs,
302 const struct regmap_config *config); 332 const struct regmap_config *config);
333
334/**
335 * regmap_init_mmio(): Initialise register map
336 *
337 * @dev: Device that will be interacted with
338 * @regs: Pointer to memory-mapped IO region
339 * @config: Configuration for register map
340 *
341 * The return value will be an ERR_PTR() on error or a valid pointer to
342 * a struct regmap.
343 */
344static inline struct regmap *regmap_init_mmio(struct device *dev,
345 void __iomem *regs,
346 const struct regmap_config *config)
347{
348 return regmap_init_mmio_clk(dev, NULL, regs, config);
349}
350
351/**
352 * devm_regmap_init_mmio(): Initialise managed register map
353 *
354 * @dev: Device that will be interacted with
355 * @regs: Pointer to memory-mapped IO region
356 * @config: Configuration for register map
357 *
358 * The return value will be an ERR_PTR() on error or a valid pointer
359 * to a struct regmap. The regmap will be automatically freed by the
360 * device management code.
361 */
362static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
363 void __iomem *regs,
364 const struct regmap_config *config)
365{
366 return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
367}
303 368
304void regmap_exit(struct regmap *map); 369void regmap_exit(struct regmap *map);
305int regmap_reinit_cache(struct regmap *map, 370int regmap_reinit_cache(struct regmap *map,
@@ -310,6 +375,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
310 const void *val, size_t val_len); 375 const void *val, size_t val_len);
311int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 376int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
312 size_t val_count); 377 size_t val_count);
378int regmap_raw_write_async(struct regmap *map, unsigned int reg,
379 const void *val, size_t val_len);
313int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); 380int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
314int regmap_raw_read(struct regmap *map, unsigned int reg, 381int regmap_raw_read(struct regmap *map, unsigned int reg,
315 void *val, size_t val_len); 382 void *val, size_t val_len);
@@ -321,6 +388,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
321 unsigned int mask, unsigned int val, 388 unsigned int mask, unsigned int val,
322 bool *change); 389 bool *change);
323int regmap_get_val_bytes(struct regmap *map); 390int regmap_get_val_bytes(struct regmap *map);
391int regmap_async_complete(struct regmap *map);
324 392
325int regcache_sync(struct regmap *map); 393int regcache_sync(struct regmap *map);
326int regcache_sync_region(struct regmap *map, unsigned int min, 394int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -381,6 +449,7 @@ struct regmap_irq_chip {
381 unsigned int wake_base; 449 unsigned int wake_base;
382 unsigned int irq_reg_stride; 450 unsigned int irq_reg_stride;
383 unsigned int mask_invert; 451 unsigned int mask_invert;
452 unsigned int wake_invert;
384 bool runtime_pm; 453 bool runtime_pm;
385 454
386 int num_regs; 455 int num_regs;
@@ -422,6 +491,13 @@ static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
422 return -EINVAL; 491 return -EINVAL;
423} 492}
424 493
494static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
495 const void *val, size_t val_len)
496{
497 WARN_ONCE(1, "regmap API is disabled");
498 return -EINVAL;
499}
500
425static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, 501static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
426 const void *val, size_t val_count) 502 const void *val, size_t val_count)
427{ 503{
@@ -500,6 +576,11 @@ static inline void regcache_mark_dirty(struct regmap *map)
500 WARN_ONCE(1, "regmap API is disabled"); 576 WARN_ONCE(1, "regmap API is disabled");
501} 577}
502 578
579static inline void regmap_async_complete(struct regmap *map)
580{
581 WARN_ONCE(1, "regmap API is disabled");
582}
583
503static inline int regmap_register_patch(struct regmap *map, 584static inline int regmap_register_patch(struct regmap *map,
504 const struct reg_default *regs, 585 const struct reg_default *regs,
505 int num_regs) 586 int num_regs)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d10bb0f39c5e..23070fd83872 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -193,6 +193,10 @@ enum regulator_type {
193 * 193 *
194 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ 194 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
195 * @vsel_mask: Mask for register bitfield used for selector 195 * @vsel_mask: Mask for register bitfield used for selector
196 * @apply_reg: Register for initiate voltage change on the output when
197 * using regulator_set_voltage_sel_regmap
198 * @apply_bit: Register bitfield used for initiate voltage change on the
199 * output when using regulator_set_voltage_sel_regmap
196 * @enable_reg: Register for control when using regmap enable/disable ops 200 * @enable_reg: Register for control when using regmap enable/disable ops
197 * @enable_mask: Mask for control when using regmap enable/disable ops 201 * @enable_mask: Mask for control when using regmap enable/disable ops
198 * 202 *
@@ -218,6 +222,8 @@ struct regulator_desc {
218 222
219 unsigned int vsel_reg; 223 unsigned int vsel_reg;
220 unsigned int vsel_mask; 224 unsigned int vsel_mask;
225 unsigned int apply_reg;
226 unsigned int apply_bit;
221 unsigned int enable_reg; 227 unsigned int enable_reg;
222 unsigned int enable_mask; 228 unsigned int enable_mask;
223 unsigned int bypass_reg; 229 unsigned int bypass_reg;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 519777e3fa01..1342e69542f3 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
167unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 167unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
168unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); 168unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
169unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); 169unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
170unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
170 171
171u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); 172u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
172void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 173void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 9531845c419f..11d05f9fe8b6 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -138,6 +138,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc);
138extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 138extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
139extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 139extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
140extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); 140extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
141extern int rtc_set_ntp_time(struct timespec now);
141int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); 142int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
142extern int rtc_read_alarm(struct rtc_device *rtc, 143extern int rtc_read_alarm(struct rtc_device *rtc,
143 struct rtc_wkalrm *alrm); 144 struct rtc_wkalrm *alrm);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6fc8f45de4e9..e4112aad2964 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -304,19 +304,6 @@ static inline void lockup_detector_init(void)
304} 304}
305#endif 305#endif
306 306
307#ifdef CONFIG_DETECT_HUNG_TASK
308extern unsigned int sysctl_hung_task_panic;
309extern unsigned long sysctl_hung_task_check_count;
310extern unsigned long sysctl_hung_task_timeout_secs;
311extern unsigned long sysctl_hung_task_warnings;
312extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
313 void __user *buffer,
314 size_t *lenp, loff_t *ppos);
315#else
316/* Avoid need for ifdefs elsewhere in the code */
317enum { sysctl_hung_task_timeout_secs = 0 };
318#endif
319
320/* Attach to any functions which should be ignored in wchan output. */ 307/* Attach to any functions which should be ignored in wchan output. */
321#define __sched __attribute__((__section__(".sched.text"))) 308#define __sched __attribute__((__section__(".sched.text")))
322 309
@@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
338struct nsproxy; 325struct nsproxy;
339struct user_namespace; 326struct user_namespace;
340 327
341/*
342 * Default maximum number of active map areas, this limits the number of vmas
343 * per mm struct. Users can overwrite this number by sysctl but there is a
344 * problem.
345 *
346 * When a program's coredump is generated as ELF format, a section is created
347 * per a vma. In ELF, the number of sections is represented in unsigned short.
348 * This means the number of sections should be smaller than 65535 at coredump.
349 * Because the kernel adds some informative sections to a image of program at
350 * generating coredump, we need some margin. The number of extra sections is
351 * 1-3 now and depends on arch. We use "5" as safe margin, here.
352 */
353#define MAPCOUNT_ELF_CORE_MARGIN (5)
354#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
355
356extern int sysctl_max_map_count;
357
358#include <linux/aio.h> 328#include <linux/aio.h>
359 329
360#ifdef CONFIG_MMU 330#ifdef CONFIG_MMU
@@ -1194,6 +1164,7 @@ struct sched_entity {
1194 /* rq "owned" by this entity/group: */ 1164 /* rq "owned" by this entity/group: */
1195 struct cfs_rq *my_q; 1165 struct cfs_rq *my_q;
1196#endif 1166#endif
1167
1197/* 1168/*
1198 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be 1169 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
1199 * removed when useful for applications beyond shares distribution (e.g. 1170 * removed when useful for applications beyond shares distribution (e.g.
@@ -1208,6 +1179,7 @@ struct sched_entity {
1208struct sched_rt_entity { 1179struct sched_rt_entity {
1209 struct list_head run_list; 1180 struct list_head run_list;
1210 unsigned long timeout; 1181 unsigned long timeout;
1182 unsigned long watchdog_stamp;
1211 unsigned int time_slice; 1183 unsigned int time_slice;
1212 1184
1213 struct sched_rt_entity *back; 1185 struct sched_rt_entity *back;
@@ -1220,11 +1192,6 @@ struct sched_rt_entity {
1220#endif 1192#endif
1221}; 1193};
1222 1194
1223/*
1224 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1225 * Timeslices get refilled after they expire.
1226 */
1227#define RR_TIMESLICE (100 * HZ / 1000)
1228 1195
1229struct rcu_node; 1196struct rcu_node;
1230 1197
@@ -1368,6 +1335,15 @@ struct task_struct {
1368#ifndef CONFIG_VIRT_CPU_ACCOUNTING 1335#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1369 struct cputime prev_cputime; 1336 struct cputime prev_cputime;
1370#endif 1337#endif
1338#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1339 seqlock_t vtime_seqlock;
1340 unsigned long long vtime_snap;
1341 enum {
1342 VTIME_SLEEPING = 0,
1343 VTIME_USER,
1344 VTIME_SYS,
1345 } vtime_snap_whence;
1346#endif
1371 unsigned long nvcsw, nivcsw; /* context switch counts */ 1347 unsigned long nvcsw, nivcsw; /* context switch counts */
1372 struct timespec start_time; /* monotonic time */ 1348 struct timespec start_time; /* monotonic time */
1373 struct timespec real_start_time; /* boot based time */ 1349 struct timespec real_start_time; /* boot based time */
@@ -1622,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
1622} 1598}
1623#endif 1599#endif
1624 1600
1625/*
1626 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1627 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1628 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1629 * values are inverted: lower p->prio value means higher priority.
1630 *
1631 * The MAX_USER_RT_PRIO value allows the actual maximum
1632 * RT priority to be separate from the value exported to
1633 * user-space. This allows kernel threads to set their
1634 * priority to a value higher than any user task. Note:
1635 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1636 */
1637
1638#define MAX_USER_RT_PRIO 100
1639#define MAX_RT_PRIO MAX_USER_RT_PRIO
1640
1641#define MAX_PRIO (MAX_RT_PRIO + 40)
1642#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
1643
1644static inline int rt_prio(int prio)
1645{
1646 if (unlikely(prio < MAX_RT_PRIO))
1647 return 1;
1648 return 0;
1649}
1650
1651static inline int rt_task(struct task_struct *p)
1652{
1653 return rt_prio(p->prio);
1654}
1655
1656static inline struct pid *task_pid(struct task_struct *task) 1601static inline struct pid *task_pid(struct task_struct *task)
1657{ 1602{
1658 return task->pids[PIDTYPE_PID].pid; 1603 return task->pids[PIDTYPE_PID].pid;
@@ -1792,6 +1737,37 @@ static inline void put_task_struct(struct task_struct *t)
1792 __put_task_struct(t); 1737 __put_task_struct(t);
1793} 1738}
1794 1739
1740#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1741extern void task_cputime(struct task_struct *t,
1742 cputime_t *utime, cputime_t *stime);
1743extern void task_cputime_scaled(struct task_struct *t,
1744 cputime_t *utimescaled, cputime_t *stimescaled);
1745extern cputime_t task_gtime(struct task_struct *t);
1746#else
1747static inline void task_cputime(struct task_struct *t,
1748 cputime_t *utime, cputime_t *stime)
1749{
1750 if (utime)
1751 *utime = t->utime;
1752 if (stime)
1753 *stime = t->stime;
1754}
1755
1756static inline void task_cputime_scaled(struct task_struct *t,
1757 cputime_t *utimescaled,
1758 cputime_t *stimescaled)
1759{
1760 if (utimescaled)
1761 *utimescaled = t->utimescaled;
1762 if (stimescaled)
1763 *stimescaled = t->stimescaled;
1764}
1765
1766static inline cputime_t task_gtime(struct task_struct *t)
1767{
1768 return t->gtime;
1769}
1770#endif
1795extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 1771extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1796extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 1772extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1797 1773
@@ -2033,58 +2009,7 @@ extern void wake_up_idle_cpu(int cpu);
2033static inline void wake_up_idle_cpu(int cpu) { } 2009static inline void wake_up_idle_cpu(int cpu) { }
2034#endif 2010#endif
2035 2011
2036extern unsigned int sysctl_sched_latency;
2037extern unsigned int sysctl_sched_min_granularity;
2038extern unsigned int sysctl_sched_wakeup_granularity;
2039extern unsigned int sysctl_sched_child_runs_first;
2040
2041enum sched_tunable_scaling {
2042 SCHED_TUNABLESCALING_NONE,
2043 SCHED_TUNABLESCALING_LOG,
2044 SCHED_TUNABLESCALING_LINEAR,
2045 SCHED_TUNABLESCALING_END,
2046};
2047extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
2048
2049extern unsigned int sysctl_numa_balancing_scan_delay;
2050extern unsigned int sysctl_numa_balancing_scan_period_min;
2051extern unsigned int sysctl_numa_balancing_scan_period_max;
2052extern unsigned int sysctl_numa_balancing_scan_period_reset;
2053extern unsigned int sysctl_numa_balancing_scan_size;
2054extern unsigned int sysctl_numa_balancing_settle_count;
2055
2056#ifdef CONFIG_SCHED_DEBUG
2057extern unsigned int sysctl_sched_migration_cost;
2058extern unsigned int sysctl_sched_nr_migrate;
2059extern unsigned int sysctl_sched_time_avg;
2060extern unsigned int sysctl_timer_migration;
2061extern unsigned int sysctl_sched_shares_window;
2062
2063int sched_proc_update_handler(struct ctl_table *table, int write,
2064 void __user *buffer, size_t *length,
2065 loff_t *ppos);
2066#endif
2067#ifdef CONFIG_SCHED_DEBUG
2068static inline unsigned int get_sysctl_timer_migration(void)
2069{
2070 return sysctl_timer_migration;
2071}
2072#else
2073static inline unsigned int get_sysctl_timer_migration(void)
2074{
2075 return 1;
2076}
2077#endif
2078extern unsigned int sysctl_sched_rt_period;
2079extern int sysctl_sched_rt_runtime;
2080
2081int sched_rt_handler(struct ctl_table *table, int write,
2082 void __user *buffer, size_t *lenp,
2083 loff_t *ppos);
2084
2085#ifdef CONFIG_SCHED_AUTOGROUP 2012#ifdef CONFIG_SCHED_AUTOGROUP
2086extern unsigned int sysctl_sched_autogroup_enabled;
2087
2088extern void sched_autogroup_create_attach(struct task_struct *p); 2013extern void sched_autogroup_create_attach(struct task_struct *p);
2089extern void sched_autogroup_detach(struct task_struct *p); 2014extern void sched_autogroup_detach(struct task_struct *p);
2090extern void sched_autogroup_fork(struct signal_struct *sig); 2015extern void sched_autogroup_fork(struct signal_struct *sig);
@@ -2100,30 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2100static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2025static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2101#endif 2026#endif
2102 2027
2103#ifdef CONFIG_CFS_BANDWIDTH
2104extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2105#endif
2106
2107#ifdef CONFIG_RT_MUTEXES
2108extern int rt_mutex_getprio(struct task_struct *p);
2109extern void rt_mutex_setprio(struct task_struct *p, int prio);
2110extern void rt_mutex_adjust_pi(struct task_struct *p);
2111static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2112{
2113 return tsk->pi_blocked_on != NULL;
2114}
2115#else
2116static inline int rt_mutex_getprio(struct task_struct *p)
2117{
2118 return p->normal_prio;
2119}
2120# define rt_mutex_adjust_pi(p) do { } while (0)
2121static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2122{
2123 return false;
2124}
2125#endif
2126
2127extern bool yield_to(struct task_struct *p, bool preempt); 2028extern bool yield_to(struct task_struct *p, bool preempt);
2128extern void set_user_nice(struct task_struct *p, long nice); 2029extern void set_user_nice(struct task_struct *p, long nice);
2129extern int task_prio(const struct task_struct *p); 2030extern int task_prio(const struct task_struct *p);
@@ -2714,7 +2615,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
2714extern void recalc_sigpending_and_wake(struct task_struct *t); 2615extern void recalc_sigpending_and_wake(struct task_struct *t);
2715extern void recalc_sigpending(void); 2616extern void recalc_sigpending(void);
2716 2617
2717extern void signal_wake_up(struct task_struct *t, int resume_stopped); 2618extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2619
2620static inline void signal_wake_up(struct task_struct *t, bool resume)
2621{
2622 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2623}
2624static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2625{
2626 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2627}
2718 2628
2719/* 2629/*
2720 * Wrappers for p->thread_info->cpu access. No-op on UP. 2630 * Wrappers for p->thread_info->cpu access. No-op on UP.
@@ -2744,14 +2654,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2744extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2654extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2745extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2655extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2746 2656
2747extern void normalize_rt_tasks(void);
2748
2749#ifdef CONFIG_CGROUP_SCHED 2657#ifdef CONFIG_CGROUP_SCHED
2750 2658
2751extern struct task_group root_task_group; 2659extern struct task_group root_task_group;
2752 2660
2753extern struct task_group *sched_create_group(struct task_group *parent); 2661extern struct task_group *sched_create_group(struct task_group *parent);
2662extern void sched_online_group(struct task_group *tg,
2663 struct task_group *parent);
2754extern void sched_destroy_group(struct task_group *tg); 2664extern void sched_destroy_group(struct task_group *tg);
2665extern void sched_offline_group(struct task_group *tg);
2755extern void sched_move_task(struct task_struct *tsk); 2666extern void sched_move_task(struct task_struct *tsk);
2756#ifdef CONFIG_FAIR_GROUP_SCHED 2667#ifdef CONFIG_FAIR_GROUP_SCHED
2757extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 2668extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
new file mode 100644
index 000000000000..94e19ea28fc3
--- /dev/null
+++ b/include/linux/sched/rt.h
@@ -0,0 +1,58 @@
1#ifndef _SCHED_RT_H
2#define _SCHED_RT_H
3
4/*
5 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
6 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
7 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
8 * values are inverted: lower p->prio value means higher priority.
9 *
10 * The MAX_USER_RT_PRIO value allows the actual maximum
11 * RT priority to be separate from the value exported to
12 * user-space. This allows kernel threads to set their
13 * priority to a value higher than any user task. Note:
14 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
15 */
16
17#define MAX_USER_RT_PRIO 100
18#define MAX_RT_PRIO MAX_USER_RT_PRIO
19
20#define MAX_PRIO (MAX_RT_PRIO + 40)
21#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
22
23static inline int rt_prio(int prio)
24{
25 if (unlikely(prio < MAX_RT_PRIO))
26 return 1;
27 return 0;
28}
29
30static inline int rt_task(struct task_struct *p)
31{
32 return rt_prio(p->prio);
33}
34
35#ifdef CONFIG_RT_MUTEXES
36extern int rt_mutex_getprio(struct task_struct *p);
37extern void rt_mutex_setprio(struct task_struct *p, int prio);
38extern void rt_mutex_adjust_pi(struct task_struct *p);
39static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
40{
41 return tsk->pi_blocked_on != NULL;
42}
43#else
44static inline int rt_mutex_getprio(struct task_struct *p)
45{
46 return p->normal_prio;
47}
48# define rt_mutex_adjust_pi(p) do { } while (0)
49static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
50{
51 return false;
52}
53#endif
54
55extern void normalize_rt_tasks(void);
56
57
58#endif /* _SCHED_RT_H */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
new file mode 100644
index 000000000000..d2bb0ae979d0
--- /dev/null
+++ b/include/linux/sched/sysctl.h
@@ -0,0 +1,110 @@
1#ifndef _SCHED_SYSCTL_H
2#define _SCHED_SYSCTL_H
3
4#ifdef CONFIG_DETECT_HUNG_TASK
5extern unsigned int sysctl_hung_task_panic;
6extern unsigned long sysctl_hung_task_check_count;
7extern unsigned long sysctl_hung_task_timeout_secs;
8extern unsigned long sysctl_hung_task_warnings;
9extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
10 void __user *buffer,
11 size_t *lenp, loff_t *ppos);
12#else
13/* Avoid need for ifdefs elsewhere in the code */
14enum { sysctl_hung_task_timeout_secs = 0 };
15#endif
16
17/*
18 * Default maximum number of active map areas, this limits the number of vmas
19 * per mm struct. Users can overwrite this number by sysctl but there is a
20 * problem.
21 *
22 * When a program's coredump is generated as ELF format, a section is created
23 * per a vma. In ELF, the number of sections is represented in unsigned short.
24 * This means the number of sections should be smaller than 65535 at coredump.
25 * Because the kernel adds some informative sections to a image of program at
26 * generating coredump, we need some margin. The number of extra sections is
27 * 1-3 now and depends on arch. We use "5" as safe margin, here.
28 */
29#define MAPCOUNT_ELF_CORE_MARGIN (5)
30#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
31
32extern int sysctl_max_map_count;
33
34extern unsigned int sysctl_sched_latency;
35extern unsigned int sysctl_sched_min_granularity;
36extern unsigned int sysctl_sched_wakeup_granularity;
37extern unsigned int sysctl_sched_child_runs_first;
38
39enum sched_tunable_scaling {
40 SCHED_TUNABLESCALING_NONE,
41 SCHED_TUNABLESCALING_LOG,
42 SCHED_TUNABLESCALING_LINEAR,
43 SCHED_TUNABLESCALING_END,
44};
45extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
46
47extern unsigned int sysctl_numa_balancing_scan_delay;
48extern unsigned int sysctl_numa_balancing_scan_period_min;
49extern unsigned int sysctl_numa_balancing_scan_period_max;
50extern unsigned int sysctl_numa_balancing_scan_period_reset;
51extern unsigned int sysctl_numa_balancing_scan_size;
52extern unsigned int sysctl_numa_balancing_settle_count;
53
54#ifdef CONFIG_SCHED_DEBUG
55extern unsigned int sysctl_sched_migration_cost;
56extern unsigned int sysctl_sched_nr_migrate;
57extern unsigned int sysctl_sched_time_avg;
58extern unsigned int sysctl_timer_migration;
59extern unsigned int sysctl_sched_shares_window;
60
61int sched_proc_update_handler(struct ctl_table *table, int write,
62 void __user *buffer, size_t *length,
63 loff_t *ppos);
64#endif
65#ifdef CONFIG_SCHED_DEBUG
66static inline unsigned int get_sysctl_timer_migration(void)
67{
68 return sysctl_timer_migration;
69}
70#else
71static inline unsigned int get_sysctl_timer_migration(void)
72{
73 return 1;
74}
75#endif
76
77/*
78 * control realtime throttling:
79 *
80 * /proc/sys/kernel/sched_rt_period_us
81 * /proc/sys/kernel/sched_rt_runtime_us
82 */
83extern unsigned int sysctl_sched_rt_period;
84extern int sysctl_sched_rt_runtime;
85
86#ifdef CONFIG_CFS_BANDWIDTH
87extern unsigned int sysctl_sched_cfs_bandwidth_slice;
88#endif
89
90#ifdef CONFIG_SCHED_AUTOGROUP
91extern unsigned int sysctl_sched_autogroup_enabled;
92#endif
93
94/*
95 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
96 * Timeslices get refilled after they expire.
97 */
98#define RR_TIMESLICE (100 * HZ / 1000)
99
100extern int sched_rr_timeslice;
101
102extern int sched_rr_handler(struct ctl_table *table, int write,
103 void __user *buffer, size_t *lenp,
104 loff_t *ppos);
105
106extern int sched_rt_handler(struct ctl_table *table, int write,
107 void __user *buffer, size_t *lenp,
108 loff_t *ppos);
109
110#endif /* _SCHED_SYSCTL_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 0f6afc657f77..eee7478cda70 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -989,17 +989,29 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
989 * tells the LSM to decrement the number of secmark labeling rules loaded 989 * tells the LSM to decrement the number of secmark labeling rules loaded
990 * @req_classify_flow: 990 * @req_classify_flow:
991 * Sets the flow's sid to the openreq sid. 991 * Sets the flow's sid to the openreq sid.
992 * @tun_dev_alloc_security:
993 * This hook allows a module to allocate a security structure for a TUN
994 * device.
995 * @security pointer to a security structure pointer.
996 * Returns a zero on success, negative values on failure.
997 * @tun_dev_free_security:
998 * This hook allows a module to free the security structure for a TUN
999 * device.
1000 * @security pointer to the TUN device's security structure
992 * @tun_dev_create: 1001 * @tun_dev_create:
993 * Check permissions prior to creating a new TUN device. 1002 * Check permissions prior to creating a new TUN device.
994 * @tun_dev_post_create: 1003 * @tun_dev_attach_queue:
995 * This hook allows a module to update or allocate a per-socket security 1004 * Check permissions prior to attaching to a TUN device queue.
996 * structure. 1005 * @security pointer to the TUN device's security structure.
997 * @sk contains the newly created sock structure.
998 * @tun_dev_attach: 1006 * @tun_dev_attach:
999 * Check permissions prior to attaching to a persistent TUN device. This 1007 * This hook can be used by the module to update any security state
1000 * hook can also be used by the module to update any security state
1001 * associated with the TUN device's sock structure. 1008 * associated with the TUN device's sock structure.
1002 * @sk contains the existing sock structure. 1009 * @sk contains the existing sock structure.
1010 * @security pointer to the TUN device's security structure.
1011 * @tun_dev_open:
1012 * This hook can be used by the module to update any security state
1013 * associated with the TUN device's security structure.
1014 * @security pointer to the TUN devices's security structure.
1003 * 1015 *
1004 * Security hooks for XFRM operations. 1016 * Security hooks for XFRM operations.
1005 * 1017 *
@@ -1620,9 +1632,12 @@ struct security_operations {
1620 void (*secmark_refcount_inc) (void); 1632 void (*secmark_refcount_inc) (void);
1621 void (*secmark_refcount_dec) (void); 1633 void (*secmark_refcount_dec) (void);
1622 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); 1634 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1623 int (*tun_dev_create)(void); 1635 int (*tun_dev_alloc_security) (void **security);
1624 void (*tun_dev_post_create)(struct sock *sk); 1636 void (*tun_dev_free_security) (void *security);
1625 int (*tun_dev_attach)(struct sock *sk); 1637 int (*tun_dev_create) (void);
1638 int (*tun_dev_attach_queue) (void *security);
1639 int (*tun_dev_attach) (struct sock *sk, void *security);
1640 int (*tun_dev_open) (void *security);
1626#endif /* CONFIG_SECURITY_NETWORK */ 1641#endif /* CONFIG_SECURITY_NETWORK */
1627 1642
1628#ifdef CONFIG_SECURITY_NETWORK_XFRM 1643#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2566,9 +2581,12 @@ void security_inet_conn_established(struct sock *sk,
2566int security_secmark_relabel_packet(u32 secid); 2581int security_secmark_relabel_packet(u32 secid);
2567void security_secmark_refcount_inc(void); 2582void security_secmark_refcount_inc(void);
2568void security_secmark_refcount_dec(void); 2583void security_secmark_refcount_dec(void);
2584int security_tun_dev_alloc_security(void **security);
2585void security_tun_dev_free_security(void *security);
2569int security_tun_dev_create(void); 2586int security_tun_dev_create(void);
2570void security_tun_dev_post_create(struct sock *sk); 2587int security_tun_dev_attach_queue(void *security);
2571int security_tun_dev_attach(struct sock *sk); 2588int security_tun_dev_attach(struct sock *sk, void *security);
2589int security_tun_dev_open(void *security);
2572 2590
2573#else /* CONFIG_SECURITY_NETWORK */ 2591#else /* CONFIG_SECURITY_NETWORK */
2574static inline int security_unix_stream_connect(struct sock *sock, 2592static inline int security_unix_stream_connect(struct sock *sock,
@@ -2733,16 +2751,31 @@ static inline void security_secmark_refcount_dec(void)
2733{ 2751{
2734} 2752}
2735 2753
2754static inline int security_tun_dev_alloc_security(void **security)
2755{
2756 return 0;
2757}
2758
2759static inline void security_tun_dev_free_security(void *security)
2760{
2761}
2762
2736static inline int security_tun_dev_create(void) 2763static inline int security_tun_dev_create(void)
2737{ 2764{
2738 return 0; 2765 return 0;
2739} 2766}
2740 2767
2741static inline void security_tun_dev_post_create(struct sock *sk) 2768static inline int security_tun_dev_attach_queue(void *security)
2769{
2770 return 0;
2771}
2772
2773static inline int security_tun_dev_attach(struct sock *sk, void *security)
2742{ 2774{
2775 return 0;
2743} 2776}
2744 2777
2745static inline int security_tun_dev_attach(struct sock *sk) 2778static inline int security_tun_dev_open(void *security)
2746{ 2779{
2747 return 0; 2780 return 0;
2748} 2781}
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index e0106d8581d3..c65dee059913 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -14,6 +14,8 @@ struct smpboot_thread_data;
14 * @thread_should_run: Check whether the thread should run or not. Called with 14 * @thread_should_run: Check whether the thread should run or not. Called with
15 * preemption disabled. 15 * preemption disabled.
16 * @thread_fn: The associated thread function 16 * @thread_fn: The associated thread function
17 * @create: Optional setup function, called when the thread gets
18 * created (Not called from the thread context)
17 * @setup: Optional setup function, called when the thread gets 19 * @setup: Optional setup function, called when the thread gets
18 * operational the first time 20 * operational the first time
19 * @cleanup: Optional cleanup function, called when the thread 21 * @cleanup: Optional cleanup function, called when the thread
@@ -22,6 +24,7 @@ struct smpboot_thread_data;
22 * parked (cpu offline) 24 * parked (cpu offline)
23 * @unpark: Optional unpark function, called when the thread is 25 * @unpark: Optional unpark function, called when the thread is
24 * unparked (cpu online) 26 * unparked (cpu online)
27 * @selfparking: Thread is not parked by the park function.
25 * @thread_comm: The base name of the thread 28 * @thread_comm: The base name of the thread
26 */ 29 */
27struct smp_hotplug_thread { 30struct smp_hotplug_thread {
@@ -29,10 +32,12 @@ struct smp_hotplug_thread {
29 struct list_head list; 32 struct list_head list;
30 int (*thread_should_run)(unsigned int cpu); 33 int (*thread_should_run)(unsigned int cpu);
31 void (*thread_fn)(unsigned int cpu); 34 void (*thread_fn)(unsigned int cpu);
35 void (*create)(unsigned int cpu);
32 void (*setup)(unsigned int cpu); 36 void (*setup)(unsigned int cpu);
33 void (*cleanup)(unsigned int cpu, bool online); 37 void (*cleanup)(unsigned int cpu, bool online);
34 void (*park)(unsigned int cpu); 38 void (*park)(unsigned int cpu);
35 void (*unpark)(unsigned int cpu); 39 void (*unpark)(unsigned int cpu);
40 bool selfparking;
36 const char *thread_comm; 41 const char *thread_comm;
37}; 42};
38 43
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index c73d1445c77e..82d5111cd0c2 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -28,6 +28,15 @@ struct pxa2xx_spi_master {
28 u32 clock_enable; 28 u32 clock_enable;
29 u16 num_chipselect; 29 u16 num_chipselect;
30 u8 enable_dma; 30 u8 enable_dma;
31
32 /* DMA engine specific config */
33 int rx_chan_id;
34 int tx_chan_id;
35 int rx_slave_id;
36 int tx_slave_id;
37
38 /* For non-PXA arches */
39 struct ssp_device ssp;
31}; 40};
32 41
33/* spi_board_info.controller_data for SPI slave devices, 42/* spi_board_info.controller_data for SPI slave devices,
@@ -35,6 +44,7 @@ struct pxa2xx_spi_master {
35 */ 44 */
36struct pxa2xx_spi_chip { 45struct pxa2xx_spi_chip {
37 u8 tx_threshold; 46 u8 tx_threshold;
47 u8 tx_hi_threshold;
38 u8 rx_threshold; 48 u8 rx_threshold;
39 u8 dma_burst_size; 49 u8 dma_burst_size;
40 u32 timeout; 50 u32 timeout;
@@ -50,103 +60,5 @@ struct pxa2xx_spi_chip {
50 60
51extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 61extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
52 62
53#else
54/*
55 * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or
56 * plat/ include path.
57 * The CE4100 does not provide DMA support. This bits are here to let the driver
58 * compile and will never be used. Maybe we get DMA support at a later point in
59 * time.
60 */
61
62#define DCSR(n) (n)
63#define DSADR(n) (n)
64#define DTADR(n) (n)
65#define DCMD(n) (n)
66#define DRCMR(n) (n)
67
68#define DCSR_RUN (1 << 31) /* Run Bit */
69#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */
70#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */
71#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
72#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
73#define DCSR_ENDINTR (1 << 2) /* End Interrupt */
74#define DCSR_STARTINTR (1 << 1) /* Start Interrupt */
75#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */
76
77#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */
78#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
79#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
80#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
81#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
82#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
83#define DCSR_EORINTR (1 << 9) /* The end of Receive */
84
85#define DRCMR_MAPVLD (1 << 7) /* Map Valid */
86#define DRCMR_CHLNUM 0x1f /* mask for Channel Number */
87
88#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */
89#define DDADR_STOP (1 << 0) /* Stop */
90
91#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
92#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
93#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
94#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
95#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
96#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
97#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
98#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
99#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
100#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
101#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
102#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
103#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
104#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
105
106/*
107 * Descriptor structure for PXA's DMA engine
108 * Note: this structure must always be aligned to a 16-byte boundary.
109 */
110
111typedef enum {
112 DMA_PRIO_HIGH = 0,
113 DMA_PRIO_MEDIUM = 1,
114 DMA_PRIO_LOW = 2
115} pxa_dma_prio;
116
117/*
118 * DMA registration
119 */
120
121static inline int pxa_request_dma(char *name,
122 pxa_dma_prio prio,
123 void (*irq_handler)(int, void *),
124 void *data)
125{
126 return -ENODEV;
127}
128
129static inline void pxa_free_dma(int dma_ch)
130{
131}
132
133/*
134 * The CE4100 does not have the clk framework implemented and SPI clock can
135 * not be switched on/off or the divider changed.
136 */
137static inline void clk_disable(struct clk *clk)
138{
139}
140
141static inline int clk_enable(struct clk *clk)
142{
143 return 0;
144}
145
146static inline unsigned long clk_get_rate(struct clk *clk)
147{
148 return 3686400;
149}
150
151#endif 63#endif
152#endif 64#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index f62918946d86..30e9c50a5e20 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -57,6 +57,8 @@ extern struct bus_type spi_bus_type;
57 * @modalias: Name of the driver to use with this device, or an alias 57 * @modalias: Name of the driver to use with this device, or an alias
58 * for that name. This appears in the sysfs "modalias" attribute 58 * for that name. This appears in the sysfs "modalias" attribute
59 * for driver coldplugging, and in uevents used for hotplugging 59 * for driver coldplugging, and in uevents used for hotplugging
60 * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
61 * when not using a GPIO line)
60 * 62 *
61 * A @spi_device is used to interchange data between an SPI slave 63 * A @spi_device is used to interchange data between an SPI slave
62 * (usually a discrete chip) and CPU memory. 64 * (usually a discrete chip) and CPU memory.
@@ -258,6 +260,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
258 * @unprepare_transfer_hardware: there are currently no more messages on the 260 * @unprepare_transfer_hardware: there are currently no more messages on the
259 * queue so the subsystem notifies the driver that it may relax the 261 * queue so the subsystem notifies the driver that it may relax the
260 * hardware by issuing this call 262 * hardware by issuing this call
263 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
264 * number. Any individual value may be -EINVAL for CS lines that
265 * are not GPIOs (driven by the SPI controller itself).
261 * 266 *
262 * Each SPI master controller can communicate with one or more @spi_device 267 * Each SPI master controller can communicate with one or more @spi_device
263 * children. These make a small bus, sharing MOSI, MISO and SCK signals 268 * children. These make a small bus, sharing MOSI, MISO and SCK signals
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 369b3d7d5b95..1634ce31c06d 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -62,8 +62,8 @@
62 */ 62 */
63struct spi_gpio_platform_data { 63struct spi_gpio_platform_data {
64 unsigned sck; 64 unsigned sck;
65 unsigned mosi; 65 unsigned long mosi;
66 unsigned miso; 66 unsigned long miso;
67 67
68 u16 num_chipselect; 68 u16 num_chipselect;
69}; 69};
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 6eb691b08358..04f4121a23ae 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp);
151 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot 151 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
152 * and while lockdep is disabled. 152 * and while lockdep is disabled.
153 * 153 *
154 * Note that if the CPU is in the idle loop from an RCU point of view 154 * Note that SRCU is based on its own statemachine and it doesn't
155 * (ie: that we are in the section between rcu_idle_enter() and 155 * relies on normal RCU, it can be called from the CPU which
156 * rcu_idle_exit()) then srcu_read_lock_held() returns false even if 156 * is in the idle loop from an RCU point of view or offline.
157 * the CPU did an srcu_read_lock(). The reason for this is that RCU
158 * ignores CPUs that are in such a section, considering these as in
159 * extended quiescent state, so such a CPU is effectively never in an
160 * RCU read-side critical section regardless of what RCU primitives it
161 * invokes. This state of affairs is required --- we need to keep an
162 * RCU-free window in idle where the CPU may possibly enter into low
163 * power mode. This way we can notice an extended quiescent state to
164 * other CPUs that started a grace period. Otherwise we would delay any
165 * grace period as long as we run in the idle task.
166 *
167 * Similarly, we avoid claiming an SRCU read lock held if the current
168 * CPU is offline.
169 */ 157 */
170static inline int srcu_read_lock_held(struct srcu_struct *sp) 158static inline int srcu_read_lock_held(struct srcu_struct *sp)
171{ 159{
172 if (!debug_lockdep_rcu_enabled()) 160 if (!debug_lockdep_rcu_enabled())
173 return 1; 161 return 1;
174 if (rcu_is_cpu_idle())
175 return 0;
176 if (!rcu_lockdep_current_cpu_online())
177 return 0;
178 return lock_is_held(&sp->dep_map); 162 return lock_is_held(&sp->dep_map);
179} 163}
180 164
@@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
236 int retval = __srcu_read_lock(sp); 220 int retval = __srcu_read_lock(sp);
237 221
238 rcu_lock_acquire(&(sp)->dep_map); 222 rcu_lock_acquire(&(sp)->dep_map);
239 rcu_lockdep_assert(!rcu_is_cpu_idle(),
240 "srcu_read_lock() used illegally while idle");
241 return retval; 223 return retval;
242} 224}
243 225
@@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
251static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) 233static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
252 __releases(sp) 234 __releases(sp)
253{ 235{
254 rcu_lockdep_assert(!rcu_is_cpu_idle(),
255 "srcu_read_unlock() used illegally while idle");
256 rcu_lock_release(&(sp)->dep_map); 236 rcu_lock_release(&(sp)->dep_map);
257 __srcu_read_unlock(sp, idx); 237 __srcu_read_unlock(sp, idx);
258} 238}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c808d7fa579..d4e3f16d5e89 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -34,8 +34,10 @@ static inline void pm_restore_console(void)
34typedef int __bitwise suspend_state_t; 34typedef int __bitwise suspend_state_t;
35 35
36#define PM_SUSPEND_ON ((__force suspend_state_t) 0) 36#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
37#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1) 37#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
38#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
38#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 39#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
40#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
39#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) 41#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
40 42
41enum suspend_stat_step { 43enum suspend_stat_step {
@@ -192,6 +194,7 @@ struct platform_suspend_ops {
192 */ 194 */
193extern void suspend_set_ops(const struct platform_suspend_ops *ops); 195extern void suspend_set_ops(const struct platform_suspend_ops *ops);
194extern int suspend_valid_only_mem(suspend_state_t state); 196extern int suspend_valid_only_mem(suspend_state_t state);
197extern void freeze_wake(void);
195 198
196/** 199/**
197 * arch_suspend_disable_irqs - disable IRQs for suspend 200 * arch_suspend_disable_irqs - disable IRQs for suspend
@@ -217,6 +220,7 @@ extern int pm_suspend(suspend_state_t state);
217 220
218static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 221static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
219static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 222static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
223static inline void freeze_wake(void) {}
220#endif /* !CONFIG_SUSPEND */ 224#endif /* !CONFIG_SUSPEND */
221 225
222/* struct pbe is used for creating lists of pages that should be restored 226/* struct pbe is used for creating lists of pages that should be restored
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 381f06db2fe5..e2cee22f578a 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -181,6 +181,10 @@ int sysfs_merge_group(struct kobject *kobj,
181 const struct attribute_group *grp); 181 const struct attribute_group *grp);
182void sysfs_unmerge_group(struct kobject *kobj, 182void sysfs_unmerge_group(struct kobject *kobj,
183 const struct attribute_group *grp); 183 const struct attribute_group *grp);
184int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
185 struct kobject *target, const char *link_name);
186void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
187 const char *link_name);
184 188
185void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); 189void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
186void sysfs_notify_dirent(struct sysfs_dirent *sd); 190void sysfs_notify_dirent(struct sysfs_dirent *sd);
@@ -326,6 +330,18 @@ static inline void sysfs_unmerge_group(struct kobject *kobj,
326{ 330{
327} 331}
328 332
333static inline int sysfs_add_link_to_group(struct kobject *kobj,
334 const char *group_name, struct kobject *target,
335 const char *link_name)
336{
337 return 0;
338}
339
340static inline void sysfs_remove_link_from_group(struct kobject *kobj,
341 const char *group_name, const char *link_name)
342{
343}
344
329static inline void sysfs_notify(struct kobject *kobj, const char *dir, 345static inline void sysfs_notify(struct kobject *kobj, const char *dir,
330 const char *attr) 346 const char *attr)
331{ 347{
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 1a6567b48492..553272e6af55 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -8,6 +8,8 @@
8 8
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/irqflags.h> 10#include <linux/irqflags.h>
11#include <linux/percpu.h>
12#include <linux/hrtimer.h>
11 13
12#ifdef CONFIG_GENERIC_CLOCKEVENTS 14#ifdef CONFIG_GENERIC_CLOCKEVENTS
13 15
@@ -122,13 +124,26 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
122#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 124#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
123 125
124# ifdef CONFIG_NO_HZ 126# ifdef CONFIG_NO_HZ
127DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
128
129static inline int tick_nohz_tick_stopped(void)
130{
131 return __this_cpu_read(tick_cpu_sched.tick_stopped);
132}
133
125extern void tick_nohz_idle_enter(void); 134extern void tick_nohz_idle_enter(void);
126extern void tick_nohz_idle_exit(void); 135extern void tick_nohz_idle_exit(void);
127extern void tick_nohz_irq_exit(void); 136extern void tick_nohz_irq_exit(void);
128extern ktime_t tick_nohz_get_sleep_length(void); 137extern ktime_t tick_nohz_get_sleep_length(void);
129extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 138extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
130extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 139extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
131# else 140
141# else /* !CONFIG_NO_HZ */
142static inline int tick_nohz_tick_stopped(void)
143{
144 return 0;
145}
146
132static inline void tick_nohz_idle_enter(void) { } 147static inline void tick_nohz_idle_enter(void) { }
133static inline void tick_nohz_idle_exit(void) { } 148static inline void tick_nohz_idle_exit(void) { }
134 149
diff --git a/include/linux/time.h b/include/linux/time.h
index 4d358e9d10f1..a3ab6a814a9c 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -115,8 +115,20 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
115 return true; 115 return true;
116} 116}
117 117
118extern bool persistent_clock_exist;
119
120#ifdef ALWAYS_USE_PERSISTENT_CLOCK
121#define has_persistent_clock() true
122#else
123static inline bool has_persistent_clock(void)
124{
125 return persistent_clock_exist;
126}
127#endif
128
118extern void read_persistent_clock(struct timespec *ts); 129extern void read_persistent_clock(struct timespec *ts);
119extern void read_boot_clock(struct timespec *ts); 130extern void read_boot_clock(struct timespec *ts);
131extern int persistent_clock_is_local;
120extern int update_persistent_clock(struct timespec now); 132extern int update_persistent_clock(struct timespec now);
121void timekeeping_init(void); 133void timekeeping_init(void);
122extern int timekeeping_suspended; 134extern int timekeeping_suspended;
@@ -158,6 +170,7 @@ extern int do_setitimer(int which, struct itimerval *value,
158 struct itimerval *ovalue); 170 struct itimerval *ovalue);
159extern unsigned int alarm_setitimer(unsigned int seconds); 171extern unsigned int alarm_setitimer(unsigned int seconds);
160extern int do_getitimer(int which, struct itimerval *value); 172extern int do_getitimer(int which, struct itimerval *value);
173extern int __getnstimeofday(struct timespec *tv);
161extern void getnstimeofday(struct timespec *tv); 174extern void getnstimeofday(struct timespec *tv);
162extern void getrawmonotonic(struct timespec *ts); 175extern void getrawmonotonic(struct timespec *ts);
163extern void getnstime_raw_and_real(struct timespec *ts_raw, 176extern void getnstime_raw_and_real(struct timespec *ts_raw,
diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h
index 44893e5ec8f7..3251965bf4cc 100644
--- a/include/linux/tsacct_kern.h
+++ b/include/linux/tsacct_kern.h
@@ -23,12 +23,15 @@ static inline void bacct_add_tsk(struct user_namespace *user_ns,
23#ifdef CONFIG_TASK_XACCT 23#ifdef CONFIG_TASK_XACCT
24extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p); 24extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p);
25extern void acct_update_integrals(struct task_struct *tsk); 25extern void acct_update_integrals(struct task_struct *tsk);
26extern void acct_account_cputime(struct task_struct *tsk);
26extern void acct_clear_integrals(struct task_struct *tsk); 27extern void acct_clear_integrals(struct task_struct *tsk);
27#else 28#else
28static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) 29static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
29{} 30{}
30static inline void acct_update_integrals(struct task_struct *tsk) 31static inline void acct_update_integrals(struct task_struct *tsk)
31{} 32{}
33static inline void acct_account_cputime(struct task_struct *tsk)
34{}
32static inline void acct_clear_integrals(struct task_struct *tsk) 35static inline void acct_clear_integrals(struct task_struct *tsk)
33{} 36{}
34#endif /* CONFIG_TASK_XACCT */ 37#endif /* CONFIG_TASK_XACCT */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4f628a6fc5b4..02b83db8e2c5 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -35,13 +35,20 @@ struct inode;
35# include <asm/uprobes.h> 35# include <asm/uprobes.h>
36#endif 36#endif
37 37
38#define UPROBE_HANDLER_REMOVE 1
39#define UPROBE_HANDLER_MASK 1
40
41enum uprobe_filter_ctx {
42 UPROBE_FILTER_REGISTER,
43 UPROBE_FILTER_UNREGISTER,
44 UPROBE_FILTER_MMAP,
45};
46
38struct uprobe_consumer { 47struct uprobe_consumer {
39 int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); 48 int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
40 /* 49 bool (*filter)(struct uprobe_consumer *self,
41 * filter is optional; If a filter exists, handler is run 50 enum uprobe_filter_ctx ctx,
42 * if and only if filter returns true. 51 struct mm_struct *mm);
43 */
44 bool (*filter)(struct uprobe_consumer *self, struct task_struct *task);
45 52
46 struct uprobe_consumer *next; 53 struct uprobe_consumer *next;
47}; 54};
@@ -94,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
94extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 101extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
95extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 102extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
96extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 103extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
104extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
97extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 105extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
98extern int uprobe_mmap(struct vm_area_struct *vma); 106extern int uprobe_mmap(struct vm_area_struct *vma);
99extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); 107extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -117,6 +125,11 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
117{ 125{
118 return -ENOSYS; 126 return -ENOSYS;
119} 127}
128static inline int
129uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
130{
131 return -ENOSYS;
132}
120static inline void 133static inline void
121uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) 134uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
122{ 135{
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 689b14b26c8d..4d22d0f6167a 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -357,6 +357,8 @@ struct usb_bus {
357 int bandwidth_int_reqs; /* number of Interrupt requests */ 357 int bandwidth_int_reqs; /* number of Interrupt requests */
358 int bandwidth_isoc_reqs; /* number of Isoc. requests */ 358 int bandwidth_isoc_reqs; /* number of Isoc. requests */
359 359
360 unsigned resuming_ports; /* bit array: resuming root-hub ports */
361
360#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) 362#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
361 struct mon_bus *mon_bus; /* non-null when associated */ 363 struct mon_bus *mon_bus; /* non-null when associated */
362 int monitored; /* non-zero when monitored */ 364 int monitored; /* non-zero when monitored */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 608050b2545f..0a78df5f6cfd 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
430extern void usb_wakeup_notification(struct usb_device *hdev, 430extern void usb_wakeup_notification(struct usb_device *hdev,
431 unsigned int portnum); 431 unsigned int portnum);
432 432
433extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
434extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
435
433/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ 436/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
434#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) 437#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
435#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) 438#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index bd45eb7bedc8..0e5ac93bab10 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -33,6 +33,7 @@ struct usbnet {
33 wait_queue_head_t *wait; 33 wait_queue_head_t *wait;
34 struct mutex phy_mutex; 34 struct mutex phy_mutex;
35 unsigned char suspend_count; 35 unsigned char suspend_count;
36 unsigned char pkt_cnt, pkt_err;
36 37
37 /* i/o info: pipes etc */ 38 /* i/o info: pipes etc */
38 unsigned in, out; 39 unsigned in, out;
@@ -70,6 +71,7 @@ struct usbnet {
70# define EVENT_DEV_OPEN 7 71# define EVENT_DEV_OPEN 7
71# define EVENT_DEVICE_REPORT_IDLE 8 72# define EVENT_DEVICE_REPORT_IDLE 8
72# define EVENT_NO_RUNTIME_PM 9 73# define EVENT_NO_RUNTIME_PM 9
74# define EVENT_RX_KILL 10
73}; 75};
74 76
75static inline struct usb_driver *driver_of(struct usb_interface *intf) 77static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -107,6 +109,7 @@ struct driver_info {
107 */ 109 */
108#define FLAG_MULTI_PACKET 0x2000 110#define FLAG_MULTI_PACKET 0x2000
109#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ 111#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
112#define FLAG_NOARP 0x8000 /* device can't do ARP */
110 113
111 /* init device ... can sleep, or cause probe() failure */ 114 /* init device ... can sleep, or cause probe() failure */
112 int (*bind)(struct usbnet *, struct usb_interface *); 115 int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index ae30ab58431a..71a5782d8c59 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -6,15 +6,46 @@ struct task_struct;
6#ifdef CONFIG_VIRT_CPU_ACCOUNTING 6#ifdef CONFIG_VIRT_CPU_ACCOUNTING
7extern void vtime_task_switch(struct task_struct *prev); 7extern void vtime_task_switch(struct task_struct *prev);
8extern void vtime_account_system(struct task_struct *tsk); 8extern void vtime_account_system(struct task_struct *tsk);
9extern void vtime_account_system_irqsafe(struct task_struct *tsk);
10extern void vtime_account_idle(struct task_struct *tsk); 9extern void vtime_account_idle(struct task_struct *tsk);
11extern void vtime_account_user(struct task_struct *tsk); 10extern void vtime_account_user(struct task_struct *tsk);
12extern void vtime_account(struct task_struct *tsk); 11extern void vtime_account_irq_enter(struct task_struct *tsk);
13#else 12
13#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
14static inline bool vtime_accounting_enabled(void) { return true; }
15#endif
16
17#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
18
14static inline void vtime_task_switch(struct task_struct *prev) { } 19static inline void vtime_task_switch(struct task_struct *prev) { }
15static inline void vtime_account_system(struct task_struct *tsk) { } 20static inline void vtime_account_system(struct task_struct *tsk) { }
16static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { } 21static inline void vtime_account_user(struct task_struct *tsk) { }
17static inline void vtime_account(struct task_struct *tsk) { } 22static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
23static inline bool vtime_accounting_enabled(void) { return false; }
24#endif
25
26#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
27extern void arch_vtime_task_switch(struct task_struct *tsk);
28extern void vtime_account_irq_exit(struct task_struct *tsk);
29extern bool vtime_accounting_enabled(void);
30extern void vtime_user_enter(struct task_struct *tsk);
31static inline void vtime_user_exit(struct task_struct *tsk)
32{
33 vtime_account_user(tsk);
34}
35extern void vtime_guest_enter(struct task_struct *tsk);
36extern void vtime_guest_exit(struct task_struct *tsk);
37extern void vtime_init_idle(struct task_struct *tsk);
38#else
39static inline void vtime_account_irq_exit(struct task_struct *tsk)
40{
41 /* On hard|softirq exit we always account to hard|softirq cputime */
42 vtime_account_system(tsk);
43}
44static inline void vtime_user_enter(struct task_struct *tsk) { }
45static inline void vtime_user_exit(struct task_struct *tsk) { }
46static inline void vtime_guest_enter(struct task_struct *tsk) { }
47static inline void vtime_guest_exit(struct task_struct *tsk) { }
48static inline void vtime_init_idle(struct task_struct *tsk) { }
18#endif 49#endif
19 50
20#ifdef CONFIG_IRQ_TIME_ACCOUNTING 51#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -23,25 +54,15 @@ extern void irqtime_account_irq(struct task_struct *tsk);
23static inline void irqtime_account_irq(struct task_struct *tsk) { } 54static inline void irqtime_account_irq(struct task_struct *tsk) { }
24#endif 55#endif
25 56
26static inline void vtime_account_irq_enter(struct task_struct *tsk) 57static inline void account_irq_enter_time(struct task_struct *tsk)
27{ 58{
28 /* 59 vtime_account_irq_enter(tsk);
29 * Hardirq can interrupt idle task anytime. So we need vtime_account()
30 * that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING.
31 * Softirq can also interrupt idle task directly if it calls
32 * local_bh_enable(). Such case probably don't exist but we never know.
33 * Ksoftirqd is not concerned because idle time is flushed on context
34 * switch. Softirqs in the end of hardirqs are also not a problem because
35 * the idle time is flushed on hardirq time already.
36 */
37 vtime_account(tsk);
38 irqtime_account_irq(tsk); 60 irqtime_account_irq(tsk);
39} 61}
40 62
41static inline void vtime_account_irq_exit(struct task_struct *tsk) 63static inline void account_irq_exit_time(struct task_struct *tsk)
42{ 64{
43 /* On hard|softirq exit we always account to hard|softirq cputime */ 65 vtime_account_irq_exit(tsk);
44 vtime_account_system(tsk);
45 irqtime_account_irq(tsk); 66 irqtime_account_irq(tsk);
46} 67}
47 68
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2b58905d3504..8afab27cdbc2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
27enum { 27enum {
28 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 28 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
29 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ 29 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
30 WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ 30 WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
31 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ 31 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
32#ifdef CONFIG_DEBUG_OBJECTS_WORK 32#ifdef CONFIG_DEBUG_OBJECTS_WORK
33 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ 33 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
@@ -40,7 +40,7 @@ enum {
40 40
41 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 41 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
42 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, 42 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
43 WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, 43 WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
44 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 44 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
45#ifdef CONFIG_DEBUG_OBJECTS_WORK 45#ifdef CONFIG_DEBUG_OBJECTS_WORK
46 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, 46 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
@@ -57,29 +57,36 @@ enum {
57 57
58 /* special cpu IDs */ 58 /* special cpu IDs */
59 WORK_CPU_UNBOUND = NR_CPUS, 59 WORK_CPU_UNBOUND = NR_CPUS,
60 WORK_CPU_NONE = NR_CPUS + 1, 60 WORK_CPU_END = NR_CPUS + 1,
61 WORK_CPU_LAST = WORK_CPU_NONE,
62 61
63 /* 62 /*
64 * Reserve 7 bits off of cwq pointer w/ debugobjects turned 63 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
65 * off. This makes cwqs aligned to 256 bytes and allows 15 64 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
66 * workqueue flush colors. 65 * flush colors.
67 */ 66 */
68 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 67 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
69 WORK_STRUCT_COLOR_BITS, 68 WORK_STRUCT_COLOR_BITS,
70 69
71 /* data contains off-queue information when !WORK_STRUCT_CWQ */ 70 /* data contains off-queue information when !WORK_STRUCT_PWQ */
72 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, 71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
73 72
74 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), 73 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
75 74
75 /*
76 * When a work item is off queue, its high bits point to the last
77 * pool it was on. Cap at 31 bits and use the highest number to
78 * indicate that no pool is associated.
79 */
76 WORK_OFFQ_FLAG_BITS = 1, 80 WORK_OFFQ_FLAG_BITS = 1,
77 WORK_OFFQ_CPU_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, 81 WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
82 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
83 WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
84 WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
78 85
79 /* convenience constants */ 86 /* convenience constants */
80 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, 87 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
81 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, 88 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
82 WORK_STRUCT_NO_CPU = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT, 89 WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
83 90
84 /* bit mask for work_busy() return values */ 91 /* bit mask for work_busy() return values */
85 WORK_BUSY_PENDING = 1 << 0, 92 WORK_BUSY_PENDING = 1 << 0,
@@ -95,13 +102,16 @@ struct work_struct {
95#endif 102#endif
96}; 103};
97 104
98#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) 105#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
99#define WORK_DATA_STATIC_INIT() \ 106#define WORK_DATA_STATIC_INIT() \
100 ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) 107 ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
101 108
102struct delayed_work { 109struct delayed_work {
103 struct work_struct work; 110 struct work_struct work;
104 struct timer_list timer; 111 struct timer_list timer;
112
113 /* target workqueue and CPU ->timer uses to queue ->work */
114 struct workqueue_struct *wq;
105 int cpu; 115 int cpu;
106}; 116};
107 117
@@ -426,7 +436,6 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
426extern void workqueue_set_max_active(struct workqueue_struct *wq, 436extern void workqueue_set_max_active(struct workqueue_struct *wq,
427 int max_active); 437 int max_active);
428extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); 438extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
429extern unsigned int work_cpu(struct work_struct *work);
430extern unsigned int work_busy(struct work_struct *work); 439extern unsigned int work_busy(struct work_struct *work);
431 440
432/* 441/*
diff --git a/include/net/ip.h b/include/net/ip.h
index 0707fb9551aa..a68f838a132c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -143,6 +143,8 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
143extern int ip4_datagram_connect(struct sock *sk, 143extern int ip4_datagram_connect(struct sock *sk,
144 struct sockaddr *uaddr, int addr_len); 144 struct sockaddr *uaddr, int addr_len);
145 145
146extern void ip4_datagram_release_cb(struct sock *sk);
147
146struct ip_reply_arg { 148struct ip_reply_arg {
147 struct kvec iov[1]; 149 struct kvec iov[1];
148 int flags; 150 int flags;
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index d8f5b9f52169..e98aeb3da033 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -31,6 +31,8 @@ extern void nf_conntrack_cleanup(struct net *net);
31extern int nf_conntrack_proto_init(struct net *net); 31extern int nf_conntrack_proto_init(struct net *net);
32extern void nf_conntrack_proto_fini(struct net *net); 32extern void nf_conntrack_proto_fini(struct net *net);
33 33
34extern void nf_conntrack_cleanup_end(void);
35
34extern bool 36extern bool
35nf_ct_get_tuple(const struct sk_buff *skb, 37nf_ct_get_tuple(const struct sk_buff *skb,
36 unsigned int nhoff, 38 unsigned int nhoff,
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 498433dd067d..938b7fd11204 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -34,17 +34,17 @@ extern int udpv6_connect(struct sock *sk,
34 struct sockaddr *uaddr, 34 struct sockaddr *uaddr,
35 int addr_len); 35 int addr_len);
36 36
37extern int datagram_recv_ctl(struct sock *sk, 37extern int ip6_datagram_recv_ctl(struct sock *sk,
38 struct msghdr *msg, 38 struct msghdr *msg,
39 struct sk_buff *skb); 39 struct sk_buff *skb);
40 40
41extern int datagram_send_ctl(struct net *net, 41extern int ip6_datagram_send_ctl(struct net *net,
42 struct sock *sk, 42 struct sock *sk,
43 struct msghdr *msg, 43 struct msghdr *msg,
44 struct flowi6 *fl6, 44 struct flowi6 *fl6,
45 struct ipv6_txoptions *opt, 45 struct ipv6_txoptions *opt,
46 int *hlimit, int *tclass, 46 int *hlimit, int *tclass,
47 int *dontfrag); 47 int *dontfrag);
48 48
49#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) 49#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
50 50
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 0c9783841a30..427acab5d69a 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -99,98 +99,6 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
99 TP_ARGS(name, state) 99 TP_ARGS(name, state)
100); 100);
101 101
102#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
103
104/*
105 * The power events are used for cpuidle & suspend (power_start, power_end)
106 * and for cpufreq (power_frequency)
107 */
108DECLARE_EVENT_CLASS(power,
109
110 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
111
112 TP_ARGS(type, state, cpu_id),
113
114 TP_STRUCT__entry(
115 __field( u64, type )
116 __field( u64, state )
117 __field( u64, cpu_id )
118 ),
119
120 TP_fast_assign(
121 __entry->type = type;
122 __entry->state = state;
123 __entry->cpu_id = cpu_id;
124 ),
125
126 TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
127 (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
128);
129
130DEFINE_EVENT(power, power_start,
131
132 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
133
134 TP_ARGS(type, state, cpu_id)
135);
136
137DEFINE_EVENT(power, power_frequency,
138
139 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
140
141 TP_ARGS(type, state, cpu_id)
142);
143
144TRACE_EVENT(power_end,
145
146 TP_PROTO(unsigned int cpu_id),
147
148 TP_ARGS(cpu_id),
149
150 TP_STRUCT__entry(
151 __field( u64, cpu_id )
152 ),
153
154 TP_fast_assign(
155 __entry->cpu_id = cpu_id;
156 ),
157
158 TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
159
160);
161
162/* Deprecated dummy functions must be protected against multi-declartion */
163#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
164#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
165
166enum {
167 POWER_NONE = 0,
168 POWER_CSTATE = 1,
169 POWER_PSTATE = 2,
170};
171#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
172
173#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
174
175#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
176#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
177enum {
178 POWER_NONE = 0,
179 POWER_CSTATE = 1,
180 POWER_PSTATE = 2,
181};
182
183/* These dummy declaration have to be ripped out when the deprecated
184 events get removed */
185static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
186static inline void trace_power_end(u64 cpuid) {};
187static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
188static inline void trace_power_end_rcuidle(u64 cpuid) {};
189static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
190#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
191
192#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
193
194/* 102/*
195 * The clock events are used for clock enable/disable and for 103 * The clock events are used for clock enable/disable and for
196 * clock rate change 104 * clock rate change
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
new file mode 100644
index 000000000000..88b878383797
--- /dev/null
+++ b/include/trace/events/ras.h
@@ -0,0 +1,77 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM ras
3
4#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_AER_H
6
7#include <linux/tracepoint.h>
8#include <linux/edac.h>
9
10
11/*
12 * PCIe AER Trace event
13 *
14 * These events are generated when hardware detects a corrected or
15 * uncorrected event on a PCIe device. The event report has
16 * the following structure:
17 *
18 * char * dev_name - The name of the slot where the device resides
19 * ([domain:]bus:device.function).
20 * u32 status - Either the correctable or uncorrectable register
21 * indicating what error or errors have been seen
22 * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
23 */
24
25#define aer_correctable_errors \
26 {BIT(0), "Receiver Error"}, \
27 {BIT(6), "Bad TLP"}, \
28 {BIT(7), "Bad DLLP"}, \
29 {BIT(8), "RELAY_NUM Rollover"}, \
30 {BIT(12), "Replay Timer Timeout"}, \
31 {BIT(13), "Advisory Non-Fatal"}
32
33#define aer_uncorrectable_errors \
34 {BIT(4), "Data Link Protocol"}, \
35 {BIT(12), "Poisoned TLP"}, \
36 {BIT(13), "Flow Control Protocol"}, \
37 {BIT(14), "Completion Timeout"}, \
38 {BIT(15), "Completer Abort"}, \
39 {BIT(16), "Unexpected Completion"}, \
40 {BIT(17), "Receiver Overflow"}, \
41 {BIT(18), "Malformed TLP"}, \
42 {BIT(19), "ECRC"}, \
43 {BIT(20), "Unsupported Request"}
44
45TRACE_EVENT(aer_event,
46 TP_PROTO(const char *dev_name,
47 const u32 status,
48 const u8 severity),
49
50 TP_ARGS(dev_name, status, severity),
51
52 TP_STRUCT__entry(
53 __string( dev_name, dev_name )
54 __field( u32, status )
55 __field( u8, severity )
56 ),
57
58 TP_fast_assign(
59 __assign_str(dev_name, dev_name);
60 __entry->status = status;
61 __entry->severity = severity;
62 ),
63
64 TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
65 __get_str(dev_name),
66 __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
67 __entry->severity == HW_EVENT_ERR_FATAL ?
68 "Fatal" : "Uncorrected",
69 __entry->severity == HW_EVENT_ERR_CORRECTED ?
70 __print_flags(__entry->status, "|", aer_correctable_errors) :
71 __print_flags(__entry->status, "|", aer_uncorrectable_errors))
72);
73
74#endif /* _TRACE_AER_H */
75
76/* This part must be outside protection */
77#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d4f559b1ec34..1918e832da4f 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -44,8 +44,10 @@ TRACE_EVENT(rcu_utilization,
44 * of a new grace period or the end of an old grace period ("cpustart" 44 * of a new grace period or the end of an old grace period ("cpustart"
45 * and "cpuend", respectively), a CPU passing through a quiescent 45 * and "cpuend", respectively), a CPU passing through a quiescent
46 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" 46 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
47 * and "cpuofl", respectively), and a CPU being kicked for being too 47 * and "cpuofl", respectively), a CPU being kicked for being too
48 * long in dyntick-idle mode ("kick"). 48 * long in dyntick-idle mode ("kick"), a CPU accelerating its new
49 * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
50 * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
49 */ 51 */
50TRACE_EVENT(rcu_grace_period, 52TRACE_EVENT(rcu_grace_period,
51 53
@@ -393,7 +395,7 @@ TRACE_EVENT(rcu_kfree_callback,
393 */ 395 */
394TRACE_EVENT(rcu_batch_start, 396TRACE_EVENT(rcu_batch_start,
395 397
396 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit), 398 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
397 399
398 TP_ARGS(rcuname, qlen_lazy, qlen, blimit), 400 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
399 401
@@ -401,7 +403,7 @@ TRACE_EVENT(rcu_batch_start,
401 __field(char *, rcuname) 403 __field(char *, rcuname)
402 __field(long, qlen_lazy) 404 __field(long, qlen_lazy)
403 __field(long, qlen) 405 __field(long, qlen)
404 __field(int, blimit) 406 __field(long, blimit)
405 ), 407 ),
406 408
407 TP_fast_assign( 409 TP_fast_assign(
@@ -411,7 +413,7 @@ TRACE_EVENT(rcu_batch_start,
411 __entry->blimit = blimit; 413 __entry->blimit = blimit;
412 ), 414 ),
413 415
414 TP_printk("%s CBs=%ld/%ld bl=%d", 416 TP_printk("%s CBs=%ld/%ld bl=%ld",
415 __entry->rcuname, __entry->qlen_lazy, __entry->qlen, 417 __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
416 __entry->blimit) 418 __entry->blimit)
417); 419);
@@ -523,22 +525,30 @@ TRACE_EVENT(rcu_batch_end,
523 */ 525 */
524TRACE_EVENT(rcu_torture_read, 526TRACE_EVENT(rcu_torture_read,
525 527
526 TP_PROTO(char *rcutorturename, struct rcu_head *rhp), 528 TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
529 unsigned long secs, unsigned long c_old, unsigned long c),
527 530
528 TP_ARGS(rcutorturename, rhp), 531 TP_ARGS(rcutorturename, rhp, secs, c_old, c),
529 532
530 TP_STRUCT__entry( 533 TP_STRUCT__entry(
531 __field(char *, rcutorturename) 534 __field(char *, rcutorturename)
532 __field(struct rcu_head *, rhp) 535 __field(struct rcu_head *, rhp)
536 __field(unsigned long, secs)
537 __field(unsigned long, c_old)
538 __field(unsigned long, c)
533 ), 539 ),
534 540
535 TP_fast_assign( 541 TP_fast_assign(
536 __entry->rcutorturename = rcutorturename; 542 __entry->rcutorturename = rcutorturename;
537 __entry->rhp = rhp; 543 __entry->rhp = rhp;
544 __entry->secs = secs;
545 __entry->c_old = c_old;
546 __entry->c = c;
538 ), 547 ),
539 548
540 TP_printk("%s torture read %p", 549 TP_printk("%s torture read %p %luus c: %lu %lu",
541 __entry->rcutorturename, __entry->rhp) 550 __entry->rcutorturename, __entry->rhp,
551 __entry->secs, __entry->c_old, __entry->c)
542); 552);
543 553
544/* 554/*
@@ -608,7 +618,8 @@ TRACE_EVENT(rcu_barrier,
608#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) 618#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
609#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ 619#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
610 do { } while (0) 620 do { } while (0)
611#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) 621#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
622 do { } while (0)
612#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) 623#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
613 624
614#endif /* #else #ifdef CONFIG_RCU_TRACE */ 625#endif /* #else #ifdef CONFIG_RCU_TRACE */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index f28d1b65f178..bf0e18ba6cfb 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,
27/** 27/**
28 * workqueue_queue_work - called when a work gets queued 28 * workqueue_queue_work - called when a work gets queued
29 * @req_cpu: the requested cpu 29 * @req_cpu: the requested cpu
30 * @cwq: pointer to struct cpu_workqueue_struct 30 * @pwq: pointer to struct pool_workqueue
31 * @work: pointer to struct work_struct 31 * @work: pointer to struct work_struct
32 * 32 *
33 * This event occurs when a work is queued immediately or once a 33 * This event occurs when a work is queued immediately or once a
@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,
36 */ 36 */
37TRACE_EVENT(workqueue_queue_work, 37TRACE_EVENT(workqueue_queue_work,
38 38
39 TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, 39 TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
40 struct work_struct *work), 40 struct work_struct *work),
41 41
42 TP_ARGS(req_cpu, cwq, work), 42 TP_ARGS(req_cpu, pwq, work),
43 43
44 TP_STRUCT__entry( 44 TP_STRUCT__entry(
45 __field( void *, work ) 45 __field( void *, work )
@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,
52 TP_fast_assign( 52 TP_fast_assign(
53 __entry->work = work; 53 __entry->work = work;
54 __entry->function = work->func; 54 __entry->function = work->func;
55 __entry->workqueue = cwq->wq; 55 __entry->workqueue = pwq->wq;
56 __entry->req_cpu = req_cpu; 56 __entry->req_cpu = req_cpu;
57 __entry->cpu = cwq->pool->gcwq->cpu; 57 __entry->cpu = pwq->pool->cpu;
58 ), 58 ),
59 59
60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", 60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 77cdba9df274..bb991dfe134f 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -28,25 +28,16 @@
28#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION 28#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
29 29
30/* 30/*
31 * Architectures where both 32- and 64-bit binaries can be executed 31 * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
32 * on 64-bit kernels need this. This keeps the structure format 32 * back to the kernel via ioctl from userspace. On architectures where 32- and
33 * uniform, and makes sure the wait_queue_token isn't too big to be 33 * 64-bit userspace binaries can be executed it's important that the size of
34 * passed back down to the kernel. 34 * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
35 * 35 * do not break the binary ABI interface by changing the structure size.
36 * This assumes that on these architectures:
37 * mode 32 bit 64 bit
38 * -------------------------
39 * int 32 bit 32 bit
40 * long 32 bit 64 bit
41 *
42 * If so, 32-bit user-space code should be backwards compatible.
43 */ 36 */
44 37#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
45#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
46 || defined(__powerpc__) || defined(__s390__)
47typedef unsigned int autofs_wqt_t;
48#else
49typedef unsigned long autofs_wqt_t; 38typedef unsigned long autofs_wqt_t;
39#else
40typedef unsigned int autofs_wqt_t;
50#endif 41#endif
51 42
52/* Packet types */ 43/* Packet types */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e6e5d4b13708..7f2360a46fc2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -115,6 +115,7 @@ struct kvm_irq_level {
115 * ACPI gsi notion of irq. 115 * ACPI gsi notion of irq.
116 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. 116 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
117 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. 117 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
118 * For ARM: See Documentation/virtual/kvm/api.txt
118 */ 119 */
119 union { 120 union {
120 __u32 irq; 121 __u32 irq;
@@ -635,6 +636,7 @@ struct kvm_ppc_smmu_info {
635#define KVM_CAP_IRQFD_RESAMPLE 82 636#define KVM_CAP_IRQFD_RESAMPLE 82
636#define KVM_CAP_PPC_BOOKE_WATCHDOG 83 637#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
637#define KVM_CAP_PPC_HTAB_FD 84 638#define KVM_CAP_PPC_HTAB_FD 84
639#define KVM_CAP_ARM_PSCI 87
638 640
639#ifdef KVM_CAP_IRQ_ROUTING 641#ifdef KVM_CAP_IRQ_ROUTING
640 642
@@ -764,6 +766,11 @@ struct kvm_dirty_tlb {
764#define KVM_REG_SIZE_U512 0x0060000000000000ULL 766#define KVM_REG_SIZE_U512 0x0060000000000000ULL
765#define KVM_REG_SIZE_U1024 0x0070000000000000ULL 767#define KVM_REG_SIZE_U1024 0x0070000000000000ULL
766 768
769struct kvm_reg_list {
770 __u64 n; /* number of regs */
771 __u64 reg[0];
772};
773
767struct kvm_one_reg { 774struct kvm_one_reg {
768 __u64 id; 775 __u64 id;
769 __u64 addr; 776 __u64 addr;
@@ -932,6 +939,8 @@ struct kvm_s390_ucas_mapping {
932#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg) 939#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
933/* VM is being stopped by host */ 940/* VM is being stopped by host */
934#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad) 941#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
942#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
943#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
935 944
936#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 945#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
937#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) 946#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 4f63c05d27c9..9fa9c622a7f4 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -579,7 +579,8 @@ enum perf_event_type {
579 * { u32 size; 579 * { u32 size;
580 * char data[size];}&& PERF_SAMPLE_RAW 580 * char data[size];}&& PERF_SAMPLE_RAW
581 * 581 *
582 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 582 * { u64 nr;
583 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
583 * 584 *
584 * { u64 abi; # enum perf_sample_regs_abi 585 * { u64 abi; # enum perf_sample_regs_abi
585 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER 586 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 78f99d97475b..2c6c85f18ea0 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -50,7 +50,8 @@
50#define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ 50#define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */
51#define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ 51#define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */
52#define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ 52#define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */
53#define PORT_MAX_8250 24 /* max port ID */ 53#define PORT_BRCM_TRUMANAGE 24
54#define PORT_MAX_8250 25 /* max port ID */
54 55
55/* 56/*
56 * ARM specific type numbers. These are not currently guaranteed 57 * ARM specific type numbers. These are not currently guaranteed
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 50598472dc41..f738e25377ff 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -152,6 +152,12 @@
152#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) 152#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
153#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) 153#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
154 154
155/*
156 * Interface status, Figure 9-5 USB 3.0 spec
157 */
158#define USB_INTRF_STAT_FUNC_RW_CAP 1
159#define USB_INTRF_STAT_FUNC_RW 2
160
155#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ 161#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
156 162
157/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ 163/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
diff --git a/init/Kconfig b/init/Kconfig
index be8b7f55312d..7000d9657402 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -20,12 +20,8 @@ config CONSTRUCTORS
20 bool 20 bool
21 depends on !UML 21 depends on !UML
22 22
23config HAVE_IRQ_WORK
24 bool
25
26config IRQ_WORK 23config IRQ_WORK
27 bool 24 bool
28 depends on HAVE_IRQ_WORK
29 25
30config BUILDTIME_EXTABLE_SORT 26config BUILDTIME_EXTABLE_SORT
31 bool 27 bool
@@ -326,10 +322,13 @@ source "kernel/time/Kconfig"
326 322
327menu "CPU/Task time and stats accounting" 323menu "CPU/Task time and stats accounting"
328 324
325config VIRT_CPU_ACCOUNTING
326 bool
327
329choice 328choice
330 prompt "Cputime accounting" 329 prompt "Cputime accounting"
331 default TICK_CPU_ACCOUNTING if !PPC64 330 default TICK_CPU_ACCOUNTING if !PPC64
332 default VIRT_CPU_ACCOUNTING if PPC64 331 default VIRT_CPU_ACCOUNTING_NATIVE if PPC64
333 332
334# Kind of a stub config for the pure tick based cputime accounting 333# Kind of a stub config for the pure tick based cputime accounting
335config TICK_CPU_ACCOUNTING 334config TICK_CPU_ACCOUNTING
@@ -342,9 +341,10 @@ config TICK_CPU_ACCOUNTING
342 341
343 If unsure, say Y. 342 If unsure, say Y.
344 343
345config VIRT_CPU_ACCOUNTING 344config VIRT_CPU_ACCOUNTING_NATIVE
346 bool "Deterministic task and CPU time accounting" 345 bool "Deterministic task and CPU time accounting"
347 depends on HAVE_VIRT_CPU_ACCOUNTING 346 depends on HAVE_VIRT_CPU_ACCOUNTING
347 select VIRT_CPU_ACCOUNTING
348 help 348 help
349 Select this option to enable more accurate task and CPU time 349 Select this option to enable more accurate task and CPU time
350 accounting. This is done by reading a CPU counter on each 350 accounting. This is done by reading a CPU counter on each
@@ -354,6 +354,23 @@ config VIRT_CPU_ACCOUNTING
354 this also enables accounting of stolen time on logically-partitioned 354 this also enables accounting of stolen time on logically-partitioned
355 systems. 355 systems.
356 356
357config VIRT_CPU_ACCOUNTING_GEN
358 bool "Full dynticks CPU time accounting"
359 depends on HAVE_CONTEXT_TRACKING && 64BIT
360 select VIRT_CPU_ACCOUNTING
361 select CONTEXT_TRACKING
362 help
363 Select this option to enable task and CPU time accounting on full
364 dynticks systems. This accounting is implemented by watching every
365 kernel-user boundaries using the context tracking subsystem.
366 The accounting is thus performed at the expense of some significant
367 overhead.
368
369 For now this is only useful if you are working on the full
370 dynticks subsystem development.
371
372 If unsure, say N.
373
357config IRQ_TIME_ACCOUNTING 374config IRQ_TIME_ACCOUNTING
358 bool "Fine granularity task level IRQ time accounting" 375 bool "Fine granularity task level IRQ time accounting"
359 depends on HAVE_IRQ_TIME_ACCOUNTING 376 depends on HAVE_IRQ_TIME_ACCOUNTING
@@ -453,7 +470,7 @@ config TREE_RCU
453 470
454config TREE_PREEMPT_RCU 471config TREE_PREEMPT_RCU
455 bool "Preemptible tree-based hierarchical RCU" 472 bool "Preemptible tree-based hierarchical RCU"
456 depends on PREEMPT && SMP 473 depends on PREEMPT
457 help 474 help
458 This option selects the RCU implementation that is 475 This option selects the RCU implementation that is
459 designed for very large SMP systems with hundreds or 476 designed for very large SMP systems with hundreds or
@@ -461,6 +478,8 @@ config TREE_PREEMPT_RCU
461 is also required. It also scales down nicely to 478 is also required. It also scales down nicely to
462 smaller systems. 479 smaller systems.
463 480
481 Select this option if you are unsure.
482
464config TINY_RCU 483config TINY_RCU
465 bool "UP-only small-memory-footprint RCU" 484 bool "UP-only small-memory-footprint RCU"
466 depends on !PREEMPT && !SMP 485 depends on !PREEMPT && !SMP
@@ -486,6 +505,14 @@ config PREEMPT_RCU
486 This option enables preemptible-RCU code that is common between 505 This option enables preemptible-RCU code that is common between
487 the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. 506 the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
488 507
508config RCU_STALL_COMMON
509 def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
510 help
511 This option enables RCU CPU stall code that is common between
512 the TINY and TREE variants of RCU. The purpose is to allow
513 the tiny variants to disable RCU CPU stall warnings, while
514 making these warnings mandatory for the tree variants.
515
489config CONTEXT_TRACKING 516config CONTEXT_TRACKING
490 bool 517 bool
491 518
@@ -1263,6 +1290,7 @@ config HOTPLUG
1263config PRINTK 1290config PRINTK
1264 default y 1291 default y
1265 bool "Enable support for printk" if EXPERT 1292 bool "Enable support for printk" if EXPERT
1293 select IRQ_WORK
1266 help 1294 help
1267 This option enables normal printk support. Removing it 1295 This option enables normal printk support. Removing it
1268 eliminates most of the message strings from the kernel image 1296 eliminates most of the message strings from the kernel image
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 5e4ded51788e..a32ec1ce882b 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd);
36static int init_linuxrc(struct subprocess_info *info, struct cred *new) 36static int init_linuxrc(struct subprocess_info *info, struct cred *new)
37{ 37{
38 sys_unshare(CLONE_FS | CLONE_FILES); 38 sys_unshare(CLONE_FS | CLONE_FILES);
39 /* stdin/stdout/stderr for /linuxrc */
40 sys_open("/dev/console", O_RDWR, 0);
41 sys_dup(0);
42 sys_dup(0);
39 /* move initrd over / and chdir/chroot in initrd root */ 43 /* move initrd over / and chdir/chroot in initrd root */
40 sys_chdir("/root"); 44 sys_chdir("/root");
41 sys_mount(".", "/", NULL, MS_MOVE, NULL); 45 sys_mount(".", "/", NULL, MS_MOVE, NULL);
@@ -57,6 +61,9 @@ static void __init handle_initrd(void)
57 sys_mkdir("/old", 0700); 61 sys_mkdir("/old", 0700);
58 sys_chdir("/old"); 62 sys_chdir("/old");
59 63
64 /* try loading default modules from initrd */
65 load_default_modules();
66
60 /* 67 /*
61 * In case that a resume from disk is carried out by linuxrc or one of 68 * In case that a resume from disk is carried out by linuxrc or one of
62 * its children, we need to tell the freezer not to wait for us. 69 * its children, we need to tell the freezer not to wait for us.
diff --git a/init/init_task.c b/init/init_task.c
index 8b2f3996b035..ba0a7f362d9e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -2,6 +2,8 @@
2#include <linux/export.h> 2#include <linux/export.h>
3#include <linux/mqueue.h> 3#include <linux/mqueue.h>
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/sched/sysctl.h>
6#include <linux/sched/rt.h>
5#include <linux/init.h> 7#include <linux/init.h>
6#include <linux/fs.h> 8#include <linux/fs.h>
7#include <linux/mm.h> 9#include <linux/mm.h>
diff --git a/init/initramfs.c b/init/initramfs.c
index 84c6bf111300..a67ef9dbda9d 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -592,7 +592,7 @@ static int __init populate_rootfs(void)
592 initrd_end - initrd_start); 592 initrd_end - initrd_start);
593 if (!err) { 593 if (!err) {
594 free_initrd(); 594 free_initrd();
595 return 0; 595 goto done;
596 } else { 596 } else {
597 clean_rootfs(); 597 clean_rootfs();
598 unpack_to_rootfs(__initramfs_start, __initramfs_size); 598 unpack_to_rootfs(__initramfs_start, __initramfs_size);
@@ -607,6 +607,7 @@ static int __init populate_rootfs(void)
607 sys_close(fd); 607 sys_close(fd);
608 free_initrd(); 608 free_initrd();
609 } 609 }
610 done:
610#else 611#else
611 printk(KERN_INFO "Unpacking initramfs...\n"); 612 printk(KERN_INFO "Unpacking initramfs...\n");
612 err = unpack_to_rootfs((char *)initrd_start, 613 err = unpack_to_rootfs((char *)initrd_start,
@@ -615,6 +616,11 @@ static int __init populate_rootfs(void)
615 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); 616 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
616 free_initrd(); 617 free_initrd();
617#endif 618#endif
619 /*
620 * Try loading default modules from initramfs. This gives
621 * us a chance to load before device_initcalls.
622 */
623 load_default_modules();
618 } 624 }
619 return 0; 625 return 0;
620} 626}
diff --git a/init/main.c b/init/main.c
index 85d69dffe864..63534a141b4e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,6 +70,8 @@
70#include <linux/perf_event.h> 70#include <linux/perf_event.h>
71#include <linux/file.h> 71#include <linux/file.h>
72#include <linux/ptrace.h> 72#include <linux/ptrace.h>
73#include <linux/blkdev.h>
74#include <linux/elevator.h>
73 75
74#include <asm/io.h> 76#include <asm/io.h>
75#include <asm/bugs.h> 77#include <asm/bugs.h>
@@ -604,7 +606,7 @@ asmlinkage void __init start_kernel(void)
604 pidmap_init(); 606 pidmap_init();
605 anon_vma_init(); 607 anon_vma_init();
606#ifdef CONFIG_X86 608#ifdef CONFIG_X86
607 if (efi_enabled) 609 if (efi_enabled(EFI_RUNTIME_SERVICES))
608 efi_enter_virtual_mode(); 610 efi_enter_virtual_mode();
609#endif 611#endif
610 thread_info_cache_init(); 612 thread_info_cache_init();
@@ -632,7 +634,7 @@ asmlinkage void __init start_kernel(void)
632 acpi_early_init(); /* before LAPIC and SMP init */ 634 acpi_early_init(); /* before LAPIC and SMP init */
633 sfi_init_late(); 635 sfi_init_late();
634 636
635 if (efi_enabled) { 637 if (efi_enabled(EFI_RUNTIME_SERVICES)) {
636 efi_late_init(); 638 efi_late_init();
637 efi_free_boot_services(); 639 efi_free_boot_services();
638 } 640 }
@@ -794,6 +796,17 @@ static void __init do_pre_smp_initcalls(void)
794 do_one_initcall(*fn); 796 do_one_initcall(*fn);
795} 797}
796 798
799/*
800 * This function requests modules which should be loaded by default and is
801 * called twice right after initrd is mounted and right before init is
802 * exec'd. If such modules are on either initrd or rootfs, they will be
803 * loaded before control is passed to userland.
804 */
805void __init load_default_modules(void)
806{
807 load_default_elevator_module();
808}
809
797static int run_init_process(const char *init_filename) 810static int run_init_process(const char *init_filename)
798{ 811{
799 argv_init[0] = init_filename; 812 argv_init[0] = init_filename;
@@ -802,7 +815,7 @@ static int run_init_process(const char *init_filename)
802 (const char __user *const __user *)envp_init); 815 (const char __user *const __user *)envp_init);
803} 816}
804 817
805static void __init kernel_init_freeable(void); 818static noinline void __init kernel_init_freeable(void);
806 819
807static int __ref kernel_init(void *unused) 820static int __ref kernel_init(void *unused)
808{ 821{
@@ -845,7 +858,7 @@ static int __ref kernel_init(void *unused)
845 "See Linux Documentation/init.txt for guidance."); 858 "See Linux Documentation/init.txt for guidance.");
846} 859}
847 860
848static void __init kernel_init_freeable(void) 861static noinline void __init kernel_init_freeable(void)
849{ 862{
850 /* 863 /*
851 * Wait until kthreadd is all set-up. 864 * Wait until kthreadd is all set-up.
@@ -900,4 +913,7 @@ static void __init kernel_init_freeable(void)
900 * we're essentially up and running. Get rid of the 913 * we're essentially up and running. Get rid of the
901 * initmem segments and start the user-mode stuff.. 914 * initmem segments and start the user-mode stuff..
902 */ 915 */
916
917 /* rootfs is available now, try loading default modules */
918 load_default_modules();
903} 919}
diff --git a/kernel/acct.c b/kernel/acct.c
index 051e071a06e7..e8b1627ab9c7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -566,6 +566,7 @@ out:
566void acct_collect(long exitcode, int group_dead) 566void acct_collect(long exitcode, int group_dead)
567{ 567{
568 struct pacct_struct *pacct = &current->signal->pacct; 568 struct pacct_struct *pacct = &current->signal->pacct;
569 cputime_t utime, stime;
569 unsigned long vsize = 0; 570 unsigned long vsize = 0;
570 571
571 if (group_dead && current->mm) { 572 if (group_dead && current->mm) {
@@ -593,8 +594,9 @@ void acct_collect(long exitcode, int group_dead)
593 pacct->ac_flag |= ACORE; 594 pacct->ac_flag |= ACORE;
594 if (current->flags & PF_SIGNALED) 595 if (current->flags & PF_SIGNALED)
595 pacct->ac_flag |= AXSIG; 596 pacct->ac_flag |= AXSIG;
596 pacct->ac_utime += current->utime; 597 task_cputime(current, &utime, &stime);
597 pacct->ac_stime += current->stime; 598 pacct->ac_utime += utime;
599 pacct->ac_stime += stime;
598 pacct->ac_minflt += current->min_flt; 600 pacct->ac_minflt += current->min_flt;
599 pacct->ac_majflt += current->maj_flt; 601 pacct->ac_majflt += current->maj_flt;
600 spin_unlock_irq(&current->sighand->siglock); 602 spin_unlock_irq(&current->sighand->siglock);
diff --git a/kernel/async.c b/kernel/async.c
index a1d585c351d6..8ddee2c3e5b0 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -57,56 +57,52 @@ asynchronous and synchronous parts of the kernel.
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/workqueue.h> 58#include <linux/workqueue.h>
59 59
60#include "workqueue_internal.h"
61
60static async_cookie_t next_cookie = 1; 62static async_cookie_t next_cookie = 1;
61 63
62#define MAX_WORK 32768 64#define MAX_WORK 32768
65#define ASYNC_COOKIE_MAX ULLONG_MAX /* infinity cookie */
63 66
64static LIST_HEAD(async_pending); 67static LIST_HEAD(async_global_pending); /* pending from all registered doms */
65static ASYNC_DOMAIN(async_running); 68static ASYNC_DOMAIN(async_dfl_domain);
66static LIST_HEAD(async_domains);
67static DEFINE_SPINLOCK(async_lock); 69static DEFINE_SPINLOCK(async_lock);
68static DEFINE_MUTEX(async_register_mutex);
69 70
70struct async_entry { 71struct async_entry {
71 struct list_head list; 72 struct list_head domain_list;
73 struct list_head global_list;
72 struct work_struct work; 74 struct work_struct work;
73 async_cookie_t cookie; 75 async_cookie_t cookie;
74 async_func_ptr *func; 76 async_func_ptr *func;
75 void *data; 77 void *data;
76 struct async_domain *running; 78 struct async_domain *domain;
77}; 79};
78 80
79static DECLARE_WAIT_QUEUE_HEAD(async_done); 81static DECLARE_WAIT_QUEUE_HEAD(async_done);
80 82
81static atomic_t entry_count; 83static atomic_t entry_count;
82 84
83 85static async_cookie_t lowest_in_progress(struct async_domain *domain)
84/*
85 * MUST be called with the lock held!
86 */
87static async_cookie_t __lowest_in_progress(struct async_domain *running)
88{ 86{
89 struct async_entry *entry; 87 struct async_entry *first = NULL;
90 88 async_cookie_t ret = ASYNC_COOKIE_MAX;
91 if (!list_empty(&running->domain)) { 89 unsigned long flags;
92 entry = list_first_entry(&running->domain, typeof(*entry), list);
93 return entry->cookie;
94 }
95 90
96 list_for_each_entry(entry, &async_pending, list) 91 spin_lock_irqsave(&async_lock, flags);
97 if (entry->running == running)
98 return entry->cookie;
99 92
100 return next_cookie; /* "infinity" value */ 93 if (domain) {
101} 94 if (!list_empty(&domain->pending))
95 first = list_first_entry(&domain->pending,
96 struct async_entry, domain_list);
97 } else {
98 if (!list_empty(&async_global_pending))
99 first = list_first_entry(&async_global_pending,
100 struct async_entry, global_list);
101 }
102 102
103static async_cookie_t lowest_in_progress(struct async_domain *running) 103 if (first)
104{ 104 ret = first->cookie;
105 unsigned long flags;
106 async_cookie_t ret;
107 105
108 spin_lock_irqsave(&async_lock, flags);
109 ret = __lowest_in_progress(running);
110 spin_unlock_irqrestore(&async_lock, flags); 106 spin_unlock_irqrestore(&async_lock, flags);
111 return ret; 107 return ret;
112} 108}
@@ -120,14 +116,8 @@ static void async_run_entry_fn(struct work_struct *work)
120 container_of(work, struct async_entry, work); 116 container_of(work, struct async_entry, work);
121 unsigned long flags; 117 unsigned long flags;
122 ktime_t uninitialized_var(calltime), delta, rettime; 118 ktime_t uninitialized_var(calltime), delta, rettime;
123 struct async_domain *running = entry->running;
124
125 /* 1) move self to the running queue */
126 spin_lock_irqsave(&async_lock, flags);
127 list_move_tail(&entry->list, &running->domain);
128 spin_unlock_irqrestore(&async_lock, flags);
129 119
130 /* 2) run (and print duration) */ 120 /* 1) run (and print duration) */
131 if (initcall_debug && system_state == SYSTEM_BOOTING) { 121 if (initcall_debug && system_state == SYSTEM_BOOTING) {
132 printk(KERN_DEBUG "calling %lli_%pF @ %i\n", 122 printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
133 (long long)entry->cookie, 123 (long long)entry->cookie,
@@ -144,23 +134,22 @@ static void async_run_entry_fn(struct work_struct *work)
144 (long long)ktime_to_ns(delta) >> 10); 134 (long long)ktime_to_ns(delta) >> 10);
145 } 135 }
146 136
147 /* 3) remove self from the running queue */ 137 /* 2) remove self from the pending queues */
148 spin_lock_irqsave(&async_lock, flags); 138 spin_lock_irqsave(&async_lock, flags);
149 list_del(&entry->list); 139 list_del_init(&entry->domain_list);
150 if (running->registered && --running->count == 0) 140 list_del_init(&entry->global_list);
151 list_del_init(&running->node);
152 141
153 /* 4) free the entry */ 142 /* 3) free the entry */
154 kfree(entry); 143 kfree(entry);
155 atomic_dec(&entry_count); 144 atomic_dec(&entry_count);
156 145
157 spin_unlock_irqrestore(&async_lock, flags); 146 spin_unlock_irqrestore(&async_lock, flags);
158 147
159 /* 5) wake up any waiters */ 148 /* 4) wake up any waiters */
160 wake_up(&async_done); 149 wake_up(&async_done);
161} 150}
162 151
163static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running) 152static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
164{ 153{
165 struct async_entry *entry; 154 struct async_entry *entry;
166 unsigned long flags; 155 unsigned long flags;
@@ -183,16 +172,22 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
183 ptr(data, newcookie); 172 ptr(data, newcookie);
184 return newcookie; 173 return newcookie;
185 } 174 }
175 INIT_LIST_HEAD(&entry->domain_list);
176 INIT_LIST_HEAD(&entry->global_list);
186 INIT_WORK(&entry->work, async_run_entry_fn); 177 INIT_WORK(&entry->work, async_run_entry_fn);
187 entry->func = ptr; 178 entry->func = ptr;
188 entry->data = data; 179 entry->data = data;
189 entry->running = running; 180 entry->domain = domain;
190 181
191 spin_lock_irqsave(&async_lock, flags); 182 spin_lock_irqsave(&async_lock, flags);
183
184 /* allocate cookie and queue */
192 newcookie = entry->cookie = next_cookie++; 185 newcookie = entry->cookie = next_cookie++;
193 list_add_tail(&entry->list, &async_pending); 186
194 if (running->registered && running->count++ == 0) 187 list_add_tail(&entry->domain_list, &domain->pending);
195 list_add_tail(&running->node, &async_domains); 188 if (domain->registered)
189 list_add_tail(&entry->global_list, &async_global_pending);
190
196 atomic_inc(&entry_count); 191 atomic_inc(&entry_count);
197 spin_unlock_irqrestore(&async_lock, flags); 192 spin_unlock_irqrestore(&async_lock, flags);
198 193
@@ -215,7 +210,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
215 */ 210 */
216async_cookie_t async_schedule(async_func_ptr *ptr, void *data) 211async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
217{ 212{
218 return __async_schedule(ptr, data, &async_running); 213 return __async_schedule(ptr, data, &async_dfl_domain);
219} 214}
220EXPORT_SYMBOL_GPL(async_schedule); 215EXPORT_SYMBOL_GPL(async_schedule);
221 216
@@ -223,18 +218,18 @@ EXPORT_SYMBOL_GPL(async_schedule);
223 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain 218 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
224 * @ptr: function to execute asynchronously 219 * @ptr: function to execute asynchronously
225 * @data: data pointer to pass to the function 220 * @data: data pointer to pass to the function
226 * @running: running list for the domain 221 * @domain: the domain
227 * 222 *
228 * Returns an async_cookie_t that may be used for checkpointing later. 223 * Returns an async_cookie_t that may be used for checkpointing later.
229 * @running may be used in the async_synchronize_*_domain() functions 224 * @domain may be used in the async_synchronize_*_domain() functions to
230 * to wait within a certain synchronization domain rather than globally. 225 * wait within a certain synchronization domain rather than globally. A
231 * A synchronization domain is specified via the running queue @running to use. 226 * synchronization domain is specified via @domain. Note: This function
232 * Note: This function may be called from atomic or non-atomic contexts. 227 * may be called from atomic or non-atomic contexts.
233 */ 228 */
234async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, 229async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
235 struct async_domain *running) 230 struct async_domain *domain)
236{ 231{
237 return __async_schedule(ptr, data, running); 232 return __async_schedule(ptr, data, domain);
238} 233}
239EXPORT_SYMBOL_GPL(async_schedule_domain); 234EXPORT_SYMBOL_GPL(async_schedule_domain);
240 235
@@ -245,18 +240,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
245 */ 240 */
246void async_synchronize_full(void) 241void async_synchronize_full(void)
247{ 242{
248 mutex_lock(&async_register_mutex); 243 async_synchronize_full_domain(NULL);
249 do {
250 struct async_domain *domain = NULL;
251
252 spin_lock_irq(&async_lock);
253 if (!list_empty(&async_domains))
254 domain = list_first_entry(&async_domains, typeof(*domain), node);
255 spin_unlock_irq(&async_lock);
256
257 async_synchronize_cookie_domain(next_cookie, domain);
258 } while (!list_empty(&async_domains));
259 mutex_unlock(&async_register_mutex);
260} 244}
261EXPORT_SYMBOL_GPL(async_synchronize_full); 245EXPORT_SYMBOL_GPL(async_synchronize_full);
262 246
@@ -271,51 +255,45 @@ EXPORT_SYMBOL_GPL(async_synchronize_full);
271 */ 255 */
272void async_unregister_domain(struct async_domain *domain) 256void async_unregister_domain(struct async_domain *domain)
273{ 257{
274 mutex_lock(&async_register_mutex);
275 spin_lock_irq(&async_lock); 258 spin_lock_irq(&async_lock);
276 WARN_ON(!domain->registered || !list_empty(&domain->node) || 259 WARN_ON(!domain->registered || !list_empty(&domain->pending));
277 !list_empty(&domain->domain));
278 domain->registered = 0; 260 domain->registered = 0;
279 spin_unlock_irq(&async_lock); 261 spin_unlock_irq(&async_lock);
280 mutex_unlock(&async_register_mutex);
281} 262}
282EXPORT_SYMBOL_GPL(async_unregister_domain); 263EXPORT_SYMBOL_GPL(async_unregister_domain);
283 264
284/** 265/**
285 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain 266 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
286 * @domain: running list to synchronize on 267 * @domain: the domain to synchronize
287 * 268 *
288 * This function waits until all asynchronous function calls for the 269 * This function waits until all asynchronous function calls for the
289 * synchronization domain specified by the running list @domain have been done. 270 * synchronization domain specified by @domain have been done.
290 */ 271 */
291void async_synchronize_full_domain(struct async_domain *domain) 272void async_synchronize_full_domain(struct async_domain *domain)
292{ 273{
293 async_synchronize_cookie_domain(next_cookie, domain); 274 async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
294} 275}
295EXPORT_SYMBOL_GPL(async_synchronize_full_domain); 276EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
296 277
297/** 278/**
298 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing 279 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
299 * @cookie: async_cookie_t to use as checkpoint 280 * @cookie: async_cookie_t to use as checkpoint
300 * @running: running list to synchronize on 281 * @domain: the domain to synchronize (%NULL for all registered domains)
301 * 282 *
302 * This function waits until all asynchronous function calls for the 283 * This function waits until all asynchronous function calls for the
303 * synchronization domain specified by running list @running submitted 284 * synchronization domain specified by @domain submitted prior to @cookie
304 * prior to @cookie have been done. 285 * have been done.
305 */ 286 */
306void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running) 287void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
307{ 288{
308 ktime_t uninitialized_var(starttime), delta, endtime; 289 ktime_t uninitialized_var(starttime), delta, endtime;
309 290
310 if (!running)
311 return;
312
313 if (initcall_debug && system_state == SYSTEM_BOOTING) { 291 if (initcall_debug && system_state == SYSTEM_BOOTING) {
314 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current)); 292 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
315 starttime = ktime_get(); 293 starttime = ktime_get();
316 } 294 }
317 295
318 wait_event(async_done, lowest_in_progress(running) >= cookie); 296 wait_event(async_done, lowest_in_progress(domain) >= cookie);
319 297
320 if (initcall_debug && system_state == SYSTEM_BOOTING) { 298 if (initcall_debug && system_state == SYSTEM_BOOTING) {
321 endtime = ktime_get(); 299 endtime = ktime_get();
@@ -337,6 +315,18 @@ EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
337 */ 315 */
338void async_synchronize_cookie(async_cookie_t cookie) 316void async_synchronize_cookie(async_cookie_t cookie)
339{ 317{
340 async_synchronize_cookie_domain(cookie, &async_running); 318 async_synchronize_cookie_domain(cookie, &async_dfl_domain);
341} 319}
342EXPORT_SYMBOL_GPL(async_synchronize_cookie); 320EXPORT_SYMBOL_GPL(async_synchronize_cookie);
321
322/**
323 * current_is_async - is %current an async worker task?
324 *
325 * Returns %true if %current is an async worker task.
326 */
327bool current_is_async(void)
328{
329 struct worker *worker = current_wq_worker();
330
331 return worker && worker->current_func == async_run_entry_fn;
332}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4855892798fd..b5c64327e712 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -52,7 +52,7 @@
52#include <linux/module.h> 52#include <linux/module.h>
53#include <linux/delayacct.h> 53#include <linux/delayacct.h>
54#include <linux/cgroupstats.h> 54#include <linux/cgroupstats.h>
55#include <linux/hash.h> 55#include <linux/hashtable.h>
56#include <linux/namei.h> 56#include <linux/namei.h>
57#include <linux/pid_namespace.h> 57#include <linux/pid_namespace.h>
58#include <linux/idr.h> 58#include <linux/idr.h>
@@ -376,22 +376,18 @@ static int css_set_count;
376 * account cgroups in empty hierarchies. 376 * account cgroups in empty hierarchies.
377 */ 377 */
378#define CSS_SET_HASH_BITS 7 378#define CSS_SET_HASH_BITS 7
379#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS) 379static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
380static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
381 380
382static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) 381static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
383{ 382{
384 int i; 383 int i;
385 int index; 384 unsigned long key = 0UL;
386 unsigned long tmp = 0UL;
387 385
388 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) 386 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
389 tmp += (unsigned long)css[i]; 387 key += (unsigned long)css[i];
390 tmp = (tmp >> 16) ^ tmp; 388 key = (key >> 16) ^ key;
391 389
392 index = hash_long(tmp, CSS_SET_HASH_BITS); 390 return key;
393
394 return &css_set_table[index];
395} 391}
396 392
397/* We don't maintain the lists running through each css_set to its 393/* We don't maintain the lists running through each css_set to its
@@ -418,7 +414,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
418 } 414 }
419 415
420 /* This css_set is dead. unlink it and release cgroup refcounts */ 416 /* This css_set is dead. unlink it and release cgroup refcounts */
421 hlist_del(&cg->hlist); 417 hash_del(&cg->hlist);
422 css_set_count--; 418 css_set_count--;
423 419
424 list_for_each_entry_safe(link, saved_link, &cg->cg_links, 420 list_for_each_entry_safe(link, saved_link, &cg->cg_links,
@@ -426,12 +422,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
426 struct cgroup *cgrp = link->cgrp; 422 struct cgroup *cgrp = link->cgrp;
427 list_del(&link->cg_link_list); 423 list_del(&link->cg_link_list);
428 list_del(&link->cgrp_link_list); 424 list_del(&link->cgrp_link_list);
425
426 /*
427 * We may not be holding cgroup_mutex, and if cgrp->count is
428 * dropped to 0 the cgroup can be destroyed at any time, hence
429 * rcu_read_lock is used to keep it alive.
430 */
431 rcu_read_lock();
429 if (atomic_dec_and_test(&cgrp->count) && 432 if (atomic_dec_and_test(&cgrp->count) &&
430 notify_on_release(cgrp)) { 433 notify_on_release(cgrp)) {
431 if (taskexit) 434 if (taskexit)
432 set_bit(CGRP_RELEASABLE, &cgrp->flags); 435 set_bit(CGRP_RELEASABLE, &cgrp->flags);
433 check_for_release(cgrp); 436 check_for_release(cgrp);
434 } 437 }
438 rcu_read_unlock();
435 439
436 kfree(link); 440 kfree(link);
437 } 441 }
@@ -550,9 +554,9 @@ static struct css_set *find_existing_css_set(
550{ 554{
551 int i; 555 int i;
552 struct cgroupfs_root *root = cgrp->root; 556 struct cgroupfs_root *root = cgrp->root;
553 struct hlist_head *hhead;
554 struct hlist_node *node; 557 struct hlist_node *node;
555 struct css_set *cg; 558 struct css_set *cg;
559 unsigned long key;
556 560
557 /* 561 /*
558 * Build the set of subsystem state objects that we want to see in the 562 * Build the set of subsystem state objects that we want to see in the
@@ -572,8 +576,8 @@ static struct css_set *find_existing_css_set(
572 } 576 }
573 } 577 }
574 578
575 hhead = css_set_hash(template); 579 key = css_set_hash(template);
576 hlist_for_each_entry(cg, node, hhead, hlist) { 580 hash_for_each_possible(css_set_table, cg, node, hlist, key) {
577 if (!compare_css_sets(cg, oldcg, cgrp, template)) 581 if (!compare_css_sets(cg, oldcg, cgrp, template))
578 continue; 582 continue;
579 583
@@ -657,8 +661,8 @@ static struct css_set *find_css_set(
657 661
658 struct list_head tmp_cg_links; 662 struct list_head tmp_cg_links;
659 663
660 struct hlist_head *hhead;
661 struct cg_cgroup_link *link; 664 struct cg_cgroup_link *link;
665 unsigned long key;
662 666
663 /* First see if we already have a cgroup group that matches 667 /* First see if we already have a cgroup group that matches
664 * the desired set */ 668 * the desired set */
@@ -704,8 +708,8 @@ static struct css_set *find_css_set(
704 css_set_count++; 708 css_set_count++;
705 709
706 /* Add this cgroup group to the hash table */ 710 /* Add this cgroup group to the hash table */
707 hhead = css_set_hash(res->subsys); 711 key = css_set_hash(res->subsys);
708 hlist_add_head(&res->hlist, hhead); 712 hash_add(css_set_table, &res->hlist, key);
709 713
710 write_unlock(&css_set_lock); 714 write_unlock(&css_set_lock);
711 715
@@ -856,47 +860,54 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
856 return inode; 860 return inode;
857} 861}
858 862
859static void cgroup_diput(struct dentry *dentry, struct inode *inode) 863static void cgroup_free_fn(struct work_struct *work)
860{ 864{
861 /* is dentry a directory ? if so, kfree() associated cgroup */ 865 struct cgroup *cgrp = container_of(work, struct cgroup, free_work);
862 if (S_ISDIR(inode->i_mode)) { 866 struct cgroup_subsys *ss;
863 struct cgroup *cgrp = dentry->d_fsdata;
864 struct cgroup_subsys *ss;
865 BUG_ON(!(cgroup_is_removed(cgrp)));
866 /* It's possible for external users to be holding css
867 * reference counts on a cgroup; css_put() needs to
868 * be able to access the cgroup after decrementing
869 * the reference count in order to know if it needs to
870 * queue the cgroup to be handled by the release
871 * agent */
872 synchronize_rcu();
873 867
874 mutex_lock(&cgroup_mutex); 868 mutex_lock(&cgroup_mutex);
875 /* 869 /*
876 * Release the subsystem state objects. 870 * Release the subsystem state objects.
877 */ 871 */
878 for_each_subsys(cgrp->root, ss) 872 for_each_subsys(cgrp->root, ss)
879 ss->css_free(cgrp); 873 ss->css_free(cgrp);
880 874
881 cgrp->root->number_of_cgroups--; 875 cgrp->root->number_of_cgroups--;
882 mutex_unlock(&cgroup_mutex); 876 mutex_unlock(&cgroup_mutex);
883 877
884 /* 878 /*
885 * Drop the active superblock reference that we took when we 879 * Drop the active superblock reference that we took when we
886 * created the cgroup 880 * created the cgroup
887 */ 881 */
888 deactivate_super(cgrp->root->sb); 882 deactivate_super(cgrp->root->sb);
889 883
890 /* 884 /*
891 * if we're getting rid of the cgroup, refcount should ensure 885 * if we're getting rid of the cgroup, refcount should ensure
892 * that there are no pidlists left. 886 * that there are no pidlists left.
893 */ 887 */
894 BUG_ON(!list_empty(&cgrp->pidlists)); 888 BUG_ON(!list_empty(&cgrp->pidlists));
895 889
896 simple_xattrs_free(&cgrp->xattrs); 890 simple_xattrs_free(&cgrp->xattrs);
897 891
898 ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id); 892 ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
899 kfree_rcu(cgrp, rcu_head); 893 kfree(cgrp);
894}
895
896static void cgroup_free_rcu(struct rcu_head *head)
897{
898 struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
899
900 schedule_work(&cgrp->free_work);
901}
902
903static void cgroup_diput(struct dentry *dentry, struct inode *inode)
904{
905 /* is dentry a directory ? if so, kfree() associated cgroup */
906 if (S_ISDIR(inode->i_mode)) {
907 struct cgroup *cgrp = dentry->d_fsdata;
908
909 BUG_ON(!(cgroup_is_removed(cgrp)));
910 call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
900 } else { 911 } else {
901 struct cfent *cfe = __d_cfe(dentry); 912 struct cfent *cfe = __d_cfe(dentry);
902 struct cgroup *cgrp = dentry->d_parent->d_fsdata; 913 struct cgroup *cgrp = dentry->d_parent->d_fsdata;
@@ -925,13 +936,17 @@ static void remove_dir(struct dentry *d)
925 dput(parent); 936 dput(parent);
926} 937}
927 938
928static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) 939static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
929{ 940{
930 struct cfent *cfe; 941 struct cfent *cfe;
931 942
932 lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); 943 lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
933 lockdep_assert_held(&cgroup_mutex); 944 lockdep_assert_held(&cgroup_mutex);
934 945
946 /*
947 * If we're doing cleanup due to failure of cgroup_create(),
948 * the corresponding @cfe may not exist.
949 */
935 list_for_each_entry(cfe, &cgrp->files, node) { 950 list_for_each_entry(cfe, &cgrp->files, node) {
936 struct dentry *d = cfe->dentry; 951 struct dentry *d = cfe->dentry;
937 952
@@ -944,9 +959,8 @@ static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
944 list_del_init(&cfe->node); 959 list_del_init(&cfe->node);
945 dput(d); 960 dput(d);
946 961
947 return 0; 962 break;
948 } 963 }
949 return -ENOENT;
950} 964}
951 965
952/** 966/**
@@ -1083,7 +1097,6 @@ static int rebind_subsystems(struct cgroupfs_root *root,
1083 } 1097 }
1084 } 1098 }
1085 root->subsys_mask = root->actual_subsys_mask = final_subsys_mask; 1099 root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;
1086 synchronize_rcu();
1087 1100
1088 return 0; 1101 return 0;
1089} 1102}
@@ -1393,6 +1406,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
1393 INIT_LIST_HEAD(&cgrp->allcg_node); 1406 INIT_LIST_HEAD(&cgrp->allcg_node);
1394 INIT_LIST_HEAD(&cgrp->release_list); 1407 INIT_LIST_HEAD(&cgrp->release_list);
1395 INIT_LIST_HEAD(&cgrp->pidlists); 1408 INIT_LIST_HEAD(&cgrp->pidlists);
1409 INIT_WORK(&cgrp->free_work, cgroup_free_fn);
1396 mutex_init(&cgrp->pidlist_mutex); 1410 mutex_init(&cgrp->pidlist_mutex);
1397 INIT_LIST_HEAD(&cgrp->event_list); 1411 INIT_LIST_HEAD(&cgrp->event_list);
1398 spin_lock_init(&cgrp->event_list_lock); 1412 spin_lock_init(&cgrp->event_list_lock);
@@ -1597,6 +1611,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1597 struct cgroupfs_root *existing_root; 1611 struct cgroupfs_root *existing_root;
1598 const struct cred *cred; 1612 const struct cred *cred;
1599 int i; 1613 int i;
1614 struct hlist_node *node;
1615 struct css_set *cg;
1600 1616
1601 BUG_ON(sb->s_root != NULL); 1617 BUG_ON(sb->s_root != NULL);
1602 1618
@@ -1650,14 +1666,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1650 /* Link the top cgroup in this hierarchy into all 1666 /* Link the top cgroup in this hierarchy into all
1651 * the css_set objects */ 1667 * the css_set objects */
1652 write_lock(&css_set_lock); 1668 write_lock(&css_set_lock);
1653 for (i = 0; i < CSS_SET_TABLE_SIZE; i++) { 1669 hash_for_each(css_set_table, i, node, cg, hlist)
1654 struct hlist_head *hhead = &css_set_table[i]; 1670 link_css_set(&tmp_cg_links, cg, root_cgrp);
1655 struct hlist_node *node;
1656 struct css_set *cg;
1657
1658 hlist_for_each_entry(cg, node, hhead, hlist)
1659 link_css_set(&tmp_cg_links, cg, root_cgrp);
1660 }
1661 write_unlock(&css_set_lock); 1671 write_unlock(&css_set_lock);
1662 1672
1663 free_cg_links(&tmp_cg_links); 1673 free_cg_links(&tmp_cg_links);
@@ -1773,7 +1783,7 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1773 rcu_lockdep_assert(rcu_read_lock_held() || cgroup_lock_is_held(), 1783 rcu_lockdep_assert(rcu_read_lock_held() || cgroup_lock_is_held(),
1774 "cgroup_path() called without proper locking"); 1784 "cgroup_path() called without proper locking");
1775 1785
1776 if (!dentry || cgrp == dummytop) { 1786 if (cgrp == dummytop) {
1777 /* 1787 /*
1778 * Inactive subsystems have no dentry for their root 1788 * Inactive subsystems have no dentry for their root
1779 * cgroup 1789 * cgroup
@@ -1982,7 +1992,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1982 ss->attach(cgrp, &tset); 1992 ss->attach(cgrp, &tset);
1983 } 1993 }
1984 1994
1985 synchronize_rcu();
1986out: 1995out:
1987 if (retval) { 1996 if (retval) {
1988 for_each_subsys(root, ss) { 1997 for_each_subsys(root, ss) {
@@ -2151,7 +2160,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
2151 /* 2160 /*
2152 * step 5: success! and cleanup 2161 * step 5: success! and cleanup
2153 */ 2162 */
2154 synchronize_rcu();
2155 retval = 0; 2163 retval = 0;
2156out_put_css_set_refs: 2164out_put_css_set_refs:
2157 if (retval) { 2165 if (retval) {
@@ -2769,14 +2777,14 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
2769 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent) 2777 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
2770 continue; 2778 continue;
2771 2779
2772 if (is_add) 2780 if (is_add) {
2773 err = cgroup_add_file(cgrp, subsys, cft); 2781 err = cgroup_add_file(cgrp, subsys, cft);
2774 else 2782 if (err)
2775 err = cgroup_rm_file(cgrp, cft); 2783 pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
2776 if (err) { 2784 cft->name, err);
2777 pr_warning("cgroup_addrm_files: failed to %s %s, err=%d\n",
2778 is_add ? "add" : "remove", cft->name, err);
2779 ret = err; 2785 ret = err;
2786 } else {
2787 cgroup_rm_file(cgrp, cft);
2780 } 2788 }
2781 } 2789 }
2782 return ret; 2790 return ret;
@@ -3017,6 +3025,32 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
3017} 3025}
3018EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre); 3026EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
3019 3027
3028/**
3029 * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
3030 * @pos: cgroup of interest
3031 *
3032 * Return the rightmost descendant of @pos. If there's no descendant,
3033 * @pos is returned. This can be used during pre-order traversal to skip
3034 * subtree of @pos.
3035 */
3036struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
3037{
3038 struct cgroup *last, *tmp;
3039
3040 WARN_ON_ONCE(!rcu_read_lock_held());
3041
3042 do {
3043 last = pos;
3044 /* ->prev isn't RCU safe, walk ->next till the end */
3045 pos = NULL;
3046 list_for_each_entry_rcu(tmp, &last->children, sibling)
3047 pos = tmp;
3048 } while (pos);
3049
3050 return last;
3051}
3052EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
3053
3020static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos) 3054static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
3021{ 3055{
3022 struct cgroup *last; 3056 struct cgroup *last;
@@ -3752,8 +3786,13 @@ static void cgroup_event_remove(struct work_struct *work)
3752 remove); 3786 remove);
3753 struct cgroup *cgrp = event->cgrp; 3787 struct cgroup *cgrp = event->cgrp;
3754 3788
3789 remove_wait_queue(event->wqh, &event->wait);
3790
3755 event->cft->unregister_event(cgrp, event->cft, event->eventfd); 3791 event->cft->unregister_event(cgrp, event->cft, event->eventfd);
3756 3792
3793 /* Notify userspace the event is going away. */
3794 eventfd_signal(event->eventfd, 1);
3795
3757 eventfd_ctx_put(event->eventfd); 3796 eventfd_ctx_put(event->eventfd);
3758 kfree(event); 3797 kfree(event);
3759 dput(cgrp->dentry); 3798 dput(cgrp->dentry);
@@ -3773,15 +3812,25 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
3773 unsigned long flags = (unsigned long)key; 3812 unsigned long flags = (unsigned long)key;
3774 3813
3775 if (flags & POLLHUP) { 3814 if (flags & POLLHUP) {
3776 __remove_wait_queue(event->wqh, &event->wait);
3777 spin_lock(&cgrp->event_list_lock);
3778 list_del_init(&event->list);
3779 spin_unlock(&cgrp->event_list_lock);
3780 /* 3815 /*
3781 * We are in atomic context, but cgroup_event_remove() may 3816 * If the event has been detached at cgroup removal, we
3782 * sleep, so we have to call it in workqueue. 3817 * can simply return knowing the other side will cleanup
3818 * for us.
3819 *
3820 * We can't race against event freeing since the other
3821 * side will require wqh->lock via remove_wait_queue(),
3822 * which we hold.
3783 */ 3823 */
3784 schedule_work(&event->remove); 3824 spin_lock(&cgrp->event_list_lock);
3825 if (!list_empty(&event->list)) {
3826 list_del_init(&event->list);
3827 /*
3828 * We are in atomic context, but cgroup_event_remove()
3829 * may sleep, so we have to call it in workqueue.
3830 */
3831 schedule_work(&event->remove);
3832 }
3833 spin_unlock(&cgrp->event_list_lock);
3785 } 3834 }
3786 3835
3787 return 0; 3836 return 0;
@@ -3807,6 +3856,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
3807 const char *buffer) 3856 const char *buffer)
3808{ 3857{
3809 struct cgroup_event *event = NULL; 3858 struct cgroup_event *event = NULL;
3859 struct cgroup *cgrp_cfile;
3810 unsigned int efd, cfd; 3860 unsigned int efd, cfd;
3811 struct file *efile = NULL; 3861 struct file *efile = NULL;
3812 struct file *cfile = NULL; 3862 struct file *cfile = NULL;
@@ -3862,6 +3912,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
3862 goto fail; 3912 goto fail;
3863 } 3913 }
3864 3914
3915 /*
3916 * The file to be monitored must be in the same cgroup as
3917 * cgroup.event_control is.
3918 */
3919 cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
3920 if (cgrp_cfile != cgrp) {
3921 ret = -EINVAL;
3922 goto fail;
3923 }
3924
3865 if (!event->cft->register_event || !event->cft->unregister_event) { 3925 if (!event->cft->register_event || !event->cft->unregister_event) {
3866 ret = -EINVAL; 3926 ret = -EINVAL;
3867 goto fail; 3927 goto fail;
@@ -4135,6 +4195,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4135 4195
4136 init_cgroup_housekeeping(cgrp); 4196 init_cgroup_housekeeping(cgrp);
4137 4197
4198 dentry->d_fsdata = cgrp;
4199 cgrp->dentry = dentry;
4200
4138 cgrp->parent = parent; 4201 cgrp->parent = parent;
4139 cgrp->root = parent->root; 4202 cgrp->root = parent->root;
4140 cgrp->top_cgroup = parent->top_cgroup; 4203 cgrp->top_cgroup = parent->top_cgroup;
@@ -4172,8 +4235,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4172 lockdep_assert_held(&dentry->d_inode->i_mutex); 4235 lockdep_assert_held(&dentry->d_inode->i_mutex);
4173 4236
4174 /* allocation complete, commit to creation */ 4237 /* allocation complete, commit to creation */
4175 dentry->d_fsdata = cgrp;
4176 cgrp->dentry = dentry;
4177 list_add_tail(&cgrp->allcg_node, &root->allcg_list); 4238 list_add_tail(&cgrp->allcg_node, &root->allcg_list);
4178 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); 4239 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
4179 root->number_of_cgroups++; 4240 root->number_of_cgroups++;
@@ -4340,20 +4401,14 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4340 /* 4401 /*
4341 * Unregister events and notify userspace. 4402 * Unregister events and notify userspace.
4342 * Notify userspace about cgroup removing only after rmdir of cgroup 4403 * Notify userspace about cgroup removing only after rmdir of cgroup
4343 * directory to avoid race between userspace and kernelspace. Use 4404 * directory to avoid race between userspace and kernelspace.
4344 * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
4345 * cgroup_event_wake() is called with the wait queue head locked,
4346 * remove_wait_queue() cannot be called while holding event_list_lock.
4347 */ 4405 */
4348 spin_lock(&cgrp->event_list_lock); 4406 spin_lock(&cgrp->event_list_lock);
4349 list_splice_init(&cgrp->event_list, &tmp_list); 4407 list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
4350 spin_unlock(&cgrp->event_list_lock);
4351 list_for_each_entry_safe(event, tmp, &tmp_list, list) {
4352 list_del_init(&event->list); 4408 list_del_init(&event->list);
4353 remove_wait_queue(event->wqh, &event->wait);
4354 eventfd_signal(event->eventfd, 1);
4355 schedule_work(&event->remove); 4409 schedule_work(&event->remove);
4356 } 4410 }
4411 spin_unlock(&cgrp->event_list_lock);
4357 4412
4358 return 0; 4413 return 0;
4359} 4414}
@@ -4438,6 +4493,9 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4438{ 4493{
4439 struct cgroup_subsys_state *css; 4494 struct cgroup_subsys_state *css;
4440 int i, ret; 4495 int i, ret;
4496 struct hlist_node *node, *tmp;
4497 struct css_set *cg;
4498 unsigned long key;
4441 4499
4442 /* check name and function validity */ 4500 /* check name and function validity */
4443 if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN || 4501 if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
@@ -4503,23 +4561,17 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4503 * this is all done under the css_set_lock. 4561 * this is all done under the css_set_lock.
4504 */ 4562 */
4505 write_lock(&css_set_lock); 4563 write_lock(&css_set_lock);
4506 for (i = 0; i < CSS_SET_TABLE_SIZE; i++) { 4564 hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
4507 struct css_set *cg; 4565 /* skip entries that we already rehashed */
4508 struct hlist_node *node, *tmp; 4566 if (cg->subsys[ss->subsys_id])
4509 struct hlist_head *bucket = &css_set_table[i], *new_bucket; 4567 continue;
4510 4568 /* remove existing entry */
4511 hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) { 4569 hash_del(&cg->hlist);
4512 /* skip entries that we already rehashed */ 4570 /* set new value */
4513 if (cg->subsys[ss->subsys_id]) 4571 cg->subsys[ss->subsys_id] = css;
4514 continue; 4572 /* recompute hash and restore entry */
4515 /* remove existing entry */ 4573 key = css_set_hash(cg->subsys);
4516 hlist_del(&cg->hlist); 4574 hash_add(css_set_table, node, key);
4517 /* set new value */
4518 cg->subsys[ss->subsys_id] = css;
4519 /* recompute hash and restore entry */
4520 new_bucket = css_set_hash(cg->subsys);
4521 hlist_add_head(&cg->hlist, new_bucket);
4522 }
4523 } 4575 }
4524 write_unlock(&css_set_lock); 4576 write_unlock(&css_set_lock);
4525 4577
@@ -4551,7 +4603,6 @@ EXPORT_SYMBOL_GPL(cgroup_load_subsys);
4551void cgroup_unload_subsys(struct cgroup_subsys *ss) 4603void cgroup_unload_subsys(struct cgroup_subsys *ss)
4552{ 4604{
4553 struct cg_cgroup_link *link; 4605 struct cg_cgroup_link *link;
4554 struct hlist_head *hhead;
4555 4606
4556 BUG_ON(ss->module == NULL); 4607 BUG_ON(ss->module == NULL);
4557 4608
@@ -4585,11 +4636,12 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
4585 write_lock(&css_set_lock); 4636 write_lock(&css_set_lock);
4586 list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) { 4637 list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
4587 struct css_set *cg = link->cg; 4638 struct css_set *cg = link->cg;
4639 unsigned long key;
4588 4640
4589 hlist_del(&cg->hlist); 4641 hash_del(&cg->hlist);
4590 cg->subsys[ss->subsys_id] = NULL; 4642 cg->subsys[ss->subsys_id] = NULL;
4591 hhead = css_set_hash(cg->subsys); 4643 key = css_set_hash(cg->subsys);
4592 hlist_add_head(&cg->hlist, hhead); 4644 hash_add(css_set_table, &cg->hlist, key);
4593 } 4645 }
4594 write_unlock(&css_set_lock); 4646 write_unlock(&css_set_lock);
4595 4647
@@ -4631,9 +4683,6 @@ int __init cgroup_init_early(void)
4631 list_add(&init_css_set_link.cg_link_list, 4683 list_add(&init_css_set_link.cg_link_list,
4632 &init_css_set.cg_links); 4684 &init_css_set.cg_links);
4633 4685
4634 for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
4635 INIT_HLIST_HEAD(&css_set_table[i]);
4636
4637 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 4686 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
4638 struct cgroup_subsys *ss = subsys[i]; 4687 struct cgroup_subsys *ss = subsys[i];
4639 4688
@@ -4667,7 +4716,7 @@ int __init cgroup_init(void)
4667{ 4716{
4668 int err; 4717 int err;
4669 int i; 4718 int i;
4670 struct hlist_head *hhead; 4719 unsigned long key;
4671 4720
4672 err = bdi_init(&cgroup_backing_dev_info); 4721 err = bdi_init(&cgroup_backing_dev_info);
4673 if (err) 4722 if (err)
@@ -4686,8 +4735,8 @@ int __init cgroup_init(void)
4686 } 4735 }
4687 4736
4688 /* Add init_css_set to the hash table */ 4737 /* Add init_css_set to the hash table */
4689 hhead = css_set_hash(init_css_set.subsys); 4738 key = css_set_hash(init_css_set.subsys);
4690 hlist_add_head(&init_css_set.hlist, hhead); 4739 hash_add(css_set_table, &init_css_set.hlist, key);
4691 BUG_ON(!init_root_id(&rootnode)); 4740 BUG_ON(!init_root_id(&rootnode));
4692 4741
4693 cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); 4742 cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
@@ -4982,8 +5031,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4982 } 5031 }
4983 task_unlock(tsk); 5032 task_unlock(tsk);
4984 5033
4985 if (cg) 5034 put_css_set_taskexit(cg);
4986 put_css_set_taskexit(cg);
4987} 5035}
4988 5036
4989/** 5037/**
diff --git a/kernel/compat.c b/kernel/compat.c
index f6150e92dfc9..36700e9e2be9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
535 return 0; 535 return 0;
536} 536}
537 537
538asmlinkage long 538COMPAT_SYSCALL_DEFINE4(wait4,
539compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, 539 compat_pid_t, pid,
540 struct compat_rusage __user *ru) 540 compat_uint_t __user *, stat_addr,
541 int, options,
542 struct compat_rusage __user *, ru)
541{ 543{
542 if (!ru) { 544 if (!ru) {
543 return sys_wait4(pid, stat_addr, options, NULL); 545 return sys_wait4(pid, stat_addr, options, NULL);
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
564 } 566 }
565} 567}
566 568
567asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, 569COMPAT_SYSCALL_DEFINE5(waitid,
568 struct compat_siginfo __user *uinfo, int options, 570 int, which, compat_pid_t, pid,
569 struct compat_rusage __user *uru) 571 struct compat_siginfo __user *, uinfo, int, options,
572 struct compat_rusage __user *, uru)
570{ 573{
571 siginfo_t info; 574 siginfo_t info;
572 struct rusage ru; 575 struct rusage ru;
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
584 return ret; 587 return ret;
585 588
586 if (uru) { 589 if (uru) {
587 ret = put_compat_rusage(&ru, uru); 590 /* sys_waitid() overwrites everything in ru */
591 if (COMPAT_USE_64BIT_TIME)
592 ret = copy_to_user(uru, &ru, sizeof(ru));
593 else
594 ret = put_compat_rusage(&ru, uru);
588 if (ret) 595 if (ret)
589 return ret; 596 return ret;
590 } 597 }
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
994 sigset_from_compat(&s, &s32); 1001 sigset_from_compat(&s, &s32);
995 1002
996 if (uts) { 1003 if (uts) {
997 if (get_compat_timespec(&t, uts)) 1004 if (compat_get_timespec(&t, uts))
998 return -EFAULT; 1005 return -EFAULT;
999 } 1006 }
1000 1007
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e0e07fd55508..65349f07b878 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -1,29 +1,41 @@
1/*
2 * Context tracking: Probe on high level context boundaries such as kernel
3 * and userspace. This includes syscalls and exceptions entry/exit.
4 *
5 * This is used by RCU to remove its dependency on the timer tick while a CPU
6 * runs in userspace.
7 *
8 * Started by Frederic Weisbecker:
9 *
10 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
11 *
12 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
13 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
14 *
15 */
16
1#include <linux/context_tracking.h> 17#include <linux/context_tracking.h>
18#include <linux/kvm_host.h>
2#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
3#include <linux/sched.h> 20#include <linux/sched.h>
4#include <linux/percpu.h>
5#include <linux/hardirq.h> 21#include <linux/hardirq.h>
22#include <linux/export.h>
6 23
7struct context_tracking { 24DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
8 /*
9 * When active is false, hooks are not set to
10 * minimize overhead: TIF flags are cleared
11 * and calls to user_enter/exit are ignored. This
12 * may be further optimized using static keys.
13 */
14 bool active;
15 enum {
16 IN_KERNEL = 0,
17 IN_USER,
18 } state;
19};
20
21static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
22#ifdef CONFIG_CONTEXT_TRACKING_FORCE 25#ifdef CONFIG_CONTEXT_TRACKING_FORCE
23 .active = true, 26 .active = true,
24#endif 27#endif
25}; 28};
26 29
30/**
31 * user_enter - Inform the context tracking that the CPU is going to
32 * enter userspace mode.
33 *
34 * This function must be called right before we switch from the kernel
35 * to userspace, when it's guaranteed the remaining kernel instructions
36 * to execute won't use any RCU read side critical section because this
37 * function sets RCU in extended quiescent state.
38 */
27void user_enter(void) 39void user_enter(void)
28{ 40{
29 unsigned long flags; 41 unsigned long flags;
@@ -39,40 +51,90 @@ void user_enter(void)
39 if (in_interrupt()) 51 if (in_interrupt())
40 return; 52 return;
41 53
54 /* Kernel threads aren't supposed to go to userspace */
42 WARN_ON_ONCE(!current->mm); 55 WARN_ON_ONCE(!current->mm);
43 56
44 local_irq_save(flags); 57 local_irq_save(flags);
45 if (__this_cpu_read(context_tracking.active) && 58 if (__this_cpu_read(context_tracking.active) &&
46 __this_cpu_read(context_tracking.state) != IN_USER) { 59 __this_cpu_read(context_tracking.state) != IN_USER) {
47 __this_cpu_write(context_tracking.state, IN_USER); 60 /*
61 * At this stage, only low level arch entry code remains and
62 * then we'll run in userspace. We can assume there won't be
63 * any RCU read-side critical section until the next call to
64 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
65 * on the tick.
66 */
67 vtime_user_enter(current);
48 rcu_user_enter(); 68 rcu_user_enter();
69 __this_cpu_write(context_tracking.state, IN_USER);
49 } 70 }
50 local_irq_restore(flags); 71 local_irq_restore(flags);
51} 72}
52 73
74
75/**
76 * user_exit - Inform the context tracking that the CPU is
77 * exiting userspace mode and entering the kernel.
78 *
79 * This function must be called after we entered the kernel from userspace
80 * before any use of RCU read side critical section. This potentially include
81 * any high level kernel code like syscalls, exceptions, signal handling, etc...
82 *
83 * This call supports re-entrancy. This way it can be called from any exception
84 * handler without needing to know if we came from userspace or not.
85 */
53void user_exit(void) 86void user_exit(void)
54{ 87{
55 unsigned long flags; 88 unsigned long flags;
56 89
57 /*
58 * Some contexts may involve an exception occuring in an irq,
59 * leading to that nesting:
60 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
61 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
62 * helpers are enough to protect RCU uses inside the exception. So
63 * just return immediately if we detect we are in an IRQ.
64 */
65 if (in_interrupt()) 90 if (in_interrupt())
66 return; 91 return;
67 92
68 local_irq_save(flags); 93 local_irq_save(flags);
69 if (__this_cpu_read(context_tracking.state) == IN_USER) { 94 if (__this_cpu_read(context_tracking.state) == IN_USER) {
70 __this_cpu_write(context_tracking.state, IN_KERNEL); 95 /*
96 * We are going to run code that may use RCU. Inform
97 * RCU core about that (ie: we may need the tick again).
98 */
71 rcu_user_exit(); 99 rcu_user_exit();
100 vtime_user_exit(current);
101 __this_cpu_write(context_tracking.state, IN_KERNEL);
72 } 102 }
73 local_irq_restore(flags); 103 local_irq_restore(flags);
74} 104}
75 105
106void guest_enter(void)
107{
108 if (vtime_accounting_enabled())
109 vtime_guest_enter(current);
110 else
111 __guest_enter();
112}
113EXPORT_SYMBOL_GPL(guest_enter);
114
115void guest_exit(void)
116{
117 if (vtime_accounting_enabled())
118 vtime_guest_exit(current);
119 else
120 __guest_exit();
121}
122EXPORT_SYMBOL_GPL(guest_exit);
123
124
125/**
126 * context_tracking_task_switch - context switch the syscall callbacks
127 * @prev: the task that is being switched out
128 * @next: the task that is being switched in
129 *
130 * The context tracking uses the syscall slow path to implement its user-kernel
131 * boundaries probes on syscalls. This way it doesn't impact the syscall fast
132 * path on CPUs that don't do context tracking.
133 *
134 * But we need to clear the flag on the previous task because it may later
135 * migrate to some CPU that doesn't do the context tracking. As such the TIF
136 * flag may not be desired there.
137 */
76void context_tracking_task_switch(struct task_struct *prev, 138void context_tracking_task_switch(struct task_struct *prev,
77 struct task_struct *next) 139 struct task_struct *next)
78{ 140{
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3046a503242c..b5e4ab2d427e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -224,11 +224,13 @@ void clear_tasks_mm_cpumask(int cpu)
224static inline void check_for_tasks(int cpu) 224static inline void check_for_tasks(int cpu)
225{ 225{
226 struct task_struct *p; 226 struct task_struct *p;
227 cputime_t utime, stime;
227 228
228 write_lock_irq(&tasklist_lock); 229 write_lock_irq(&tasklist_lock);
229 for_each_process(p) { 230 for_each_process(p) {
231 task_cputime(p, &utime, &stime);
230 if (task_cpu(p) == cpu && p->state == TASK_RUNNING && 232 if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
231 (p->utime || p->stime)) 233 (utime || stime))
232 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " 234 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
233 "(state = %ld, flags = %x)\n", 235 "(state = %ld, flags = %x)\n",
234 p->comm, task_pid_nr(p), cpu, 236 p->comm, task_pid_nr(p), cpu,
@@ -254,6 +256,8 @@ static int __ref take_cpu_down(void *_param)
254 return err; 256 return err;
255 257
256 cpu_notify(CPU_DYING | param->mod, param->hcpu); 258 cpu_notify(CPU_DYING | param->mod, param->hcpu);
259 /* Park the stopper thread */
260 kthread_park(current);
257 return 0; 261 return 0;
258} 262}
259 263
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7bb63eea6eb8..4f9dfe43ecbd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,14 +61,6 @@
61#include <linux/cgroup.h> 61#include <linux/cgroup.h>
62 62
63/* 63/*
64 * Workqueue for cpuset related tasks.
65 *
66 * Using kevent workqueue may cause deadlock when memory_migrate
67 * is set. So we create a separate workqueue thread for cpuset.
68 */
69static struct workqueue_struct *cpuset_wq;
70
71/*
72 * Tracks how many cpusets are currently defined in system. 64 * Tracks how many cpusets are currently defined in system.
73 * When there is only one cpuset (the root cpuset) we can 65 * When there is only one cpuset (the root cpuset) we can
74 * short circuit some hooks. 66 * short circuit some hooks.
@@ -95,18 +87,21 @@ struct cpuset {
95 cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ 87 cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
96 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ 88 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
97 89
98 struct cpuset *parent; /* my parent */
99
100 struct fmeter fmeter; /* memory_pressure filter */ 90 struct fmeter fmeter; /* memory_pressure filter */
101 91
92 /*
93 * Tasks are being attached to this cpuset. Used to prevent
94 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
95 */
96 int attach_in_progress;
97
102 /* partition number for rebuild_sched_domains() */ 98 /* partition number for rebuild_sched_domains() */
103 int pn; 99 int pn;
104 100
105 /* for custom sched domain */ 101 /* for custom sched domain */
106 int relax_domain_level; 102 int relax_domain_level;
107 103
108 /* used for walking a cpuset hierarchy */ 104 struct work_struct hotplug_work;
109 struct list_head stack_list;
110}; 105};
111 106
112/* Retrieve the cpuset for a cgroup */ 107/* Retrieve the cpuset for a cgroup */
@@ -123,6 +118,15 @@ static inline struct cpuset *task_cs(struct task_struct *task)
123 struct cpuset, css); 118 struct cpuset, css);
124} 119}
125 120
121static inline struct cpuset *parent_cs(const struct cpuset *cs)
122{
123 struct cgroup *pcgrp = cs->css.cgroup->parent;
124
125 if (pcgrp)
126 return cgroup_cs(pcgrp);
127 return NULL;
128}
129
126#ifdef CONFIG_NUMA 130#ifdef CONFIG_NUMA
127static inline bool task_has_mempolicy(struct task_struct *task) 131static inline bool task_has_mempolicy(struct task_struct *task)
128{ 132{
@@ -138,6 +142,7 @@ static inline bool task_has_mempolicy(struct task_struct *task)
138 142
139/* bits in struct cpuset flags field */ 143/* bits in struct cpuset flags field */
140typedef enum { 144typedef enum {
145 CS_ONLINE,
141 CS_CPU_EXCLUSIVE, 146 CS_CPU_EXCLUSIVE,
142 CS_MEM_EXCLUSIVE, 147 CS_MEM_EXCLUSIVE,
143 CS_MEM_HARDWALL, 148 CS_MEM_HARDWALL,
@@ -147,13 +152,12 @@ typedef enum {
147 CS_SPREAD_SLAB, 152 CS_SPREAD_SLAB,
148} cpuset_flagbits_t; 153} cpuset_flagbits_t;
149 154
150/* the type of hotplug event */
151enum hotplug_event {
152 CPUSET_CPU_OFFLINE,
153 CPUSET_MEM_OFFLINE,
154};
155
156/* convenient tests for these bits */ 155/* convenient tests for these bits */
156static inline bool is_cpuset_online(const struct cpuset *cs)
157{
158 return test_bit(CS_ONLINE, &cs->flags);
159}
160
157static inline int is_cpu_exclusive(const struct cpuset *cs) 161static inline int is_cpu_exclusive(const struct cpuset *cs)
158{ 162{
159 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); 163 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
@@ -190,27 +194,52 @@ static inline int is_spread_slab(const struct cpuset *cs)
190} 194}
191 195
192static struct cpuset top_cpuset = { 196static struct cpuset top_cpuset = {
193 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), 197 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
198 (1 << CS_MEM_EXCLUSIVE)),
194}; 199};
195 200
201/**
202 * cpuset_for_each_child - traverse online children of a cpuset
203 * @child_cs: loop cursor pointing to the current child
204 * @pos_cgrp: used for iteration
205 * @parent_cs: target cpuset to walk children of
206 *
207 * Walk @child_cs through the online children of @parent_cs. Must be used
208 * with RCU read locked.
209 */
210#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs) \
211 cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup) \
212 if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
213
214/**
215 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
216 * @des_cs: loop cursor pointing to the current descendant
217 * @pos_cgrp: used for iteration
218 * @root_cs: target cpuset to walk ancestor of
219 *
220 * Walk @des_cs through the online descendants of @root_cs. Must be used
221 * with RCU read locked. The caller may modify @pos_cgrp by calling
222 * cgroup_rightmost_descendant() to skip subtree.
223 */
224#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs) \
225 cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
226 if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
227
196/* 228/*
197 * There are two global mutexes guarding cpuset structures. The first 229 * There are two global mutexes guarding cpuset structures - cpuset_mutex
198 * is the main control groups cgroup_mutex, accessed via 230 * and callback_mutex. The latter may nest inside the former. We also
199 * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific 231 * require taking task_lock() when dereferencing a task's cpuset pointer.
200 * callback_mutex, below. They can nest. It is ok to first take 232 * See "The task_lock() exception", at the end of this comment.
201 * cgroup_mutex, then nest callback_mutex. We also require taking 233 *
202 * task_lock() when dereferencing a task's cpuset pointer. See "The 234 * A task must hold both mutexes to modify cpusets. If a task holds
203 * task_lock() exception", at the end of this comment. 235 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
204 * 236 * is the only task able to also acquire callback_mutex and be able to
205 * A task must hold both mutexes to modify cpusets. If a task 237 * modify cpusets. It can perform various checks on the cpuset structure
206 * holds cgroup_mutex, then it blocks others wanting that mutex, 238 * first, knowing nothing will change. It can also allocate memory while
207 * ensuring that it is the only task able to also acquire callback_mutex 239 * just holding cpuset_mutex. While it is performing these checks, various
208 * and be able to modify cpusets. It can perform various checks on 240 * callback routines can briefly acquire callback_mutex to query cpusets.
209 * the cpuset structure first, knowing nothing will change. It can 241 * Once it is ready to make the changes, it takes callback_mutex, blocking
210 * also allocate memory while just holding cgroup_mutex. While it is 242 * everyone else.
211 * performing these checks, various callback routines can briefly
212 * acquire callback_mutex to query cpusets. Once it is ready to make
213 * the changes, it takes callback_mutex, blocking everyone else.
214 * 243 *
215 * Calls to the kernel memory allocator can not be made while holding 244 * Calls to the kernel memory allocator can not be made while holding
216 * callback_mutex, as that would risk double tripping on callback_mutex 245 * callback_mutex, as that would risk double tripping on callback_mutex
@@ -232,6 +261,7 @@ static struct cpuset top_cpuset = {
232 * guidelines for accessing subsystem state in kernel/cgroup.c 261 * guidelines for accessing subsystem state in kernel/cgroup.c
233 */ 262 */
234 263
264static DEFINE_MUTEX(cpuset_mutex);
235static DEFINE_MUTEX(callback_mutex); 265static DEFINE_MUTEX(callback_mutex);
236 266
237/* 267/*
@@ -246,6 +276,17 @@ static char cpuset_nodelist[CPUSET_NODELIST_LEN];
246static DEFINE_SPINLOCK(cpuset_buffer_lock); 276static DEFINE_SPINLOCK(cpuset_buffer_lock);
247 277
248/* 278/*
279 * CPU / memory hotplug is handled asynchronously.
280 */
281static struct workqueue_struct *cpuset_propagate_hotplug_wq;
282
283static void cpuset_hotplug_workfn(struct work_struct *work);
284static void cpuset_propagate_hotplug_workfn(struct work_struct *work);
285static void schedule_cpuset_propagate_hotplug(struct cpuset *cs);
286
287static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
288
289/*
249 * This is ugly, but preserves the userspace API for existing cpuset 290 * This is ugly, but preserves the userspace API for existing cpuset
250 * users. If someone tries to mount the "cpuset" filesystem, we 291 * users. If someone tries to mount the "cpuset" filesystem, we
251 * silently switch it to mount "cgroup" instead 292 * silently switch it to mount "cgroup" instead
@@ -289,7 +330,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
289 struct cpumask *pmask) 330 struct cpumask *pmask)
290{ 331{
291 while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) 332 while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
292 cs = cs->parent; 333 cs = parent_cs(cs);
293 if (cs) 334 if (cs)
294 cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask); 335 cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
295 else 336 else
@@ -314,7 +355,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
314{ 355{
315 while (cs && !nodes_intersects(cs->mems_allowed, 356 while (cs && !nodes_intersects(cs->mems_allowed,
316 node_states[N_MEMORY])) 357 node_states[N_MEMORY]))
317 cs = cs->parent; 358 cs = parent_cs(cs);
318 if (cs) 359 if (cs)
319 nodes_and(*pmask, cs->mems_allowed, 360 nodes_and(*pmask, cs->mems_allowed,
320 node_states[N_MEMORY]); 361 node_states[N_MEMORY]);
@@ -326,7 +367,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
326/* 367/*
327 * update task's spread flag if cpuset's page/slab spread flag is set 368 * update task's spread flag if cpuset's page/slab spread flag is set
328 * 369 *
329 * Called with callback_mutex/cgroup_mutex held 370 * Called with callback_mutex/cpuset_mutex held
330 */ 371 */
331static void cpuset_update_task_spread_flag(struct cpuset *cs, 372static void cpuset_update_task_spread_flag(struct cpuset *cs,
332 struct task_struct *tsk) 373 struct task_struct *tsk)
@@ -346,7 +387,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
346 * 387 *
347 * One cpuset is a subset of another if all its allowed CPUs and 388 * One cpuset is a subset of another if all its allowed CPUs and
348 * Memory Nodes are a subset of the other, and its exclusive flags 389 * Memory Nodes are a subset of the other, and its exclusive flags
349 * are only set if the other's are set. Call holding cgroup_mutex. 390 * are only set if the other's are set. Call holding cpuset_mutex.
350 */ 391 */
351 392
352static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 393static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -395,7 +436,7 @@ static void free_trial_cpuset(struct cpuset *trial)
395 * If we replaced the flag and mask values of the current cpuset 436 * If we replaced the flag and mask values of the current cpuset
396 * (cur) with those values in the trial cpuset (trial), would 437 * (cur) with those values in the trial cpuset (trial), would
397 * our various subset and exclusive rules still be valid? Presumes 438 * our various subset and exclusive rules still be valid? Presumes
398 * cgroup_mutex held. 439 * cpuset_mutex held.
399 * 440 *
400 * 'cur' is the address of an actual, in-use cpuset. Operations 441 * 'cur' is the address of an actual, in-use cpuset. Operations
401 * such as list traversal that depend on the actual address of the 442 * such as list traversal that depend on the actual address of the
@@ -412,48 +453,58 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
412{ 453{
413 struct cgroup *cont; 454 struct cgroup *cont;
414 struct cpuset *c, *par; 455 struct cpuset *c, *par;
456 int ret;
457
458 rcu_read_lock();
415 459
416 /* Each of our child cpusets must be a subset of us */ 460 /* Each of our child cpusets must be a subset of us */
417 list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { 461 ret = -EBUSY;
418 if (!is_cpuset_subset(cgroup_cs(cont), trial)) 462 cpuset_for_each_child(c, cont, cur)
419 return -EBUSY; 463 if (!is_cpuset_subset(c, trial))
420 } 464 goto out;
421 465
422 /* Remaining checks don't apply to root cpuset */ 466 /* Remaining checks don't apply to root cpuset */
467 ret = 0;
423 if (cur == &top_cpuset) 468 if (cur == &top_cpuset)
424 return 0; 469 goto out;
425 470
426 par = cur->parent; 471 par = parent_cs(cur);
427 472
428 /* We must be a subset of our parent cpuset */ 473 /* We must be a subset of our parent cpuset */
474 ret = -EACCES;
429 if (!is_cpuset_subset(trial, par)) 475 if (!is_cpuset_subset(trial, par))
430 return -EACCES; 476 goto out;
431 477
432 /* 478 /*
433 * If either I or some sibling (!= me) is exclusive, we can't 479 * If either I or some sibling (!= me) is exclusive, we can't
434 * overlap 480 * overlap
435 */ 481 */
436 list_for_each_entry(cont, &par->css.cgroup->children, sibling) { 482 ret = -EINVAL;
437 c = cgroup_cs(cont); 483 cpuset_for_each_child(c, cont, par) {
438 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && 484 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
439 c != cur && 485 c != cur &&
440 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) 486 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
441 return -EINVAL; 487 goto out;
442 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 488 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
443 c != cur && 489 c != cur &&
444 nodes_intersects(trial->mems_allowed, c->mems_allowed)) 490 nodes_intersects(trial->mems_allowed, c->mems_allowed))
445 return -EINVAL; 491 goto out;
446 } 492 }
447 493
448 /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ 494 /*
449 if (cgroup_task_count(cur->css.cgroup)) { 495 * Cpusets with tasks - existing or newly being attached - can't
450 if (cpumask_empty(trial->cpus_allowed) || 496 * have empty cpus_allowed or mems_allowed.
451 nodes_empty(trial->mems_allowed)) { 497 */
452 return -ENOSPC; 498 ret = -ENOSPC;
453 } 499 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
454 } 500 (cpumask_empty(trial->cpus_allowed) ||
501 nodes_empty(trial->mems_allowed)))
502 goto out;
455 503
456 return 0; 504 ret = 0;
505out:
506 rcu_read_unlock();
507 return ret;
457} 508}
458 509
459#ifdef CONFIG_SMP 510#ifdef CONFIG_SMP
@@ -474,31 +525,24 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
474 return; 525 return;
475} 526}
476 527
477static void 528static void update_domain_attr_tree(struct sched_domain_attr *dattr,
478update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) 529 struct cpuset *root_cs)
479{ 530{
480 LIST_HEAD(q); 531 struct cpuset *cp;
481 532 struct cgroup *pos_cgrp;
482 list_add(&c->stack_list, &q);
483 while (!list_empty(&q)) {
484 struct cpuset *cp;
485 struct cgroup *cont;
486 struct cpuset *child;
487
488 cp = list_first_entry(&q, struct cpuset, stack_list);
489 list_del(q.next);
490 533
491 if (cpumask_empty(cp->cpus_allowed)) 534 rcu_read_lock();
535 cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
536 /* skip the whole subtree if @cp doesn't have any CPU */
537 if (cpumask_empty(cp->cpus_allowed)) {
538 pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
492 continue; 539 continue;
540 }
493 541
494 if (is_sched_load_balance(cp)) 542 if (is_sched_load_balance(cp))
495 update_domain_attr(dattr, cp); 543 update_domain_attr(dattr, cp);
496
497 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
498 child = cgroup_cs(cont);
499 list_add_tail(&child->stack_list, &q);
500 }
501 } 544 }
545 rcu_read_unlock();
502} 546}
503 547
504/* 548/*
@@ -520,7 +564,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
520 * domains when operating in the severe memory shortage situations 564 * domains when operating in the severe memory shortage situations
521 * that could cause allocation failures below. 565 * that could cause allocation failures below.
522 * 566 *
523 * Must be called with cgroup_lock held. 567 * Must be called with cpuset_mutex held.
524 * 568 *
525 * The three key local variables below are: 569 * The three key local variables below are:
526 * q - a linked-list queue of cpuset pointers, used to implement a 570 * q - a linked-list queue of cpuset pointers, used to implement a
@@ -558,7 +602,6 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
558static int generate_sched_domains(cpumask_var_t **domains, 602static int generate_sched_domains(cpumask_var_t **domains,
559 struct sched_domain_attr **attributes) 603 struct sched_domain_attr **attributes)
560{ 604{
561 LIST_HEAD(q); /* queue of cpusets to be scanned */
562 struct cpuset *cp; /* scans q */ 605 struct cpuset *cp; /* scans q */
563 struct cpuset **csa; /* array of all cpuset ptrs */ 606 struct cpuset **csa; /* array of all cpuset ptrs */
564 int csn; /* how many cpuset ptrs in csa so far */ 607 int csn; /* how many cpuset ptrs in csa so far */
@@ -567,6 +610,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
567 struct sched_domain_attr *dattr; /* attributes for custom domains */ 610 struct sched_domain_attr *dattr; /* attributes for custom domains */
568 int ndoms = 0; /* number of sched domains in result */ 611 int ndoms = 0; /* number of sched domains in result */
569 int nslot; /* next empty doms[] struct cpumask slot */ 612 int nslot; /* next empty doms[] struct cpumask slot */
613 struct cgroup *pos_cgrp;
570 614
571 doms = NULL; 615 doms = NULL;
572 dattr = NULL; 616 dattr = NULL;
@@ -594,33 +638,27 @@ static int generate_sched_domains(cpumask_var_t **domains,
594 goto done; 638 goto done;
595 csn = 0; 639 csn = 0;
596 640
597 list_add(&top_cpuset.stack_list, &q); 641 rcu_read_lock();
598 while (!list_empty(&q)) { 642 cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
599 struct cgroup *cont;
600 struct cpuset *child; /* scans child cpusets of cp */
601
602 cp = list_first_entry(&q, struct cpuset, stack_list);
603 list_del(q.next);
604
605 if (cpumask_empty(cp->cpus_allowed))
606 continue;
607
608 /* 643 /*
609 * All child cpusets contain a subset of the parent's cpus, so 644 * Continue traversing beyond @cp iff @cp has some CPUs and
610 * just skip them, and then we call update_domain_attr_tree() 645 * isn't load balancing. The former is obvious. The
611 * to calc relax_domain_level of the corresponding sched 646 * latter: All child cpusets contain a subset of the
612 * domain. 647 * parent's cpus, so just skip them, and then we call
648 * update_domain_attr_tree() to calc relax_domain_level of
649 * the corresponding sched domain.
613 */ 650 */
614 if (is_sched_load_balance(cp)) { 651 if (!cpumask_empty(cp->cpus_allowed) &&
615 csa[csn++] = cp; 652 !is_sched_load_balance(cp))
616 continue; 653 continue;
617 }
618 654
619 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { 655 if (is_sched_load_balance(cp))
620 child = cgroup_cs(cont); 656 csa[csn++] = cp;
621 list_add_tail(&child->stack_list, &q); 657
622 } 658 /* skip @cp's subtree */
623 } 659 pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
660 }
661 rcu_read_unlock();
624 662
625 for (i = 0; i < csn; i++) 663 for (i = 0; i < csn; i++)
626 csa[i]->pn = i; 664 csa[i]->pn = i;
@@ -725,25 +763,25 @@ done:
725/* 763/*
726 * Rebuild scheduler domains. 764 * Rebuild scheduler domains.
727 * 765 *
728 * Call with neither cgroup_mutex held nor within get_online_cpus(). 766 * If the flag 'sched_load_balance' of any cpuset with non-empty
729 * Takes both cgroup_mutex and get_online_cpus(). 767 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
768 * which has that flag enabled, or if any cpuset with a non-empty
769 * 'cpus' is removed, then call this routine to rebuild the
770 * scheduler's dynamic sched domains.
730 * 771 *
731 * Cannot be directly called from cpuset code handling changes 772 * Call with cpuset_mutex held. Takes get_online_cpus().
732 * to the cpuset pseudo-filesystem, because it cannot be called
733 * from code that already holds cgroup_mutex.
734 */ 773 */
735static void do_rebuild_sched_domains(struct work_struct *unused) 774static void rebuild_sched_domains_locked(void)
736{ 775{
737 struct sched_domain_attr *attr; 776 struct sched_domain_attr *attr;
738 cpumask_var_t *doms; 777 cpumask_var_t *doms;
739 int ndoms; 778 int ndoms;
740 779
780 lockdep_assert_held(&cpuset_mutex);
741 get_online_cpus(); 781 get_online_cpus();
742 782
743 /* Generate domain masks and attrs */ 783 /* Generate domain masks and attrs */
744 cgroup_lock();
745 ndoms = generate_sched_domains(&doms, &attr); 784 ndoms = generate_sched_domains(&doms, &attr);
746 cgroup_unlock();
747 785
748 /* Have scheduler rebuild the domains */ 786 /* Have scheduler rebuild the domains */
749 partition_sched_domains(ndoms, doms, attr); 787 partition_sched_domains(ndoms, doms, attr);
@@ -751,7 +789,7 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
751 put_online_cpus(); 789 put_online_cpus();
752} 790}
753#else /* !CONFIG_SMP */ 791#else /* !CONFIG_SMP */
754static void do_rebuild_sched_domains(struct work_struct *unused) 792static void rebuild_sched_domains_locked(void)
755{ 793{
756} 794}
757 795
@@ -763,44 +801,11 @@ static int generate_sched_domains(cpumask_var_t **domains,
763} 801}
764#endif /* CONFIG_SMP */ 802#endif /* CONFIG_SMP */
765 803
766static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
767
768/*
769 * Rebuild scheduler domains, asynchronously via workqueue.
770 *
771 * If the flag 'sched_load_balance' of any cpuset with non-empty
772 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
773 * which has that flag enabled, or if any cpuset with a non-empty
774 * 'cpus' is removed, then call this routine to rebuild the
775 * scheduler's dynamic sched domains.
776 *
777 * The rebuild_sched_domains() and partition_sched_domains()
778 * routines must nest cgroup_lock() inside get_online_cpus(),
779 * but such cpuset changes as these must nest that locking the
780 * other way, holding cgroup_lock() for much of the code.
781 *
782 * So in order to avoid an ABBA deadlock, the cpuset code handling
783 * these user changes delegates the actual sched domain rebuilding
784 * to a separate workqueue thread, which ends up processing the
785 * above do_rebuild_sched_domains() function.
786 */
787static void async_rebuild_sched_domains(void)
788{
789 queue_work(cpuset_wq, &rebuild_sched_domains_work);
790}
791
792/*
793 * Accomplishes the same scheduler domain rebuild as the above
794 * async_rebuild_sched_domains(), however it directly calls the
795 * rebuild routine synchronously rather than calling it via an
796 * asynchronous work thread.
797 *
798 * This can only be called from code that is not holding
799 * cgroup_mutex (not nested in a cgroup_lock() call.)
800 */
801void rebuild_sched_domains(void) 804void rebuild_sched_domains(void)
802{ 805{
803 do_rebuild_sched_domains(NULL); 806 mutex_lock(&cpuset_mutex);
807 rebuild_sched_domains_locked();
808 mutex_unlock(&cpuset_mutex);
804} 809}
805 810
806/** 811/**
@@ -808,7 +813,7 @@ void rebuild_sched_domains(void)
808 * @tsk: task to test 813 * @tsk: task to test
809 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner 814 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
810 * 815 *
811 * Call with cgroup_mutex held. May take callback_mutex during call. 816 * Call with cpuset_mutex held. May take callback_mutex during call.
812 * Called for each task in a cgroup by cgroup_scan_tasks(). 817 * Called for each task in a cgroup by cgroup_scan_tasks().
813 * Return nonzero if this tasks's cpus_allowed mask should be changed (in other 818 * Return nonzero if this tasks's cpus_allowed mask should be changed (in other
814 * words, if its mask is not equal to its cpuset's mask). 819 * words, if its mask is not equal to its cpuset's mask).
@@ -829,7 +834,7 @@ static int cpuset_test_cpumask(struct task_struct *tsk,
829 * cpus_allowed mask needs to be changed. 834 * cpus_allowed mask needs to be changed.
830 * 835 *
831 * We don't need to re-check for the cgroup/cpuset membership, since we're 836 * We don't need to re-check for the cgroup/cpuset membership, since we're
832 * holding cgroup_lock() at this point. 837 * holding cpuset_mutex at this point.
833 */ 838 */
834static void cpuset_change_cpumask(struct task_struct *tsk, 839static void cpuset_change_cpumask(struct task_struct *tsk,
835 struct cgroup_scanner *scan) 840 struct cgroup_scanner *scan)
@@ -842,7 +847,7 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
842 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed 847 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
843 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() 848 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
844 * 849 *
845 * Called with cgroup_mutex held 850 * Called with cpuset_mutex held
846 * 851 *
847 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, 852 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
848 * calling callback functions for each. 853 * calling callback functions for each.
@@ -920,7 +925,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
920 heap_free(&heap); 925 heap_free(&heap);
921 926
922 if (is_load_balanced) 927 if (is_load_balanced)
923 async_rebuild_sched_domains(); 928 rebuild_sched_domains_locked();
924 return 0; 929 return 0;
925} 930}
926 931
@@ -932,7 +937,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
932 * Temporarilly set tasks mems_allowed to target nodes of migration, 937 * Temporarilly set tasks mems_allowed to target nodes of migration,
933 * so that the migration code can allocate pages on these nodes. 938 * so that the migration code can allocate pages on these nodes.
934 * 939 *
935 * Call holding cgroup_mutex, so current's cpuset won't change 940 * Call holding cpuset_mutex, so current's cpuset won't change
936 * during this call, as manage_mutex holds off any cpuset_attach() 941 * during this call, as manage_mutex holds off any cpuset_attach()
937 * calls. Therefore we don't need to take task_lock around the 942 * calls. Therefore we don't need to take task_lock around the
938 * call to guarantee_online_mems(), as we know no one is changing 943 * call to guarantee_online_mems(), as we know no one is changing
@@ -1007,7 +1012,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
1007/* 1012/*
1008 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy 1013 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1009 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if 1014 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1010 * memory_migrate flag is set. Called with cgroup_mutex held. 1015 * memory_migrate flag is set. Called with cpuset_mutex held.
1011 */ 1016 */
1012static void cpuset_change_nodemask(struct task_struct *p, 1017static void cpuset_change_nodemask(struct task_struct *p,
1013 struct cgroup_scanner *scan) 1018 struct cgroup_scanner *scan)
@@ -1016,7 +1021,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
1016 struct cpuset *cs; 1021 struct cpuset *cs;
1017 int migrate; 1022 int migrate;
1018 const nodemask_t *oldmem = scan->data; 1023 const nodemask_t *oldmem = scan->data;
1019 static nodemask_t newmems; /* protected by cgroup_mutex */ 1024 static nodemask_t newmems; /* protected by cpuset_mutex */
1020 1025
1021 cs = cgroup_cs(scan->cg); 1026 cs = cgroup_cs(scan->cg);
1022 guarantee_online_mems(cs, &newmems); 1027 guarantee_online_mems(cs, &newmems);
@@ -1043,7 +1048,7 @@ static void *cpuset_being_rebound;
1043 * @oldmem: old mems_allowed of cpuset cs 1048 * @oldmem: old mems_allowed of cpuset cs
1044 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() 1049 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1045 * 1050 *
1046 * Called with cgroup_mutex held 1051 * Called with cpuset_mutex held
1047 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 1052 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1048 * if @heap != NULL. 1053 * if @heap != NULL.
1049 */ 1054 */
@@ -1065,7 +1070,7 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1065 * take while holding tasklist_lock. Forks can happen - the 1070 * take while holding tasklist_lock. Forks can happen - the
1066 * mpol_dup() cpuset_being_rebound check will catch such forks, 1071 * mpol_dup() cpuset_being_rebound check will catch such forks,
1067 * and rebind their vma mempolicies too. Because we still hold 1072 * and rebind their vma mempolicies too. Because we still hold
1068 * the global cgroup_mutex, we know that no other rebind effort 1073 * the global cpuset_mutex, we know that no other rebind effort
1069 * will be contending for the global variable cpuset_being_rebound. 1074 * will be contending for the global variable cpuset_being_rebound.
1070 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 1075 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1071 * is idempotent. Also migrate pages in each mm to new nodes. 1076 * is idempotent. Also migrate pages in each mm to new nodes.
@@ -1084,7 +1089,7 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1084 * mempolicies and if the cpuset is marked 'memory_migrate', 1089 * mempolicies and if the cpuset is marked 'memory_migrate',
1085 * migrate the tasks pages to the new memory. 1090 * migrate the tasks pages to the new memory.
1086 * 1091 *
1087 * Call with cgroup_mutex held. May take callback_mutex during call. 1092 * Call with cpuset_mutex held. May take callback_mutex during call.
1088 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 1093 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1089 * lock each such tasks mm->mmap_sem, scan its vma's and rebind 1094 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
1090 * their mempolicies to the cpusets new mems_allowed. 1095 * their mempolicies to the cpusets new mems_allowed.
@@ -1168,7 +1173,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1168 cs->relax_domain_level = val; 1173 cs->relax_domain_level = val;
1169 if (!cpumask_empty(cs->cpus_allowed) && 1174 if (!cpumask_empty(cs->cpus_allowed) &&
1170 is_sched_load_balance(cs)) 1175 is_sched_load_balance(cs))
1171 async_rebuild_sched_domains(); 1176 rebuild_sched_domains_locked();
1172 } 1177 }
1173 1178
1174 return 0; 1179 return 0;
@@ -1182,7 +1187,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1182 * Called by cgroup_scan_tasks() for each task in a cgroup. 1187 * Called by cgroup_scan_tasks() for each task in a cgroup.
1183 * 1188 *
1184 * We don't need to re-check for the cgroup/cpuset membership, since we're 1189 * We don't need to re-check for the cgroup/cpuset membership, since we're
1185 * holding cgroup_lock() at this point. 1190 * holding cpuset_mutex at this point.
1186 */ 1191 */
1187static void cpuset_change_flag(struct task_struct *tsk, 1192static void cpuset_change_flag(struct task_struct *tsk,
1188 struct cgroup_scanner *scan) 1193 struct cgroup_scanner *scan)
@@ -1195,7 +1200,7 @@ static void cpuset_change_flag(struct task_struct *tsk,
1195 * @cs: the cpuset in which each task's spread flags needs to be changed 1200 * @cs: the cpuset in which each task's spread flags needs to be changed
1196 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() 1201 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1197 * 1202 *
1198 * Called with cgroup_mutex held 1203 * Called with cpuset_mutex held
1199 * 1204 *
1200 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, 1205 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1201 * calling callback functions for each. 1206 * calling callback functions for each.
@@ -1220,7 +1225,7 @@ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1220 * cs: the cpuset to update 1225 * cs: the cpuset to update
1221 * turning_on: whether the flag is being set or cleared 1226 * turning_on: whether the flag is being set or cleared
1222 * 1227 *
1223 * Call with cgroup_mutex held. 1228 * Call with cpuset_mutex held.
1224 */ 1229 */
1225 1230
1226static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1231static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1260,7 +1265,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1260 mutex_unlock(&callback_mutex); 1265 mutex_unlock(&callback_mutex);
1261 1266
1262 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1267 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1263 async_rebuild_sched_domains(); 1268 rebuild_sched_domains_locked();
1264 1269
1265 if (spread_flag_changed) 1270 if (spread_flag_changed)
1266 update_tasks_flags(cs, &heap); 1271 update_tasks_flags(cs, &heap);
@@ -1368,24 +1373,18 @@ static int fmeter_getrate(struct fmeter *fmp)
1368 return val; 1373 return val;
1369} 1374}
1370 1375
1371/* 1376/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1372 * Protected by cgroup_lock. The nodemasks must be stored globally because
1373 * dynamically allocating them is not allowed in can_attach, and they must
1374 * persist until attach.
1375 */
1376static cpumask_var_t cpus_attach;
1377static nodemask_t cpuset_attach_nodemask_from;
1378static nodemask_t cpuset_attach_nodemask_to;
1379
1380/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1381static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1377static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1382{ 1378{
1383 struct cpuset *cs = cgroup_cs(cgrp); 1379 struct cpuset *cs = cgroup_cs(cgrp);
1384 struct task_struct *task; 1380 struct task_struct *task;
1385 int ret; 1381 int ret;
1386 1382
1383 mutex_lock(&cpuset_mutex);
1384
1385 ret = -ENOSPC;
1387 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1386 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1388 return -ENOSPC; 1387 goto out_unlock;
1389 1388
1390 cgroup_taskset_for_each(task, cgrp, tset) { 1389 cgroup_taskset_for_each(task, cgrp, tset) {
1391 /* 1390 /*
@@ -1397,25 +1396,45 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1397 * set_cpus_allowed_ptr() on all attached tasks before 1396 * set_cpus_allowed_ptr() on all attached tasks before
1398 * cpus_allowed may be changed. 1397 * cpus_allowed may be changed.
1399 */ 1398 */
1399 ret = -EINVAL;
1400 if (task->flags & PF_THREAD_BOUND) 1400 if (task->flags & PF_THREAD_BOUND)
1401 return -EINVAL; 1401 goto out_unlock;
1402 if ((ret = security_task_setscheduler(task))) 1402 ret = security_task_setscheduler(task);
1403 return ret; 1403 if (ret)
1404 goto out_unlock;
1404 } 1405 }
1405 1406
1406 /* prepare for attach */ 1407 /*
1407 if (cs == &top_cpuset) 1408 * Mark attach is in progress. This makes validate_change() fail
1408 cpumask_copy(cpus_attach, cpu_possible_mask); 1409 * changes which zero cpus/mems_allowed.
1409 else 1410 */
1410 guarantee_online_cpus(cs, cpus_attach); 1411 cs->attach_in_progress++;
1411 1412 ret = 0;
1412 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 1413out_unlock:
1414 mutex_unlock(&cpuset_mutex);
1415 return ret;
1416}
1413 1417
1414 return 0; 1418static void cpuset_cancel_attach(struct cgroup *cgrp,
1419 struct cgroup_taskset *tset)
1420{
1421 mutex_lock(&cpuset_mutex);
1422 cgroup_cs(cgrp)->attach_in_progress--;
1423 mutex_unlock(&cpuset_mutex);
1415} 1424}
1416 1425
1426/*
1427 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
1428 * but we can't allocate it dynamically there. Define it global and
1429 * allocate from cpuset_init().
1430 */
1431static cpumask_var_t cpus_attach;
1432
1417static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1433static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1418{ 1434{
1435 /* static bufs protected by cpuset_mutex */
1436 static nodemask_t cpuset_attach_nodemask_from;
1437 static nodemask_t cpuset_attach_nodemask_to;
1419 struct mm_struct *mm; 1438 struct mm_struct *mm;
1420 struct task_struct *task; 1439 struct task_struct *task;
1421 struct task_struct *leader = cgroup_taskset_first(tset); 1440 struct task_struct *leader = cgroup_taskset_first(tset);
@@ -1423,6 +1442,16 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1423 struct cpuset *cs = cgroup_cs(cgrp); 1442 struct cpuset *cs = cgroup_cs(cgrp);
1424 struct cpuset *oldcs = cgroup_cs(oldcgrp); 1443 struct cpuset *oldcs = cgroup_cs(oldcgrp);
1425 1444
1445 mutex_lock(&cpuset_mutex);
1446
1447 /* prepare for attach */
1448 if (cs == &top_cpuset)
1449 cpumask_copy(cpus_attach, cpu_possible_mask);
1450 else
1451 guarantee_online_cpus(cs, cpus_attach);
1452
1453 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1454
1426 cgroup_taskset_for_each(task, cgrp, tset) { 1455 cgroup_taskset_for_each(task, cgrp, tset) {
1427 /* 1456 /*
1428 * can_attach beforehand should guarantee that this doesn't 1457 * can_attach beforehand should guarantee that this doesn't
@@ -1448,6 +1477,18 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1448 &cpuset_attach_nodemask_to); 1477 &cpuset_attach_nodemask_to);
1449 mmput(mm); 1478 mmput(mm);
1450 } 1479 }
1480
1481 cs->attach_in_progress--;
1482
1483 /*
1484 * We may have raced with CPU/memory hotunplug. Trigger hotplug
1485 * propagation if @cs doesn't have any CPU or memory. It will move
1486 * the newly added tasks to the nearest parent which can execute.
1487 */
1488 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1489 schedule_cpuset_propagate_hotplug(cs);
1490
1491 mutex_unlock(&cpuset_mutex);
1451} 1492}
1452 1493
1453/* The various types of files and directories in a cpuset file system */ 1494/* The various types of files and directories in a cpuset file system */
@@ -1469,12 +1510,13 @@ typedef enum {
1469 1510
1470static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) 1511static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1471{ 1512{
1472 int retval = 0;
1473 struct cpuset *cs = cgroup_cs(cgrp); 1513 struct cpuset *cs = cgroup_cs(cgrp);
1474 cpuset_filetype_t type = cft->private; 1514 cpuset_filetype_t type = cft->private;
1515 int retval = -ENODEV;
1475 1516
1476 if (!cgroup_lock_live_group(cgrp)) 1517 mutex_lock(&cpuset_mutex);
1477 return -ENODEV; 1518 if (!is_cpuset_online(cs))
1519 goto out_unlock;
1478 1520
1479 switch (type) { 1521 switch (type) {
1480 case FILE_CPU_EXCLUSIVE: 1522 case FILE_CPU_EXCLUSIVE:
@@ -1508,18 +1550,20 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1508 retval = -EINVAL; 1550 retval = -EINVAL;
1509 break; 1551 break;
1510 } 1552 }
1511 cgroup_unlock(); 1553out_unlock:
1554 mutex_unlock(&cpuset_mutex);
1512 return retval; 1555 return retval;
1513} 1556}
1514 1557
1515static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) 1558static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1516{ 1559{
1517 int retval = 0;
1518 struct cpuset *cs = cgroup_cs(cgrp); 1560 struct cpuset *cs = cgroup_cs(cgrp);
1519 cpuset_filetype_t type = cft->private; 1561 cpuset_filetype_t type = cft->private;
1562 int retval = -ENODEV;
1520 1563
1521 if (!cgroup_lock_live_group(cgrp)) 1564 mutex_lock(&cpuset_mutex);
1522 return -ENODEV; 1565 if (!is_cpuset_online(cs))
1566 goto out_unlock;
1523 1567
1524 switch (type) { 1568 switch (type) {
1525 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 1569 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
@@ -1529,7 +1573,8 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1529 retval = -EINVAL; 1573 retval = -EINVAL;
1530 break; 1574 break;
1531 } 1575 }
1532 cgroup_unlock(); 1576out_unlock:
1577 mutex_unlock(&cpuset_mutex);
1533 return retval; 1578 return retval;
1534} 1579}
1535 1580
@@ -1539,17 +1584,36 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1539static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, 1584static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1540 const char *buf) 1585 const char *buf)
1541{ 1586{
1542 int retval = 0;
1543 struct cpuset *cs = cgroup_cs(cgrp); 1587 struct cpuset *cs = cgroup_cs(cgrp);
1544 struct cpuset *trialcs; 1588 struct cpuset *trialcs;
1589 int retval = -ENODEV;
1590
1591 /*
1592 * CPU or memory hotunplug may leave @cs w/o any execution
1593 * resources, in which case the hotplug code asynchronously updates
1594 * configuration and transfers all tasks to the nearest ancestor
1595 * which can execute.
1596 *
1597 * As writes to "cpus" or "mems" may restore @cs's execution
1598 * resources, wait for the previously scheduled operations before
1599 * proceeding, so that we don't end up keep removing tasks added
1600 * after execution capability is restored.
1601 *
1602 * Flushing cpuset_hotplug_work is enough to synchronize against
1603 * hotplug hanlding; however, cpuset_attach() may schedule
1604 * propagation work directly. Flush the workqueue too.
1605 */
1606 flush_work(&cpuset_hotplug_work);
1607 flush_workqueue(cpuset_propagate_hotplug_wq);
1545 1608
1546 if (!cgroup_lock_live_group(cgrp)) 1609 mutex_lock(&cpuset_mutex);
1547 return -ENODEV; 1610 if (!is_cpuset_online(cs))
1611 goto out_unlock;
1548 1612
1549 trialcs = alloc_trial_cpuset(cs); 1613 trialcs = alloc_trial_cpuset(cs);
1550 if (!trialcs) { 1614 if (!trialcs) {
1551 retval = -ENOMEM; 1615 retval = -ENOMEM;
1552 goto out; 1616 goto out_unlock;
1553 } 1617 }
1554 1618
1555 switch (cft->private) { 1619 switch (cft->private) {
@@ -1565,8 +1629,8 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1565 } 1629 }
1566 1630
1567 free_trial_cpuset(trialcs); 1631 free_trial_cpuset(trialcs);
1568out: 1632out_unlock:
1569 cgroup_unlock(); 1633 mutex_unlock(&cpuset_mutex);
1570 return retval; 1634 return retval;
1571} 1635}
1572 1636
@@ -1790,15 +1854,12 @@ static struct cftype files[] = {
1790 1854
1791static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont) 1855static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
1792{ 1856{
1793 struct cgroup *parent_cg = cont->parent; 1857 struct cpuset *cs;
1794 struct cgroup *tmp_cg;
1795 struct cpuset *parent, *cs;
1796 1858
1797 if (!parent_cg) 1859 if (!cont->parent)
1798 return &top_cpuset.css; 1860 return &top_cpuset.css;
1799 parent = cgroup_cs(parent_cg);
1800 1861
1801 cs = kmalloc(sizeof(*cs), GFP_KERNEL); 1862 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1802 if (!cs) 1863 if (!cs)
1803 return ERR_PTR(-ENOMEM); 1864 return ERR_PTR(-ENOMEM);
1804 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) { 1865 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
@@ -1806,22 +1867,38 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
1806 return ERR_PTR(-ENOMEM); 1867 return ERR_PTR(-ENOMEM);
1807 } 1868 }
1808 1869
1809 cs->flags = 0;
1810 if (is_spread_page(parent))
1811 set_bit(CS_SPREAD_PAGE, &cs->flags);
1812 if (is_spread_slab(parent))
1813 set_bit(CS_SPREAD_SLAB, &cs->flags);
1814 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1870 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1815 cpumask_clear(cs->cpus_allowed); 1871 cpumask_clear(cs->cpus_allowed);
1816 nodes_clear(cs->mems_allowed); 1872 nodes_clear(cs->mems_allowed);
1817 fmeter_init(&cs->fmeter); 1873 fmeter_init(&cs->fmeter);
1874 INIT_WORK(&cs->hotplug_work, cpuset_propagate_hotplug_workfn);
1818 cs->relax_domain_level = -1; 1875 cs->relax_domain_level = -1;
1819 1876
1820 cs->parent = parent; 1877 return &cs->css;
1878}
1879
1880static int cpuset_css_online(struct cgroup *cgrp)
1881{
1882 struct cpuset *cs = cgroup_cs(cgrp);
1883 struct cpuset *parent = parent_cs(cs);
1884 struct cpuset *tmp_cs;
1885 struct cgroup *pos_cg;
1886
1887 if (!parent)
1888 return 0;
1889
1890 mutex_lock(&cpuset_mutex);
1891
1892 set_bit(CS_ONLINE, &cs->flags);
1893 if (is_spread_page(parent))
1894 set_bit(CS_SPREAD_PAGE, &cs->flags);
1895 if (is_spread_slab(parent))
1896 set_bit(CS_SPREAD_SLAB, &cs->flags);
1897
1821 number_of_cpusets++; 1898 number_of_cpusets++;
1822 1899
1823 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cont->flags)) 1900 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
1824 goto skip_clone; 1901 goto out_unlock;
1825 1902
1826 /* 1903 /*
1827 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 1904 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
@@ -1836,35 +1913,49 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
1836 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 1913 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1837 * (and likewise for mems) to the new cgroup. 1914 * (and likewise for mems) to the new cgroup.
1838 */ 1915 */
1839 list_for_each_entry(tmp_cg, &parent_cg->children, sibling) { 1916 rcu_read_lock();
1840 struct cpuset *tmp_cs = cgroup_cs(tmp_cg); 1917 cpuset_for_each_child(tmp_cs, pos_cg, parent) {
1841 1918 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
1842 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) 1919 rcu_read_unlock();
1843 goto skip_clone; 1920 goto out_unlock;
1921 }
1844 } 1922 }
1923 rcu_read_unlock();
1845 1924
1846 mutex_lock(&callback_mutex); 1925 mutex_lock(&callback_mutex);
1847 cs->mems_allowed = parent->mems_allowed; 1926 cs->mems_allowed = parent->mems_allowed;
1848 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 1927 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1849 mutex_unlock(&callback_mutex); 1928 mutex_unlock(&callback_mutex);
1850skip_clone: 1929out_unlock:
1851 return &cs->css; 1930 mutex_unlock(&cpuset_mutex);
1931 return 0;
1932}
1933
1934static void cpuset_css_offline(struct cgroup *cgrp)
1935{
1936 struct cpuset *cs = cgroup_cs(cgrp);
1937
1938 mutex_lock(&cpuset_mutex);
1939
1940 if (is_sched_load_balance(cs))
1941 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1942
1943 number_of_cpusets--;
1944 clear_bit(CS_ONLINE, &cs->flags);
1945
1946 mutex_unlock(&cpuset_mutex);
1852} 1947}
1853 1948
1854/* 1949/*
1855 * If the cpuset being removed has its flag 'sched_load_balance' 1950 * If the cpuset being removed has its flag 'sched_load_balance'
1856 * enabled, then simulate turning sched_load_balance off, which 1951 * enabled, then simulate turning sched_load_balance off, which
1857 * will call async_rebuild_sched_domains(). 1952 * will call rebuild_sched_domains_locked().
1858 */ 1953 */
1859 1954
1860static void cpuset_css_free(struct cgroup *cont) 1955static void cpuset_css_free(struct cgroup *cont)
1861{ 1956{
1862 struct cpuset *cs = cgroup_cs(cont); 1957 struct cpuset *cs = cgroup_cs(cont);
1863 1958
1864 if (is_sched_load_balance(cs))
1865 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1866
1867 number_of_cpusets--;
1868 free_cpumask_var(cs->cpus_allowed); 1959 free_cpumask_var(cs->cpus_allowed);
1869 kfree(cs); 1960 kfree(cs);
1870} 1961}
@@ -1872,8 +1963,11 @@ static void cpuset_css_free(struct cgroup *cont)
1872struct cgroup_subsys cpuset_subsys = { 1963struct cgroup_subsys cpuset_subsys = {
1873 .name = "cpuset", 1964 .name = "cpuset",
1874 .css_alloc = cpuset_css_alloc, 1965 .css_alloc = cpuset_css_alloc,
1966 .css_online = cpuset_css_online,
1967 .css_offline = cpuset_css_offline,
1875 .css_free = cpuset_css_free, 1968 .css_free = cpuset_css_free,
1876 .can_attach = cpuset_can_attach, 1969 .can_attach = cpuset_can_attach,
1970 .cancel_attach = cpuset_cancel_attach,
1877 .attach = cpuset_attach, 1971 .attach = cpuset_attach,
1878 .subsys_id = cpuset_subsys_id, 1972 .subsys_id = cpuset_subsys_id,
1879 .base_cftypes = files, 1973 .base_cftypes = files,
@@ -1924,7 +2018,9 @@ static void cpuset_do_move_task(struct task_struct *tsk,
1924{ 2018{
1925 struct cgroup *new_cgroup = scan->data; 2019 struct cgroup *new_cgroup = scan->data;
1926 2020
2021 cgroup_lock();
1927 cgroup_attach_task(new_cgroup, tsk); 2022 cgroup_attach_task(new_cgroup, tsk);
2023 cgroup_unlock();
1928} 2024}
1929 2025
1930/** 2026/**
@@ -1932,7 +2028,7 @@ static void cpuset_do_move_task(struct task_struct *tsk,
1932 * @from: cpuset in which the tasks currently reside 2028 * @from: cpuset in which the tasks currently reside
1933 * @to: cpuset to which the tasks will be moved 2029 * @to: cpuset to which the tasks will be moved
1934 * 2030 *
1935 * Called with cgroup_mutex held 2031 * Called with cpuset_mutex held
1936 * callback_mutex must not be held, as cpuset_attach() will take it. 2032 * callback_mutex must not be held, as cpuset_attach() will take it.
1937 * 2033 *
1938 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, 2034 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
@@ -1959,169 +2055,200 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1959 * removing that CPU or node from all cpusets. If this removes the 2055 * removing that CPU or node from all cpusets. If this removes the
1960 * last CPU or node from a cpuset, then move the tasks in the empty 2056 * last CPU or node from a cpuset, then move the tasks in the empty
1961 * cpuset to its next-highest non-empty parent. 2057 * cpuset to its next-highest non-empty parent.
1962 *
1963 * Called with cgroup_mutex held
1964 * callback_mutex must not be held, as cpuset_attach() will take it.
1965 */ 2058 */
1966static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 2059static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1967{ 2060{
1968 struct cpuset *parent; 2061 struct cpuset *parent;
1969 2062
1970 /* 2063 /*
1971 * The cgroup's css_sets list is in use if there are tasks
1972 * in the cpuset; the list is empty if there are none;
1973 * the cs->css.refcnt seems always 0.
1974 */
1975 if (list_empty(&cs->css.cgroup->css_sets))
1976 return;
1977
1978 /*
1979 * Find its next-highest non-empty parent, (top cpuset 2064 * Find its next-highest non-empty parent, (top cpuset
1980 * has online cpus, so can't be empty). 2065 * has online cpus, so can't be empty).
1981 */ 2066 */
1982 parent = cs->parent; 2067 parent = parent_cs(cs);
1983 while (cpumask_empty(parent->cpus_allowed) || 2068 while (cpumask_empty(parent->cpus_allowed) ||
1984 nodes_empty(parent->mems_allowed)) 2069 nodes_empty(parent->mems_allowed))
1985 parent = parent->parent; 2070 parent = parent_cs(parent);
1986 2071
1987 move_member_tasks_to_cpuset(cs, parent); 2072 move_member_tasks_to_cpuset(cs, parent);
1988} 2073}
1989 2074
1990/* 2075/**
1991 * Helper function to traverse cpusets. 2076 * cpuset_propagate_hotplug_workfn - propagate CPU/memory hotplug to a cpuset
1992 * It can be used to walk the cpuset tree from top to bottom, completing 2077 * @cs: cpuset in interest
1993 * one layer before dropping down to the next (thus always processing a 2078 *
1994 * node before any of its children). 2079 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2080 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
2081 * all its tasks are moved to the nearest ancestor with both resources.
1995 */ 2082 */
1996static struct cpuset *cpuset_next(struct list_head *queue) 2083static void cpuset_propagate_hotplug_workfn(struct work_struct *work)
1997{ 2084{
1998 struct cpuset *cp; 2085 static cpumask_t off_cpus;
1999 struct cpuset *child; /* scans child cpusets of cp */ 2086 static nodemask_t off_mems, tmp_mems;
2000 struct cgroup *cont; 2087 struct cpuset *cs = container_of(work, struct cpuset, hotplug_work);
2088 bool is_empty;
2001 2089
2002 if (list_empty(queue)) 2090 mutex_lock(&cpuset_mutex);
2003 return NULL; 2091
2092 cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2093 nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
2004 2094
2005 cp = list_first_entry(queue, struct cpuset, stack_list); 2095 /* remove offline cpus from @cs */
2006 list_del(queue->next); 2096 if (!cpumask_empty(&off_cpus)) {
2007 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { 2097 mutex_lock(&callback_mutex);
2008 child = cgroup_cs(cont); 2098 cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2009 list_add_tail(&child->stack_list, queue); 2099 mutex_unlock(&callback_mutex);
2100 update_tasks_cpumask(cs, NULL);
2101 }
2102
2103 /* remove offline mems from @cs */
2104 if (!nodes_empty(off_mems)) {
2105 tmp_mems = cs->mems_allowed;
2106 mutex_lock(&callback_mutex);
2107 nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2108 mutex_unlock(&callback_mutex);
2109 update_tasks_nodemask(cs, &tmp_mems, NULL);
2010 } 2110 }
2011 2111
2012 return cp; 2112 is_empty = cpumask_empty(cs->cpus_allowed) ||
2113 nodes_empty(cs->mems_allowed);
2114
2115 mutex_unlock(&cpuset_mutex);
2116
2117 /*
2118 * If @cs became empty, move tasks to the nearest ancestor with
2119 * execution resources. This is full cgroup operation which will
2120 * also call back into cpuset. Should be done outside any lock.
2121 */
2122 if (is_empty)
2123 remove_tasks_in_empty_cpuset(cs);
2124
2125 /* the following may free @cs, should be the last operation */
2126 css_put(&cs->css);
2013} 2127}
2014 2128
2129/**
2130 * schedule_cpuset_propagate_hotplug - schedule hotplug propagation to a cpuset
2131 * @cs: cpuset of interest
2132 *
2133 * Schedule cpuset_propagate_hotplug_workfn() which will update CPU and
2134 * memory masks according to top_cpuset.
2135 */
2136static void schedule_cpuset_propagate_hotplug(struct cpuset *cs)
2137{
2138 /*
2139 * Pin @cs. The refcnt will be released when the work item
2140 * finishes executing.
2141 */
2142 if (!css_tryget(&cs->css))
2143 return;
2015 2144
2016/* 2145 /*
2017 * Walk the specified cpuset subtree upon a hotplug operation (CPU/Memory 2146 * Queue @cs->hotplug_work. If already pending, lose the css ref.
2018 * online/offline) and update the cpusets accordingly. 2147 * cpuset_propagate_hotplug_wq is ordered and propagation will
2019 * For regular CPU/Mem hotplug, look for empty cpusets; the tasks of such 2148 * happen in the order this function is called.
2020 * cpuset must be moved to a parent cpuset. 2149 */
2150 if (!queue_work(cpuset_propagate_hotplug_wq, &cs->hotplug_work))
2151 css_put(&cs->css);
2152}
2153
2154/**
2155 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2021 * 2156 *
2022 * Called with cgroup_mutex held. We take callback_mutex to modify 2157 * This function is called after either CPU or memory configuration has
2023 * cpus_allowed and mems_allowed. 2158 * changed and updates cpuset accordingly. The top_cpuset is always
2159 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2160 * order to make cpusets transparent (of no affect) on systems that are
2161 * actively using CPU hotplug but making no active use of cpusets.
2024 * 2162 *
2025 * This walk processes the tree from top to bottom, completing one layer 2163 * Non-root cpusets are only affected by offlining. If any CPUs or memory
2026 * before dropping down to the next. It always processes a node before 2164 * nodes have been taken down, cpuset_propagate_hotplug() is invoked on all
2027 * any of its children. 2165 * descendants.
2028 * 2166 *
2029 * In the case of memory hot-unplug, it will remove nodes from N_MEMORY 2167 * Note that CPU offlining during suspend is ignored. We don't modify
2030 * if all present pages from a node are offlined. 2168 * cpusets across suspend/resume cycles at all.
2031 */ 2169 */
2032static void 2170static void cpuset_hotplug_workfn(struct work_struct *work)
2033scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
2034{ 2171{
2035 LIST_HEAD(queue); 2172 static cpumask_t new_cpus, tmp_cpus;
2036 struct cpuset *cp; /* scans cpusets being updated */ 2173 static nodemask_t new_mems, tmp_mems;
2037 static nodemask_t oldmems; /* protected by cgroup_mutex */ 2174 bool cpus_updated, mems_updated;
2175 bool cpus_offlined, mems_offlined;
2038 2176
2039 list_add_tail((struct list_head *)&root->stack_list, &queue); 2177 mutex_lock(&cpuset_mutex);
2040 2178
2041 switch (event) { 2179 /* fetch the available cpus/mems and find out which changed how */
2042 case CPUSET_CPU_OFFLINE: 2180 cpumask_copy(&new_cpus, cpu_active_mask);
2043 while ((cp = cpuset_next(&queue)) != NULL) { 2181 new_mems = node_states[N_MEMORY];
2044 2182
2045 /* Continue past cpusets with all cpus online */ 2183 cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2046 if (cpumask_subset(cp->cpus_allowed, cpu_active_mask)) 2184 cpus_offlined = cpumask_andnot(&tmp_cpus, top_cpuset.cpus_allowed,
2047 continue; 2185 &new_cpus);
2048 2186
2049 /* Remove offline cpus from this cpuset. */ 2187 mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
2050 mutex_lock(&callback_mutex); 2188 nodes_andnot(tmp_mems, top_cpuset.mems_allowed, new_mems);
2051 cpumask_and(cp->cpus_allowed, cp->cpus_allowed, 2189 mems_offlined = !nodes_empty(tmp_mems);
2052 cpu_active_mask);
2053 mutex_unlock(&callback_mutex);
2054 2190
2055 /* Move tasks from the empty cpuset to a parent */ 2191 /* synchronize cpus_allowed to cpu_active_mask */
2056 if (cpumask_empty(cp->cpus_allowed)) 2192 if (cpus_updated) {
2057 remove_tasks_in_empty_cpuset(cp); 2193 mutex_lock(&callback_mutex);
2058 else 2194 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2059 update_tasks_cpumask(cp, NULL); 2195 mutex_unlock(&callback_mutex);
2060 } 2196 /* we don't mess with cpumasks of tasks in top_cpuset */
2061 break; 2197 }
2062 2198
2063 case CPUSET_MEM_OFFLINE: 2199 /* synchronize mems_allowed to N_MEMORY */
2064 while ((cp = cpuset_next(&queue)) != NULL) { 2200 if (mems_updated) {
2201 tmp_mems = top_cpuset.mems_allowed;
2202 mutex_lock(&callback_mutex);
2203 top_cpuset.mems_allowed = new_mems;
2204 mutex_unlock(&callback_mutex);
2205 update_tasks_nodemask(&top_cpuset, &tmp_mems, NULL);
2206 }
2065 2207
2066 /* Continue past cpusets with all mems online */ 2208 /* if cpus or mems went down, we need to propagate to descendants */
2067 if (nodes_subset(cp->mems_allowed, 2209 if (cpus_offlined || mems_offlined) {
2068 node_states[N_MEMORY])) 2210 struct cpuset *cs;
2069 continue; 2211 struct cgroup *pos_cgrp;
2070 2212
2071 oldmems = cp->mems_allowed; 2213 rcu_read_lock();
2214 cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset)
2215 schedule_cpuset_propagate_hotplug(cs);
2216 rcu_read_unlock();
2217 }
2072 2218
2073 /* Remove offline mems from this cpuset. */ 2219 mutex_unlock(&cpuset_mutex);
2074 mutex_lock(&callback_mutex);
2075 nodes_and(cp->mems_allowed, cp->mems_allowed,
2076 node_states[N_MEMORY]);
2077 mutex_unlock(&callback_mutex);
2078 2220
2079 /* Move tasks from the empty cpuset to a parent */ 2221 /* wait for propagations to finish */
2080 if (nodes_empty(cp->mems_allowed)) 2222 flush_workqueue(cpuset_propagate_hotplug_wq);
2081 remove_tasks_in_empty_cpuset(cp); 2223
2082 else 2224 /* rebuild sched domains if cpus_allowed has changed */
2083 update_tasks_nodemask(cp, &oldmems, NULL); 2225 if (cpus_updated) {
2084 } 2226 struct sched_domain_attr *attr;
2227 cpumask_var_t *doms;
2228 int ndoms;
2229
2230 mutex_lock(&cpuset_mutex);
2231 ndoms = generate_sched_domains(&doms, &attr);
2232 mutex_unlock(&cpuset_mutex);
2233
2234 partition_sched_domains(ndoms, doms, attr);
2085 } 2235 }
2086} 2236}
2087 2237
2088/*
2089 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2090 * period. This is necessary in order to make cpusets transparent
2091 * (of no affect) on systems that are actively using CPU hotplug
2092 * but making no active use of cpusets.
2093 *
2094 * The only exception to this is suspend/resume, where we don't
2095 * modify cpusets at all.
2096 *
2097 * This routine ensures that top_cpuset.cpus_allowed tracks
2098 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2099 *
2100 * Called within get_online_cpus(). Needs to call cgroup_lock()
2101 * before calling generate_sched_domains().
2102 *
2103 * @cpu_online: Indicates whether this is a CPU online event (true) or
2104 * a CPU offline event (false).
2105 */
2106void cpuset_update_active_cpus(bool cpu_online) 2238void cpuset_update_active_cpus(bool cpu_online)
2107{ 2239{
2108 struct sched_domain_attr *attr; 2240 /*
2109 cpumask_var_t *doms; 2241 * We're inside cpu hotplug critical region which usually nests
2110 int ndoms; 2242 * inside cgroup synchronization. Bounce actual hotplug processing
2111 2243 * to a work item to avoid reverse locking order.
2112 cgroup_lock(); 2244 *
2113 mutex_lock(&callback_mutex); 2245 * We still need to do partition_sched_domains() synchronously;
2114 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); 2246 * otherwise, the scheduler will get confused and put tasks to the
2115 mutex_unlock(&callback_mutex); 2247 * dead CPU. Fall back to the default single domain.
2116 2248 * cpuset_hotplug_workfn() will rebuild it as necessary.
2117 if (!cpu_online) 2249 */
2118 scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_CPU_OFFLINE); 2250 partition_sched_domains(1, NULL, NULL);
2119 2251 schedule_work(&cpuset_hotplug_work);
2120 ndoms = generate_sched_domains(&doms, &attr);
2121 cgroup_unlock();
2122
2123 /* Have scheduler rebuild the domains */
2124 partition_sched_domains(ndoms, doms, attr);
2125} 2252}
2126 2253
2127#ifdef CONFIG_MEMORY_HOTPLUG 2254#ifdef CONFIG_MEMORY_HOTPLUG
@@ -2133,29 +2260,7 @@ void cpuset_update_active_cpus(bool cpu_online)
2133static int cpuset_track_online_nodes(struct notifier_block *self, 2260static int cpuset_track_online_nodes(struct notifier_block *self,
2134 unsigned long action, void *arg) 2261 unsigned long action, void *arg)
2135{ 2262{
2136 static nodemask_t oldmems; /* protected by cgroup_mutex */ 2263 schedule_work(&cpuset_hotplug_work);
2137
2138 cgroup_lock();
2139 switch (action) {
2140 case MEM_ONLINE:
2141 oldmems = top_cpuset.mems_allowed;
2142 mutex_lock(&callback_mutex);
2143 top_cpuset.mems_allowed = node_states[N_MEMORY];
2144 mutex_unlock(&callback_mutex);
2145 update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2146 break;
2147 case MEM_OFFLINE:
2148 /*
2149 * needn't update top_cpuset.mems_allowed explicitly because
2150 * scan_cpusets_upon_hotplug() will update it.
2151 */
2152 scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_MEM_OFFLINE);
2153 break;
2154 default:
2155 break;
2156 }
2157 cgroup_unlock();
2158
2159 return NOTIFY_OK; 2264 return NOTIFY_OK;
2160} 2265}
2161#endif 2266#endif
@@ -2173,8 +2278,9 @@ void __init cpuset_init_smp(void)
2173 2278
2174 hotplug_memory_notifier(cpuset_track_online_nodes, 10); 2279 hotplug_memory_notifier(cpuset_track_online_nodes, 10);
2175 2280
2176 cpuset_wq = create_singlethread_workqueue("cpuset"); 2281 cpuset_propagate_hotplug_wq =
2177 BUG_ON(!cpuset_wq); 2282 alloc_ordered_workqueue("cpuset_hotplug", 0);
2283 BUG_ON(!cpuset_propagate_hotplug_wq);
2178} 2284}
2179 2285
2180/** 2286/**
@@ -2273,8 +2379,8 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2273 */ 2379 */
2274static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) 2380static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2275{ 2381{
2276 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent) 2382 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2277 cs = cs->parent; 2383 cs = parent_cs(cs);
2278 return cs; 2384 return cs;
2279} 2385}
2280 2386
@@ -2412,17 +2518,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2412} 2518}
2413 2519
2414/** 2520/**
2415 * cpuset_unlock - release lock on cpuset changes
2416 *
2417 * Undo the lock taken in a previous cpuset_lock() call.
2418 */
2419
2420void cpuset_unlock(void)
2421{
2422 mutex_unlock(&callback_mutex);
2423}
2424
2425/**
2426 * cpuset_mem_spread_node() - On which node to begin search for a file page 2521 * cpuset_mem_spread_node() - On which node to begin search for a file page
2427 * cpuset_slab_spread_node() - On which node to begin search for a slab page 2522 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2428 * 2523 *
@@ -2511,8 +2606,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2511 2606
2512 dentry = task_cs(tsk)->css.cgroup->dentry; 2607 dentry = task_cs(tsk)->css.cgroup->dentry;
2513 spin_lock(&cpuset_buffer_lock); 2608 spin_lock(&cpuset_buffer_lock);
2514 snprintf(cpuset_name, CPUSET_NAME_LEN, 2609
2515 dentry ? (const char *)dentry->d_name.name : "/"); 2610 if (!dentry) {
2611 strcpy(cpuset_name, "/");
2612 } else {
2613 spin_lock(&dentry->d_lock);
2614 strlcpy(cpuset_name, (const char *)dentry->d_name.name,
2615 CPUSET_NAME_LEN);
2616 spin_unlock(&dentry->d_lock);
2617 }
2618
2516 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, 2619 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2517 tsk->mems_allowed); 2620 tsk->mems_allowed);
2518 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", 2621 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
@@ -2560,7 +2663,7 @@ void __cpuset_memory_pressure_bump(void)
2560 * - Used for /proc/<pid>/cpuset. 2663 * - Used for /proc/<pid>/cpuset.
2561 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 2664 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2562 * doesn't really matter if tsk->cpuset changes after we read it, 2665 * doesn't really matter if tsk->cpuset changes after we read it,
2563 * and we take cgroup_mutex, keeping cpuset_attach() from changing it 2666 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
2564 * anyway. 2667 * anyway.
2565 */ 2668 */
2566static int proc_cpuset_show(struct seq_file *m, void *unused_v) 2669static int proc_cpuset_show(struct seq_file *m, void *unused_v)
@@ -2582,16 +2685,15 @@ static int proc_cpuset_show(struct seq_file *m, void *unused_v)
2582 if (!tsk) 2685 if (!tsk)
2583 goto out_free; 2686 goto out_free;
2584 2687
2585 retval = -EINVAL; 2688 rcu_read_lock();
2586 cgroup_lock();
2587 css = task_subsys_state(tsk, cpuset_subsys_id); 2689 css = task_subsys_state(tsk, cpuset_subsys_id);
2588 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); 2690 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2691 rcu_read_unlock();
2589 if (retval < 0) 2692 if (retval < 0)
2590 goto out_unlock; 2693 goto out_put_task;
2591 seq_puts(m, buf); 2694 seq_puts(m, buf);
2592 seq_putc(m, '\n'); 2695 seq_putc(m, '\n');
2593out_unlock: 2696out_put_task:
2594 cgroup_unlock();
2595 put_task_struct(tsk); 2697 put_task_struct(tsk);
2596out_free: 2698out_free:
2597 kfree(buf); 2699 kfree(buf);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4d5f8d5612f3..8875254120b6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)
1970 1970
1971 kdb_printf("Module Size modstruct Used by\n"); 1971 kdb_printf("Module Size modstruct Used by\n");
1972 list_for_each_entry(mod, kdb_modules, list) { 1972 list_for_each_entry(mod, kdb_modules, list) {
1973 if (mod->state == MODULE_STATE_UNFORMED)
1974 continue;
1973 1975
1974 kdb_printf("%-20s%8u 0x%p ", mod->name, 1976 kdb_printf("%-20s%8u 0x%p ", mod->name,
1975 mod->core_size, (void *)mod); 1977 mod->core_size, (void *)mod);
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 418b3f7053aa..d473988c1d0b 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -106,6 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
106 unsigned long long t2, t3; 106 unsigned long long t2, t3;
107 unsigned long flags; 107 unsigned long flags;
108 struct timespec ts; 108 struct timespec ts;
109 cputime_t utime, stime, stimescaled, utimescaled;
109 110
110 /* Though tsk->delays accessed later, early exit avoids 111 /* Though tsk->delays accessed later, early exit avoids
111 * unnecessary returning of other data 112 * unnecessary returning of other data
@@ -114,12 +115,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
114 goto done; 115 goto done;
115 116
116 tmp = (s64)d->cpu_run_real_total; 117 tmp = (s64)d->cpu_run_real_total;
117 cputime_to_timespec(tsk->utime + tsk->stime, &ts); 118 task_cputime(tsk, &utime, &stime);
119 cputime_to_timespec(utime + stime, &ts);
118 tmp += timespec_to_ns(&ts); 120 tmp += timespec_to_ns(&ts);
119 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; 121 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
120 122
121 tmp = (s64)d->cpu_scaled_run_real_total; 123 tmp = (s64)d->cpu_scaled_run_real_total;
122 cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts); 124 task_cputime_scaled(tsk, &utimescaled, &stimescaled);
125 cputime_to_timespec(utimescaled + stimescaled, &ts);
123 tmp += timespec_to_ns(&ts); 126 tmp += timespec_to_ns(&ts);
124 d->cpu_scaled_run_real_total = 127 d->cpu_scaled_run_real_total =
125 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; 128 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f24..5c75791d7269 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
908} 908}
909 909
910/* 910/*
911 * Initialize event state based on the perf_event_attr::disabled.
912 */
913static inline void perf_event__state_init(struct perf_event *event)
914{
915 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
916 PERF_EVENT_STATE_INACTIVE;
917}
918
919/*
911 * Called at perf_event creation and when events are attached/detached from a 920 * Called at perf_event creation and when events are attached/detached from a
912 * group. 921 * group.
913 */ 922 */
@@ -6162,11 +6171,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
6162 6171
6163 if (task) { 6172 if (task) {
6164 event->attach_state = PERF_ATTACH_TASK; 6173 event->attach_state = PERF_ATTACH_TASK;
6174
6175 if (attr->type == PERF_TYPE_TRACEPOINT)
6176 event->hw.tp_target = task;
6165#ifdef CONFIG_HAVE_HW_BREAKPOINT 6177#ifdef CONFIG_HAVE_HW_BREAKPOINT
6166 /* 6178 /*
6167 * hw_breakpoint is a bit difficult here.. 6179 * hw_breakpoint is a bit difficult here..
6168 */ 6180 */
6169 if (attr->type == PERF_TYPE_BREAKPOINT) 6181 else if (attr->type == PERF_TYPE_BREAKPOINT)
6170 event->hw.bp_target = task; 6182 event->hw.bp_target = task;
6171#endif 6183#endif
6172 } 6184 }
@@ -6179,8 +6191,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
6179 event->overflow_handler = overflow_handler; 6191 event->overflow_handler = overflow_handler;
6180 event->overflow_handler_context = context; 6192 event->overflow_handler_context = context;
6181 6193
6182 if (attr->disabled) 6194 perf_event__state_init(event);
6183 event->state = PERF_EVENT_STATE_OFF;
6184 6195
6185 pmu = NULL; 6196 pmu = NULL;
6186 6197
@@ -6609,9 +6620,17 @@ SYSCALL_DEFINE5(perf_event_open,
6609 6620
6610 mutex_lock(&gctx->mutex); 6621 mutex_lock(&gctx->mutex);
6611 perf_remove_from_context(group_leader); 6622 perf_remove_from_context(group_leader);
6623
6624 /*
6625 * Removing from the context ends up with disabled
6626 * event. What we want here is event in the initial
6627 * startup state, ready to be add into new context.
6628 */
6629 perf_event__state_init(group_leader);
6612 list_for_each_entry(sibling, &group_leader->sibling_list, 6630 list_for_each_entry(sibling, &group_leader->sibling_list,
6613 group_entry) { 6631 group_entry) {
6614 perf_remove_from_context(sibling); 6632 perf_remove_from_context(sibling);
6633 perf_event__state_init(sibling);
6615 put_ctx(gctx); 6634 put_ctx(gctx);
6616 } 6635 }
6617 mutex_unlock(&gctx->mutex); 6636 mutex_unlock(&gctx->mutex);
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index fe8a916507ed..a64f8aeb5c1f 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -676,7 +676,7 @@ int __init init_hw_breakpoint(void)
676 err_alloc: 676 err_alloc:
677 for_each_possible_cpu(err_cpu) { 677 for_each_possible_cpu(err_cpu) {
678 for (i = 0; i < TYPE_MAX; i++) 678 for (i = 0; i < TYPE_MAX; i++)
679 kfree(per_cpu(nr_task_bp_pinned[i], cpu)); 679 kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
680 if (err_cpu == cpu) 680 if (err_cpu == cpu)
681 break; 681 break;
682 } 682 }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index dea7acfbb071..a567c8c7ef31 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -27,6 +27,7 @@
27#include <linux/pagemap.h> /* read_mapping_page */ 27#include <linux/pagemap.h> /* read_mapping_page */
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/export.h>
30#include <linux/rmap.h> /* anon_vma_prepare */ 31#include <linux/rmap.h> /* anon_vma_prepare */
31#include <linux/mmu_notifier.h> /* set_pte_at_notify */ 32#include <linux/mmu_notifier.h> /* set_pte_at_notify */
32#include <linux/swap.h> /* try_to_free_swap */ 33#include <linux/swap.h> /* try_to_free_swap */
@@ -41,58 +42,31 @@
41#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE 42#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
42 43
43static struct rb_root uprobes_tree = RB_ROOT; 44static struct rb_root uprobes_tree = RB_ROOT;
44
45static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
46
47#define UPROBES_HASH_SZ 13
48
49/* 45/*
50 * We need separate register/unregister and mmap/munmap lock hashes because 46 * allows us to skip the uprobe_mmap if there are no uprobe events active
51 * of mmap_sem nesting. 47 * at this time. Probably a fine grained per inode count is better?
52 *
53 * uprobe_register() needs to install probes on (potentially) all processes
54 * and thus needs to acquire multiple mmap_sems (consequtively, not
55 * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
56 * for the particular process doing the mmap.
57 *
58 * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
59 * because of lock order against i_mmap_mutex. This means there's a hole in
60 * the register vma iteration where a mmap() can happen.
61 *
62 * Thus uprobe_register() can race with uprobe_mmap() and we can try and
63 * install a probe where one is already installed.
64 */ 48 */
49#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
65 50
66/* serialize (un)register */ 51static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
67static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
68
69#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
70 52
53#define UPROBES_HASH_SZ 13
71/* serialize uprobe->pending_list */ 54/* serialize uprobe->pending_list */
72static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; 55static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
73#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) 56#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
74 57
75static struct percpu_rw_semaphore dup_mmap_sem; 58static struct percpu_rw_semaphore dup_mmap_sem;
76 59
77/*
78 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
79 * events active at this time. Probably a fine grained per inode count is
80 * better?
81 */
82static atomic_t uprobe_events = ATOMIC_INIT(0);
83
84/* Have a copy of original instruction */ 60/* Have a copy of original instruction */
85#define UPROBE_COPY_INSN 0 61#define UPROBE_COPY_INSN 0
86/* Dont run handlers when first register/ last unregister in progress*/
87#define UPROBE_RUN_HANDLER 1
88/* Can skip singlestep */ 62/* Can skip singlestep */
89#define UPROBE_SKIP_SSTEP 2 63#define UPROBE_SKIP_SSTEP 1
90 64
91struct uprobe { 65struct uprobe {
92 struct rb_node rb_node; /* node in the rb tree */ 66 struct rb_node rb_node; /* node in the rb tree */
93 atomic_t ref; 67 atomic_t ref;
68 struct rw_semaphore register_rwsem;
94 struct rw_semaphore consumer_rwsem; 69 struct rw_semaphore consumer_rwsem;
95 struct mutex copy_mutex; /* TODO: kill me and UPROBE_COPY_INSN */
96 struct list_head pending_list; 70 struct list_head pending_list;
97 struct uprobe_consumer *consumers; 71 struct uprobe_consumer *consumers;
98 struct inode *inode; /* Also hold a ref to inode */ 72 struct inode *inode; /* Also hold a ref to inode */
@@ -430,9 +404,6 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
430 u = __insert_uprobe(uprobe); 404 u = __insert_uprobe(uprobe);
431 spin_unlock(&uprobes_treelock); 405 spin_unlock(&uprobes_treelock);
432 406
433 /* For now assume that the instruction need not be single-stepped */
434 __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
435
436 return u; 407 return u;
437} 408}
438 409
@@ -452,8 +423,10 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
452 423
453 uprobe->inode = igrab(inode); 424 uprobe->inode = igrab(inode);
454 uprobe->offset = offset; 425 uprobe->offset = offset;
426 init_rwsem(&uprobe->register_rwsem);
455 init_rwsem(&uprobe->consumer_rwsem); 427 init_rwsem(&uprobe->consumer_rwsem);
456 mutex_init(&uprobe->copy_mutex); 428 /* For now assume that the instruction need not be single-stepped */
429 __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
457 430
458 /* add to uprobes_tree, sorted on inode:offset */ 431 /* add to uprobes_tree, sorted on inode:offset */
459 cur_uprobe = insert_uprobe(uprobe); 432 cur_uprobe = insert_uprobe(uprobe);
@@ -463,38 +436,17 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
463 kfree(uprobe); 436 kfree(uprobe);
464 uprobe = cur_uprobe; 437 uprobe = cur_uprobe;
465 iput(inode); 438 iput(inode);
466 } else {
467 atomic_inc(&uprobe_events);
468 } 439 }
469 440
470 return uprobe; 441 return uprobe;
471} 442}
472 443
473static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) 444static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
474{
475 struct uprobe_consumer *uc;
476
477 if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
478 return;
479
480 down_read(&uprobe->consumer_rwsem);
481 for (uc = uprobe->consumers; uc; uc = uc->next) {
482 if (!uc->filter || uc->filter(uc, current))
483 uc->handler(uc, regs);
484 }
485 up_read(&uprobe->consumer_rwsem);
486}
487
488/* Returns the previous consumer */
489static struct uprobe_consumer *
490consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
491{ 445{
492 down_write(&uprobe->consumer_rwsem); 446 down_write(&uprobe->consumer_rwsem);
493 uc->next = uprobe->consumers; 447 uc->next = uprobe->consumers;
494 uprobe->consumers = uc; 448 uprobe->consumers = uc;
495 up_write(&uprobe->consumer_rwsem); 449 up_write(&uprobe->consumer_rwsem);
496
497 return uc->next;
498} 450}
499 451
500/* 452/*
@@ -588,7 +540,8 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
588 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 540 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
589 return ret; 541 return ret;
590 542
591 mutex_lock(&uprobe->copy_mutex); 543 /* TODO: move this into _register, until then we abuse this sem. */
544 down_write(&uprobe->consumer_rwsem);
592 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) 545 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
593 goto out; 546 goto out;
594 547
@@ -612,7 +565,30 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
612 set_bit(UPROBE_COPY_INSN, &uprobe->flags); 565 set_bit(UPROBE_COPY_INSN, &uprobe->flags);
613 566
614 out: 567 out:
615 mutex_unlock(&uprobe->copy_mutex); 568 up_write(&uprobe->consumer_rwsem);
569
570 return ret;
571}
572
573static inline bool consumer_filter(struct uprobe_consumer *uc,
574 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
575{
576 return !uc->filter || uc->filter(uc, ctx, mm);
577}
578
579static bool filter_chain(struct uprobe *uprobe,
580 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
581{
582 struct uprobe_consumer *uc;
583 bool ret = false;
584
585 down_read(&uprobe->consumer_rwsem);
586 for (uc = uprobe->consumers; uc; uc = uc->next) {
587 ret = consumer_filter(uc, ctx, mm);
588 if (ret)
589 break;
590 }
591 up_read(&uprobe->consumer_rwsem);
616 592
617 return ret; 593 return ret;
618} 594}
@@ -624,16 +600,6 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
624 bool first_uprobe; 600 bool first_uprobe;
625 int ret; 601 int ret;
626 602
627 /*
628 * If probe is being deleted, unregister thread could be done with
629 * the vma-rmap-walk through. Adding a probe now can be fatal since
630 * nobody will be able to cleanup. Also we could be from fork or
631 * mremap path, where the probe might have already been inserted.
632 * Hence behave as if probe already existed.
633 */
634 if (!uprobe->consumers)
635 return 0;
636
637 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); 603 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
638 if (ret) 604 if (ret)
639 return ret; 605 return ret;
@@ -658,14 +624,14 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
658static int 624static int
659remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) 625remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
660{ 626{
661 /* can happen if uprobe_register() fails */
662 if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
663 return 0;
664
665 set_bit(MMF_RECALC_UPROBES, &mm->flags); 627 set_bit(MMF_RECALC_UPROBES, &mm->flags);
666 return set_orig_insn(&uprobe->arch, mm, vaddr); 628 return set_orig_insn(&uprobe->arch, mm, vaddr);
667} 629}
668 630
631static inline bool uprobe_is_active(struct uprobe *uprobe)
632{
633 return !RB_EMPTY_NODE(&uprobe->rb_node);
634}
669/* 635/*
670 * There could be threads that have already hit the breakpoint. They 636 * There could be threads that have already hit the breakpoint. They
671 * will recheck the current insn and restart if find_uprobe() fails. 637 * will recheck the current insn and restart if find_uprobe() fails.
@@ -673,12 +639,15 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
673 */ 639 */
674static void delete_uprobe(struct uprobe *uprobe) 640static void delete_uprobe(struct uprobe *uprobe)
675{ 641{
642 if (WARN_ON(!uprobe_is_active(uprobe)))
643 return;
644
676 spin_lock(&uprobes_treelock); 645 spin_lock(&uprobes_treelock);
677 rb_erase(&uprobe->rb_node, &uprobes_tree); 646 rb_erase(&uprobe->rb_node, &uprobes_tree);
678 spin_unlock(&uprobes_treelock); 647 spin_unlock(&uprobes_treelock);
648 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
679 iput(uprobe->inode); 649 iput(uprobe->inode);
680 put_uprobe(uprobe); 650 put_uprobe(uprobe);
681 atomic_dec(&uprobe_events);
682} 651}
683 652
684struct map_info { 653struct map_info {
@@ -764,8 +733,10 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
764 return curr; 733 return curr;
765} 734}
766 735
767static int register_for_each_vma(struct uprobe *uprobe, bool is_register) 736static int
737register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
768{ 738{
739 bool is_register = !!new;
769 struct map_info *info; 740 struct map_info *info;
770 int err = 0; 741 int err = 0;
771 742
@@ -794,10 +765,16 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
794 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) 765 vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
795 goto unlock; 766 goto unlock;
796 767
797 if (is_register) 768 if (is_register) {
798 err = install_breakpoint(uprobe, mm, vma, info->vaddr); 769 /* consult only the "caller", new consumer. */
799 else 770 if (consumer_filter(new,
800 err |= remove_breakpoint(uprobe, mm, info->vaddr); 771 UPROBE_FILTER_REGISTER, mm))
772 err = install_breakpoint(uprobe, mm, vma, info->vaddr);
773 } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
774 if (!filter_chain(uprobe,
775 UPROBE_FILTER_UNREGISTER, mm))
776 err |= remove_breakpoint(uprobe, mm, info->vaddr);
777 }
801 778
802 unlock: 779 unlock:
803 up_write(&mm->mmap_sem); 780 up_write(&mm->mmap_sem);
@@ -810,17 +787,23 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
810 return err; 787 return err;
811} 788}
812 789
813static int __uprobe_register(struct uprobe *uprobe) 790static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
814{ 791{
815 return register_for_each_vma(uprobe, true); 792 consumer_add(uprobe, uc);
793 return register_for_each_vma(uprobe, uc);
816} 794}
817 795
818static void __uprobe_unregister(struct uprobe *uprobe) 796static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
819{ 797{
820 if (!register_for_each_vma(uprobe, false)) 798 int err;
821 delete_uprobe(uprobe); 799
800 if (!consumer_del(uprobe, uc)) /* WARN? */
801 return;
822 802
803 err = register_for_each_vma(uprobe, NULL);
823 /* TODO : cant unregister? schedule a worker thread */ 804 /* TODO : cant unregister? schedule a worker thread */
805 if (!uprobe->consumers && !err)
806 delete_uprobe(uprobe);
824} 807}
825 808
826/* 809/*
@@ -845,31 +828,59 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
845 struct uprobe *uprobe; 828 struct uprobe *uprobe;
846 int ret; 829 int ret;
847 830
848 if (!inode || !uc || uc->next) 831 /* Racy, just to catch the obvious mistakes */
849 return -EINVAL;
850
851 if (offset > i_size_read(inode)) 832 if (offset > i_size_read(inode))
852 return -EINVAL; 833 return -EINVAL;
853 834
854 ret = 0; 835 retry:
855 mutex_lock(uprobes_hash(inode));
856 uprobe = alloc_uprobe(inode, offset); 836 uprobe = alloc_uprobe(inode, offset);
857 837 if (!uprobe)
858 if (!uprobe) { 838 return -ENOMEM;
859 ret = -ENOMEM; 839 /*
860 } else if (!consumer_add(uprobe, uc)) { 840 * We can race with uprobe_unregister()->delete_uprobe().
861 ret = __uprobe_register(uprobe); 841 * Check uprobe_is_active() and retry if it is false.
862 if (ret) { 842 */
863 uprobe->consumers = NULL; 843 down_write(&uprobe->register_rwsem);
864 __uprobe_unregister(uprobe); 844 ret = -EAGAIN;
865 } else { 845 if (likely(uprobe_is_active(uprobe))) {
866 set_bit(UPROBE_RUN_HANDLER, &uprobe->flags); 846 ret = __uprobe_register(uprobe, uc);
867 } 847 if (ret)
848 __uprobe_unregister(uprobe, uc);
868 } 849 }
850 up_write(&uprobe->register_rwsem);
851 put_uprobe(uprobe);
869 852
870 mutex_unlock(uprobes_hash(inode)); 853 if (unlikely(ret == -EAGAIN))
871 if (uprobe) 854 goto retry;
872 put_uprobe(uprobe); 855 return ret;
856}
857EXPORT_SYMBOL_GPL(uprobe_register);
858
859/*
860 * uprobe_apply - unregister a already registered probe.
861 * @inode: the file in which the probe has to be removed.
862 * @offset: offset from the start of the file.
863 * @uc: consumer which wants to add more or remove some breakpoints
864 * @add: add or remove the breakpoints
865 */
866int uprobe_apply(struct inode *inode, loff_t offset,
867 struct uprobe_consumer *uc, bool add)
868{
869 struct uprobe *uprobe;
870 struct uprobe_consumer *con;
871 int ret = -ENOENT;
872
873 uprobe = find_uprobe(inode, offset);
874 if (!uprobe)
875 return ret;
876
877 down_write(&uprobe->register_rwsem);
878 for (con = uprobe->consumers; con && con != uc ; con = con->next)
879 ;
880 if (con)
881 ret = register_for_each_vma(uprobe, add ? uc : NULL);
882 up_write(&uprobe->register_rwsem);
883 put_uprobe(uprobe);
873 884
874 return ret; 885 return ret;
875} 886}
@@ -884,25 +895,42 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
884{ 895{
885 struct uprobe *uprobe; 896 struct uprobe *uprobe;
886 897
887 if (!inode || !uc)
888 return;
889
890 uprobe = find_uprobe(inode, offset); 898 uprobe = find_uprobe(inode, offset);
891 if (!uprobe) 899 if (!uprobe)
892 return; 900 return;
893 901
894 mutex_lock(uprobes_hash(inode)); 902 down_write(&uprobe->register_rwsem);
903 __uprobe_unregister(uprobe, uc);
904 up_write(&uprobe->register_rwsem);
905 put_uprobe(uprobe);
906}
907EXPORT_SYMBOL_GPL(uprobe_unregister);
895 908
896 if (consumer_del(uprobe, uc)) { 909static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
897 if (!uprobe->consumers) { 910{
898 __uprobe_unregister(uprobe); 911 struct vm_area_struct *vma;
899 clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags); 912 int err = 0;
900 } 913
914 down_read(&mm->mmap_sem);
915 for (vma = mm->mmap; vma; vma = vma->vm_next) {
916 unsigned long vaddr;
917 loff_t offset;
918
919 if (!valid_vma(vma, false) ||
920 vma->vm_file->f_mapping->host != uprobe->inode)
921 continue;
922
923 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
924 if (uprobe->offset < offset ||
925 uprobe->offset >= offset + vma->vm_end - vma->vm_start)
926 continue;
927
928 vaddr = offset_to_vaddr(vma, uprobe->offset);
929 err |= remove_breakpoint(uprobe, mm, vaddr);
901 } 930 }
931 up_read(&mm->mmap_sem);
902 932
903 mutex_unlock(uprobes_hash(inode)); 933 return err;
904 if (uprobe)
905 put_uprobe(uprobe);
906} 934}
907 935
908static struct rb_node * 936static struct rb_node *
@@ -979,7 +1007,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
979 struct uprobe *uprobe, *u; 1007 struct uprobe *uprobe, *u;
980 struct inode *inode; 1008 struct inode *inode;
981 1009
982 if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) 1010 if (no_uprobe_events() || !valid_vma(vma, true))
983 return 0; 1011 return 0;
984 1012
985 inode = vma->vm_file->f_mapping->host; 1013 inode = vma->vm_file->f_mapping->host;
@@ -988,9 +1016,14 @@ int uprobe_mmap(struct vm_area_struct *vma)
988 1016
989 mutex_lock(uprobes_mmap_hash(inode)); 1017 mutex_lock(uprobes_mmap_hash(inode));
990 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); 1018 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
991 1019 /*
1020 * We can race with uprobe_unregister(), this uprobe can be already
1021 * removed. But in this case filter_chain() must return false, all
1022 * consumers have gone away.
1023 */
992 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { 1024 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
993 if (!fatal_signal_pending(current)) { 1025 if (!fatal_signal_pending(current) &&
1026 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
994 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); 1027 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
995 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); 1028 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
996 } 1029 }
@@ -1025,7 +1058,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
1025 */ 1058 */
1026void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) 1059void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1027{ 1060{
1028 if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) 1061 if (no_uprobe_events() || !valid_vma(vma, false))
1029 return; 1062 return;
1030 1063
1031 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ 1064 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
@@ -1042,22 +1075,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
1042/* Slot allocation for XOL */ 1075/* Slot allocation for XOL */
1043static int xol_add_vma(struct xol_area *area) 1076static int xol_add_vma(struct xol_area *area)
1044{ 1077{
1045 struct mm_struct *mm; 1078 struct mm_struct *mm = current->mm;
1046 int ret; 1079 int ret = -EALREADY;
1047
1048 area->page = alloc_page(GFP_HIGHUSER);
1049 if (!area->page)
1050 return -ENOMEM;
1051
1052 ret = -EALREADY;
1053 mm = current->mm;
1054 1080
1055 down_write(&mm->mmap_sem); 1081 down_write(&mm->mmap_sem);
1056 if (mm->uprobes_state.xol_area) 1082 if (mm->uprobes_state.xol_area)
1057 goto fail; 1083 goto fail;
1058 1084
1059 ret = -ENOMEM; 1085 ret = -ENOMEM;
1060
1061 /* Try to map as high as possible, this is only a hint. */ 1086 /* Try to map as high as possible, this is only a hint. */
1062 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); 1087 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
1063 if (area->vaddr & ~PAGE_MASK) { 1088 if (area->vaddr & ~PAGE_MASK) {
@@ -1073,54 +1098,53 @@ static int xol_add_vma(struct xol_area *area)
1073 smp_wmb(); /* pairs with get_xol_area() */ 1098 smp_wmb(); /* pairs with get_xol_area() */
1074 mm->uprobes_state.xol_area = area; 1099 mm->uprobes_state.xol_area = area;
1075 ret = 0; 1100 ret = 0;
1076 1101 fail:
1077fail:
1078 up_write(&mm->mmap_sem); 1102 up_write(&mm->mmap_sem);
1079 if (ret)
1080 __free_page(area->page);
1081 1103
1082 return ret; 1104 return ret;
1083} 1105}
1084 1106
1085static struct xol_area *get_xol_area(struct mm_struct *mm)
1086{
1087 struct xol_area *area;
1088
1089 area = mm->uprobes_state.xol_area;
1090 smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
1091
1092 return area;
1093}
1094
1095/* 1107/*
1096 * xol_alloc_area - Allocate process's xol_area. 1108 * get_xol_area - Allocate process's xol_area if necessary.
1097 * This area will be used for storing instructions for execution out of 1109 * This area will be used for storing instructions for execution out of line.
1098 * line.
1099 * 1110 *
1100 * Returns the allocated area or NULL. 1111 * Returns the allocated area or NULL.
1101 */ 1112 */
1102static struct xol_area *xol_alloc_area(void) 1113static struct xol_area *get_xol_area(void)
1103{ 1114{
1115 struct mm_struct *mm = current->mm;
1104 struct xol_area *area; 1116 struct xol_area *area;
1105 1117
1118 area = mm->uprobes_state.xol_area;
1119 if (area)
1120 goto ret;
1121
1106 area = kzalloc(sizeof(*area), GFP_KERNEL); 1122 area = kzalloc(sizeof(*area), GFP_KERNEL);
1107 if (unlikely(!area)) 1123 if (unlikely(!area))
1108 return NULL; 1124 goto out;
1109 1125
1110 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL); 1126 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1111
1112 if (!area->bitmap) 1127 if (!area->bitmap)
1113 goto fail; 1128 goto free_area;
1129
1130 area->page = alloc_page(GFP_HIGHUSER);
1131 if (!area->page)
1132 goto free_bitmap;
1114 1133
1115 init_waitqueue_head(&area->wq); 1134 init_waitqueue_head(&area->wq);
1116 if (!xol_add_vma(area)) 1135 if (!xol_add_vma(area))
1117 return area; 1136 return area;
1118 1137
1119fail: 1138 __free_page(area->page);
1139 free_bitmap:
1120 kfree(area->bitmap); 1140 kfree(area->bitmap);
1141 free_area:
1121 kfree(area); 1142 kfree(area);
1122 1143 out:
1123 return get_xol_area(current->mm); 1144 area = mm->uprobes_state.xol_area;
1145 ret:
1146 smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
1147 return area;
1124} 1148}
1125 1149
1126/* 1150/*
@@ -1186,33 +1210,26 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
1186} 1210}
1187 1211
1188/* 1212/*
1189 * xol_get_insn_slot - If was not allocated a slot, then 1213 * xol_get_insn_slot - allocate a slot for xol.
1190 * allocate a slot.
1191 * Returns the allocated slot address or 0. 1214 * Returns the allocated slot address or 0.
1192 */ 1215 */
1193static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr) 1216static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1194{ 1217{
1195 struct xol_area *area; 1218 struct xol_area *area;
1196 unsigned long offset; 1219 unsigned long offset;
1220 unsigned long xol_vaddr;
1197 void *vaddr; 1221 void *vaddr;
1198 1222
1199 area = get_xol_area(current->mm); 1223 area = get_xol_area();
1200 if (!area) { 1224 if (!area)
1201 area = xol_alloc_area(); 1225 return 0;
1202 if (!area)
1203 return 0;
1204 }
1205 current->utask->xol_vaddr = xol_take_insn_slot(area);
1206 1226
1207 /* 1227 xol_vaddr = xol_take_insn_slot(area);
1208 * Initialize the slot if xol_vaddr points to valid 1228 if (unlikely(!xol_vaddr))
1209 * instruction slot.
1210 */
1211 if (unlikely(!current->utask->xol_vaddr))
1212 return 0; 1229 return 0;
1213 1230
1214 current->utask->vaddr = slot_addr; 1231 /* Initialize the slot */
1215 offset = current->utask->xol_vaddr & ~PAGE_MASK; 1232 offset = xol_vaddr & ~PAGE_MASK;
1216 vaddr = kmap_atomic(area->page); 1233 vaddr = kmap_atomic(area->page);
1217 memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES); 1234 memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
1218 kunmap_atomic(vaddr); 1235 kunmap_atomic(vaddr);
@@ -1222,7 +1239,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
1222 */ 1239 */
1223 flush_dcache_page(area->page); 1240 flush_dcache_page(area->page);
1224 1241
1225 return current->utask->xol_vaddr; 1242 return xol_vaddr;
1226} 1243}
1227 1244
1228/* 1245/*
@@ -1240,8 +1257,7 @@ static void xol_free_insn_slot(struct task_struct *tsk)
1240 return; 1257 return;
1241 1258
1242 slot_addr = tsk->utask->xol_vaddr; 1259 slot_addr = tsk->utask->xol_vaddr;
1243 1260 if (unlikely(!slot_addr))
1244 if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1245 return; 1261 return;
1246 1262
1247 area = tsk->mm->uprobes_state.xol_area; 1263 area = tsk->mm->uprobes_state.xol_area;
@@ -1303,33 +1319,48 @@ void uprobe_copy_process(struct task_struct *t)
1303} 1319}
1304 1320
1305/* 1321/*
1306 * Allocate a uprobe_task object for the task. 1322 * Allocate a uprobe_task object for the task if if necessary.
1307 * Called when the thread hits a breakpoint for the first time. 1323 * Called when the thread hits a breakpoint.
1308 * 1324 *
1309 * Returns: 1325 * Returns:
1310 * - pointer to new uprobe_task on success 1326 * - pointer to new uprobe_task on success
1311 * - NULL otherwise 1327 * - NULL otherwise
1312 */ 1328 */
1313static struct uprobe_task *add_utask(void) 1329static struct uprobe_task *get_utask(void)
1314{ 1330{
1315 struct uprobe_task *utask; 1331 if (!current->utask)
1316 1332 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1317 utask = kzalloc(sizeof *utask, GFP_KERNEL); 1333 return current->utask;
1318 if (unlikely(!utask))
1319 return NULL;
1320
1321 current->utask = utask;
1322 return utask;
1323} 1334}
1324 1335
1325/* Prepare to single-step probed instruction out of line. */ 1336/* Prepare to single-step probed instruction out of line. */
1326static int 1337static int
1327pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr) 1338pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1328{ 1339{
1329 if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs)) 1340 struct uprobe_task *utask;
1330 return 0; 1341 unsigned long xol_vaddr;
1342 int err;
1343
1344 utask = get_utask();
1345 if (!utask)
1346 return -ENOMEM;
1347
1348 xol_vaddr = xol_get_insn_slot(uprobe);
1349 if (!xol_vaddr)
1350 return -ENOMEM;
1351
1352 utask->xol_vaddr = xol_vaddr;
1353 utask->vaddr = bp_vaddr;
1354
1355 err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1356 if (unlikely(err)) {
1357 xol_free_insn_slot(current);
1358 return err;
1359 }
1331 1360
1332 return -EFAULT; 1361 utask->active_uprobe = uprobe;
1362 utask->state = UTASK_SSTEP;
1363 return 0;
1333} 1364}
1334 1365
1335/* 1366/*
@@ -1391,6 +1422,7 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
1391 * This is not strictly accurate, we can race with 1422 * This is not strictly accurate, we can race with
1392 * uprobe_unregister() and see the already removed 1423 * uprobe_unregister() and see the already removed
1393 * uprobe if delete_uprobe() was not yet called. 1424 * uprobe if delete_uprobe() was not yet called.
1425 * Or this uprobe can be filtered out.
1394 */ 1426 */
1395 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) 1427 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1396 return; 1428 return;
@@ -1452,13 +1484,33 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1452 return uprobe; 1484 return uprobe;
1453} 1485}
1454 1486
1487static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1488{
1489 struct uprobe_consumer *uc;
1490 int remove = UPROBE_HANDLER_REMOVE;
1491
1492 down_read(&uprobe->register_rwsem);
1493 for (uc = uprobe->consumers; uc; uc = uc->next) {
1494 int rc = uc->handler(uc, regs);
1495
1496 WARN(rc & ~UPROBE_HANDLER_MASK,
1497 "bad rc=0x%x from %pf()\n", rc, uc->handler);
1498 remove &= rc;
1499 }
1500
1501 if (remove && uprobe->consumers) {
1502 WARN_ON(!uprobe_is_active(uprobe));
1503 unapply_uprobe(uprobe, current->mm);
1504 }
1505 up_read(&uprobe->register_rwsem);
1506}
1507
1455/* 1508/*
1456 * Run handler and ask thread to singlestep. 1509 * Run handler and ask thread to singlestep.
1457 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. 1510 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1458 */ 1511 */
1459static void handle_swbp(struct pt_regs *regs) 1512static void handle_swbp(struct pt_regs *regs)
1460{ 1513{
1461 struct uprobe_task *utask;
1462 struct uprobe *uprobe; 1514 struct uprobe *uprobe;
1463 unsigned long bp_vaddr; 1515 unsigned long bp_vaddr;
1464 int uninitialized_var(is_swbp); 1516 int uninitialized_var(is_swbp);
@@ -1483,6 +1535,10 @@ static void handle_swbp(struct pt_regs *regs)
1483 } 1535 }
1484 return; 1536 return;
1485 } 1537 }
1538
1539 /* change it in advance for ->handler() and restart */
1540 instruction_pointer_set(regs, bp_vaddr);
1541
1486 /* 1542 /*
1487 * TODO: move copy_insn/etc into _register and remove this hack. 1543 * TODO: move copy_insn/etc into _register and remove this hack.
1488 * After we hit the bp, _unregister + _register can install the 1544 * After we hit the bp, _unregister + _register can install the
@@ -1490,32 +1546,16 @@ static void handle_swbp(struct pt_regs *regs)
1490 */ 1546 */
1491 smp_rmb(); /* pairs with wmb() in install_breakpoint() */ 1547 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1492 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) 1548 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1493 goto restart; 1549 goto out;
1494
1495 utask = current->utask;
1496 if (!utask) {
1497 utask = add_utask();
1498 /* Cannot allocate; re-execute the instruction. */
1499 if (!utask)
1500 goto restart;
1501 }
1502 1550
1503 handler_chain(uprobe, regs); 1551 handler_chain(uprobe, regs);
1504 if (can_skip_sstep(uprobe, regs)) 1552 if (can_skip_sstep(uprobe, regs))
1505 goto out; 1553 goto out;
1506 1554
1507 if (!pre_ssout(uprobe, regs, bp_vaddr)) { 1555 if (!pre_ssout(uprobe, regs, bp_vaddr))
1508 utask->active_uprobe = uprobe;
1509 utask->state = UTASK_SSTEP;
1510 return; 1556 return;
1511 }
1512 1557
1513restart: 1558 /* can_skip_sstep() succeeded, or restart if can't singlestep */
1514 /*
1515 * cannot singlestep; cannot skip instruction;
1516 * re-execute the instruction.
1517 */
1518 instruction_pointer_set(regs, bp_vaddr);
1519out: 1559out:
1520 put_uprobe(uprobe); 1560 put_uprobe(uprobe);
1521} 1561}
@@ -1609,10 +1649,8 @@ static int __init init_uprobes(void)
1609{ 1649{
1610 int i; 1650 int i;
1611 1651
1612 for (i = 0; i < UPROBES_HASH_SZ; i++) { 1652 for (i = 0; i < UPROBES_HASH_SZ; i++)
1613 mutex_init(&uprobes_mutex[i]);
1614 mutex_init(&uprobes_mmap_mutex[i]); 1653 mutex_init(&uprobes_mmap_mutex[i]);
1615 }
1616 1654
1617 if (percpu_init_rwsem(&dup_mmap_sem)) 1655 if (percpu_init_rwsem(&dup_mmap_sem))
1618 return -ENOMEM; 1656 return -ENOMEM;
diff --git a/kernel/exit.c b/kernel/exit.c
index b4df21937216..7dd20408707c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,6 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
85 bool group_dead = thread_group_leader(tsk); 85 bool group_dead = thread_group_leader(tsk);
86 struct sighand_struct *sighand; 86 struct sighand_struct *sighand;
87 struct tty_struct *uninitialized_var(tty); 87 struct tty_struct *uninitialized_var(tty);
88 cputime_t utime, stime;
88 89
89 sighand = rcu_dereference_check(tsk->sighand, 90 sighand = rcu_dereference_check(tsk->sighand,
90 lockdep_tasklist_lock_is_held()); 91 lockdep_tasklist_lock_is_held());
@@ -123,9 +124,10 @@ static void __exit_signal(struct task_struct *tsk)
123 * We won't ever get here for the group leader, since it 124 * We won't ever get here for the group leader, since it
124 * will have been the last reference on the signal_struct. 125 * will have been the last reference on the signal_struct.
125 */ 126 */
126 sig->utime += tsk->utime; 127 task_cputime(tsk, &utime, &stime);
127 sig->stime += tsk->stime; 128 sig->utime += utime;
128 sig->gtime += tsk->gtime; 129 sig->stime += stime;
130 sig->gtime += task_gtime(tsk);
129 sig->min_flt += tsk->min_flt; 131 sig->min_flt += tsk->min_flt;
130 sig->maj_flt += tsk->maj_flt; 132 sig->maj_flt += tsk->maj_flt;
131 sig->nvcsw += tsk->nvcsw; 133 sig->nvcsw += tsk->nvcsw;
@@ -1092,7 +1094,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1092 sig = p->signal; 1094 sig = p->signal;
1093 psig->cutime += tgutime + sig->cutime; 1095 psig->cutime += tgutime + sig->cutime;
1094 psig->cstime += tgstime + sig->cstime; 1096 psig->cstime += tgstime + sig->cstime;
1095 psig->cgtime += p->gtime + sig->gtime + sig->cgtime; 1097 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
1096 psig->cmin_flt += 1098 psig->cmin_flt +=
1097 p->min_flt + sig->min_flt + sig->cmin_flt; 1099 p->min_flt + sig->min_flt + sig->cmin_flt;
1098 psig->cmaj_flt += 1100 psig->cmaj_flt +=
diff --git a/kernel/fork.c b/kernel/fork.c
index 65ca6d27f24e..4133876d8cd2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1233,6 +1233,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1233#ifndef CONFIG_VIRT_CPU_ACCOUNTING 1233#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1234 p->prev_cputime.utime = p->prev_cputime.stime = 0; 1234 p->prev_cputime.utime = p->prev_cputime.stime = 0;
1235#endif 1235#endif
1236#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1237 seqlock_init(&p->vtime_seqlock);
1238 p->vtime_snap = 0;
1239 p->vtime_snap_whence = VTIME_SLEEPING;
1240#endif
1241
1236#if defined(SPLIT_RSS_COUNTING) 1242#if defined(SPLIT_RSS_COUNTING)
1237 memset(&p->rss_stat, 0, sizeof(p->rss_stat)); 1243 memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1238#endif 1244#endif
@@ -1668,8 +1674,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1668 int, tls_val) 1674 int, tls_val)
1669#endif 1675#endif
1670{ 1676{
1671 return do_fork(clone_flags, newsp, 0, 1677 long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
1672 parent_tidptr, child_tidptr); 1678 asmlinkage_protect(5, ret, clone_flags, newsp,
1679 parent_tidptr, child_tidptr, tls_val);
1680 return ret;
1673} 1681}
1674#endif 1682#endif
1675 1683
diff --git a/kernel/futex.c b/kernel/futex.c
index 19eb089ca003..9618b6e9fb36 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -60,6 +60,7 @@
60#include <linux/pid.h> 60#include <linux/pid.h>
61#include <linux/nsproxy.h> 61#include <linux/nsproxy.h>
62#include <linux/ptrace.h> 62#include <linux/ptrace.h>
63#include <linux/sched/rt.h>
63 64
64#include <asm/futex.h> 65#include <asm/futex.h>
65 66
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6db7a5ed52b5..cc47812d3feb 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -44,6 +44,8 @@
44#include <linux/err.h> 44#include <linux/err.h>
45#include <linux/debugobjects.h> 45#include <linux/debugobjects.h>
46#include <linux/sched.h> 46#include <linux/sched.h>
47#include <linux/sched/sysctl.h>
48#include <linux/sched/rt.h>
47#include <linux/timer.h> 49#include <linux/timer.h>
48 50
49#include <asm/uaccess.h> 51#include <asm/uaccess.h>
@@ -640,21 +642,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
640 * and expiry check is done in the hrtimer_interrupt or in the softirq. 642 * and expiry check is done in the hrtimer_interrupt or in the softirq.
641 */ 643 */
642static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 644static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
643 struct hrtimer_clock_base *base, 645 struct hrtimer_clock_base *base)
644 int wakeup)
645{ 646{
646 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { 647 return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
647 if (wakeup) {
648 raw_spin_unlock(&base->cpu_base->lock);
649 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
650 raw_spin_lock(&base->cpu_base->lock);
651 } else
652 __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
653
654 return 1;
655 }
656
657 return 0;
658} 648}
659 649
660static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) 650static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -735,8 +725,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
735static inline void 725static inline void
736hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } 726hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
737static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 727static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
738 struct hrtimer_clock_base *base, 728 struct hrtimer_clock_base *base)
739 int wakeup)
740{ 729{
741 return 0; 730 return 0;
742} 731}
@@ -995,8 +984,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
995 * 984 *
996 * XXX send_remote_softirq() ? 985 * XXX send_remote_softirq() ?
997 */ 986 */
998 if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) 987 if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
999 hrtimer_enqueue_reprogram(timer, new_base, wakeup); 988 && hrtimer_enqueue_reprogram(timer, new_base)) {
989 if (wakeup) {
990 /*
991 * We need to drop cpu_base->lock to avoid a
992 * lock ordering issue vs. rq->lock.
993 */
994 raw_spin_unlock(&new_base->cpu_base->lock);
995 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
996 local_irq_restore(flags);
997 return ret;
998 } else {
999 __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1000 }
1001 }
1000 1002
1001 unlock_hrtimer_base(timer, &flags); 1003 unlock_hrtimer_base(timer, &flags);
1002 1004
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3aca9f29d30e..cbd97ce0b000 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -90,27 +90,41 @@ int irq_set_handler_data(unsigned int irq, void *data)
90EXPORT_SYMBOL(irq_set_handler_data); 90EXPORT_SYMBOL(irq_set_handler_data);
91 91
92/** 92/**
93 * irq_set_msi_desc - set MSI descriptor data for an irq 93 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
94 * @irq: Interrupt number 94 * @irq_base: Interrupt number base
95 * @entry: Pointer to MSI descriptor data 95 * @irq_offset: Interrupt number offset
96 * @entry: Pointer to MSI descriptor data
96 * 97 *
97 * Set the MSI descriptor entry for an irq 98 * Set the MSI descriptor entry for an irq at offset
98 */ 99 */
99int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 100int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
101 struct msi_desc *entry)
100{ 102{
101 unsigned long flags; 103 unsigned long flags;
102 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 104 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
103 105
104 if (!desc) 106 if (!desc)
105 return -EINVAL; 107 return -EINVAL;
106 desc->irq_data.msi_desc = entry; 108 desc->irq_data.msi_desc = entry;
107 if (entry) 109 if (entry && !irq_offset)
108 entry->irq = irq; 110 entry->irq = irq_base;
109 irq_put_desc_unlock(desc, flags); 111 irq_put_desc_unlock(desc, flags);
110 return 0; 112 return 0;
111} 113}
112 114
113/** 115/**
116 * irq_set_msi_desc - set MSI descriptor data for an irq
117 * @irq: Interrupt number
118 * @entry: Pointer to MSI descriptor data
119 *
120 * Set the MSI descriptor entry for an irq
121 */
122int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
123{
124 return irq_set_msi_desc_off(irq, 0, entry);
125}
126
127/**
114 * irq_set_chip_data - set irq chip data for an irq 128 * irq_set_chip_data - set irq chip data for an irq
115 * @irq: Interrupt number 129 * @irq: Interrupt number
116 * @data: Pointer to chip specific data 130 * @data: Pointer to chip specific data
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e49a288fa479..fa17855ca65a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/sched/rt.h>
19#include <linux/task_work.h> 20#include <linux/task_work.h>
20 21
21#include "internals.h" 22#include "internals.h"
@@ -1524,6 +1525,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
1524out: 1525out:
1525 irq_put_desc_unlock(desc, flags); 1526 irq_put_desc_unlock(desc, flags);
1526} 1527}
1528EXPORT_SYMBOL_GPL(enable_percpu_irq);
1527 1529
1528void disable_percpu_irq(unsigned int irq) 1530void disable_percpu_irq(unsigned int irq)
1529{ 1531{
@@ -1537,6 +1539,7 @@ void disable_percpu_irq(unsigned int irq)
1537 irq_percpu_disable(desc, cpu); 1539 irq_percpu_disable(desc, cpu);
1538 irq_put_desc_unlock(desc, flags); 1540 irq_put_desc_unlock(desc, flags);
1539} 1541}
1542EXPORT_SYMBOL_GPL(disable_percpu_irq);
1540 1543
1541/* 1544/*
1542 * Internal function to unregister a percpu irqaction. 1545 * Internal function to unregister a percpu irqaction.
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 611cd6003c45..7b5f012bde9d 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
80 80
81 /* 81 /*
82 * All handlers must agree on IRQF_SHARED, so we test just the 82 * All handlers must agree on IRQF_SHARED, so we test just the
83 * first. Check for action->next as well. 83 * first.
84 */ 84 */
85 action = desc->action; 85 action = desc->action;
86 if (!action || !(action->flags & IRQF_SHARED) || 86 if (!action || !(action->flags & IRQF_SHARED) ||
87 (action->flags & __IRQF_TIMER) || 87 (action->flags & __IRQF_TIMER))
88 (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
89 !action->next)
90 goto out; 88 goto out;
91 89
92 /* Already running on another processor */ 90 /* Already running on another processor */
@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
104 do { 102 do {
105 if (handle_irq_event(desc) == IRQ_HANDLED) 103 if (handle_irq_event(desc) == IRQ_HANDLED)
106 ret = IRQ_HANDLED; 104 ret = IRQ_HANDLED;
105 /* Make sure that there is still a valid action */
107 action = desc->action; 106 action = desc->action;
108 } while ((desc->istate & IRQS_PENDING) && action); 107 } while ((desc->istate & IRQS_PENDING) && action);
109 desc->istate &= ~IRQS_POLL_INPROGRESS; 108 desc->istate &= ~IRQS_POLL_INPROGRESS;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 1588e3b2871b..55fcce6065cf 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,37 +12,36 @@
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/hardirq.h> 13#include <linux/hardirq.h>
14#include <linux/irqflags.h> 14#include <linux/irqflags.h>
15#include <linux/sched.h>
16#include <linux/tick.h>
17#include <linux/cpu.h>
18#include <linux/notifier.h>
15#include <asm/processor.h> 19#include <asm/processor.h>
16 20
17/*
18 * An entry can be in one of four states:
19 *
20 * free NULL, 0 -> {claimed} : free to be used
21 * claimed NULL, 3 -> {pending} : claimed to be enqueued
22 * pending next, 3 -> {busy} : queued, pending callback
23 * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
24 */
25
26#define IRQ_WORK_PENDING 1UL
27#define IRQ_WORK_BUSY 2UL
28#define IRQ_WORK_FLAGS 3UL
29 21
30static DEFINE_PER_CPU(struct llist_head, irq_work_list); 22static DEFINE_PER_CPU(struct llist_head, irq_work_list);
23static DEFINE_PER_CPU(int, irq_work_raised);
31 24
32/* 25/*
33 * Claim the entry so that no one else will poke at it. 26 * Claim the entry so that no one else will poke at it.
34 */ 27 */
35static bool irq_work_claim(struct irq_work *work) 28static bool irq_work_claim(struct irq_work *work)
36{ 29{
37 unsigned long flags, nflags; 30 unsigned long flags, oflags, nflags;
38 31
32 /*
33 * Start with our best wish as a premise but only trust any
34 * flag value after cmpxchg() result.
35 */
36 flags = work->flags & ~IRQ_WORK_PENDING;
39 for (;;) { 37 for (;;) {
40 flags = work->flags;
41 if (flags & IRQ_WORK_PENDING)
42 return false;
43 nflags = flags | IRQ_WORK_FLAGS; 38 nflags = flags | IRQ_WORK_FLAGS;
44 if (cmpxchg(&work->flags, flags, nflags) == flags) 39 oflags = cmpxchg(&work->flags, flags, nflags);
40 if (oflags == flags)
45 break; 41 break;
42 if (oflags & IRQ_WORK_PENDING)
43 return false;
44 flags = oflags;
46 cpu_relax(); 45 cpu_relax();
47 } 46 }
48 47
@@ -57,57 +56,69 @@ void __weak arch_irq_work_raise(void)
57} 56}
58 57
59/* 58/*
60 * Queue the entry and raise the IPI if needed. 59 * Enqueue the irq_work @entry unless it's already pending
60 * somewhere.
61 *
62 * Can be re-enqueued while the callback is still in progress.
61 */ 63 */
62static void __irq_work_queue(struct irq_work *work) 64void irq_work_queue(struct irq_work *work)
63{ 65{
64 bool empty; 66 /* Only queue if not already pending */
67 if (!irq_work_claim(work))
68 return;
65 69
70 /* Queue the entry and raise the IPI if needed. */
66 preempt_disable(); 71 preempt_disable();
67 72
68 empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); 73 llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
69 /* The list was empty, raise self-interrupt to start processing. */ 74
70 if (empty) 75 /*
71 arch_irq_work_raise(); 76 * If the work is not "lazy" or the tick is stopped, raise the irq
77 * work interrupt (if supported by the arch), otherwise, just wait
78 * for the next tick.
79 */
80 if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
81 if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
82 arch_irq_work_raise();
83 }
72 84
73 preempt_enable(); 85 preempt_enable();
74} 86}
87EXPORT_SYMBOL_GPL(irq_work_queue);
75 88
76/* 89bool irq_work_needs_cpu(void)
77 * Enqueue the irq_work @entry, returns true on success, failure when the
78 * @entry was already enqueued by someone else.
79 *
80 * Can be re-enqueued while the callback is still in progress.
81 */
82bool irq_work_queue(struct irq_work *work)
83{ 90{
84 if (!irq_work_claim(work)) { 91 struct llist_head *this_list;
85 /* 92
86 * Already enqueued, can't do! 93 this_list = &__get_cpu_var(irq_work_list);
87 */ 94 if (llist_empty(this_list))
88 return false; 95 return false;
89 }
90 96
91 __irq_work_queue(work); 97 /* All work should have been flushed before going offline */
98 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
99
92 return true; 100 return true;
93} 101}
94EXPORT_SYMBOL_GPL(irq_work_queue);
95 102
96/* 103static void __irq_work_run(void)
97 * Run the irq_work entries on this cpu. Requires to be ran from hardirq
98 * context with local IRQs disabled.
99 */
100void irq_work_run(void)
101{ 104{
105 unsigned long flags;
102 struct irq_work *work; 106 struct irq_work *work;
103 struct llist_head *this_list; 107 struct llist_head *this_list;
104 struct llist_node *llnode; 108 struct llist_node *llnode;
105 109
110
111 /*
112 * Reset the "raised" state right before we check the list because
113 * an NMI may enqueue after we find the list empty from the runner.
114 */
115 __this_cpu_write(irq_work_raised, 0);
116 barrier();
117
106 this_list = &__get_cpu_var(irq_work_list); 118 this_list = &__get_cpu_var(irq_work_list);
107 if (llist_empty(this_list)) 119 if (llist_empty(this_list))
108 return; 120 return;
109 121
110 BUG_ON(!in_irq());
111 BUG_ON(!irqs_disabled()); 122 BUG_ON(!irqs_disabled());
112 123
113 llnode = llist_del_all(this_list); 124 llnode = llist_del_all(this_list);
@@ -119,16 +130,31 @@ void irq_work_run(void)
119 /* 130 /*
120 * Clear the PENDING bit, after this point the @work 131 * Clear the PENDING bit, after this point the @work
121 * can be re-used. 132 * can be re-used.
133 * Make it immediately visible so that other CPUs trying
134 * to claim that work don't rely on us to handle their data
135 * while we are in the middle of the func.
122 */ 136 */
123 work->flags = IRQ_WORK_BUSY; 137 flags = work->flags & ~IRQ_WORK_PENDING;
138 xchg(&work->flags, flags);
139
124 work->func(work); 140 work->func(work);
125 /* 141 /*
126 * Clear the BUSY bit and return to the free state if 142 * Clear the BUSY bit and return to the free state if
127 * no-one else claimed it meanwhile. 143 * no-one else claimed it meanwhile.
128 */ 144 */
129 (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0); 145 (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
130 } 146 }
131} 147}
148
149/*
150 * Run the irq_work entries on this cpu. Requires to be ran from hardirq
151 * context with local IRQs disabled.
152 */
153void irq_work_run(void)
154{
155 BUG_ON(!in_irq());
156 __irq_work_run();
157}
132EXPORT_SYMBOL_GPL(irq_work_run); 158EXPORT_SYMBOL_GPL(irq_work_run);
133 159
134/* 160/*
@@ -143,3 +169,35 @@ void irq_work_sync(struct irq_work *work)
143 cpu_relax(); 169 cpu_relax();
144} 170}
145EXPORT_SYMBOL_GPL(irq_work_sync); 171EXPORT_SYMBOL_GPL(irq_work_sync);
172
173#ifdef CONFIG_HOTPLUG_CPU
174static int irq_work_cpu_notify(struct notifier_block *self,
175 unsigned long action, void *hcpu)
176{
177 long cpu = (long)hcpu;
178
179 switch (action) {
180 case CPU_DYING:
181 /* Called from stop_machine */
182 if (WARN_ON_ONCE(cpu != smp_processor_id()))
183 break;
184 __irq_work_run();
185 break;
186 default:
187 break;
188 }
189 return NOTIFY_OK;
190}
191
192static struct notifier_block cpu_notify;
193
194static __init int irq_work_init_cpu_notifier(void)
195{
196 cpu_notify.notifier_call = irq_work_cpu_notify;
197 cpu_notify.priority = 0;
198 register_cpu_notifier(&cpu_notify);
199 return 0;
200}
201device_initcall(irq_work_init_cpu_notifier);
202
203#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 0023a87e8de6..56dd34976d7b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -38,6 +38,7 @@
38#include <linux/suspend.h> 38#include <linux/suspend.h>
39#include <linux/rwsem.h> 39#include <linux/rwsem.h>
40#include <linux/ptrace.h> 40#include <linux/ptrace.h>
41#include <linux/async.h>
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42 43
43#include <trace/events/module.h> 44#include <trace/events/module.h>
@@ -130,6 +131,14 @@ int __request_module(bool wait, const char *fmt, ...)
130#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 131#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
131 static int kmod_loop_msg; 132 static int kmod_loop_msg;
132 133
134 /*
135 * We don't allow synchronous module loading from async. Module
136 * init may invoke async_synchronize_full() which will end up
137 * waiting for this task which already is waiting for the module
138 * loading to complete, leading to a deadlock.
139 */
140 WARN_ON_ONCE(wait && current_is_async());
141
133 va_start(args, fmt); 142 va_start(args, fmt);
134 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 143 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
135 va_end(args); 144 va_end(args);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa409..550294d58a02 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
471 471
472static void kprobe_optimizer(struct work_struct *work); 472static void kprobe_optimizer(struct work_struct *work);
473static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 473static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
474static DECLARE_COMPLETION(optimizer_comp);
475#define OPTIMIZE_DELAY 5 474#define OPTIMIZE_DELAY 5
476 475
477/* 476/*
@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
552/* Start optimizer after OPTIMIZE_DELAY passed */ 551/* Start optimizer after OPTIMIZE_DELAY passed */
553static __kprobes void kick_kprobe_optimizer(void) 552static __kprobes void kick_kprobe_optimizer(void)
554{ 553{
555 if (!delayed_work_pending(&optimizing_work)) 554 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
556 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
557} 555}
558 556
559/* Kprobe jump optimizer */ 557/* Kprobe jump optimizer */
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
592 /* Step 5: Kick optimizer again if needed */ 590 /* Step 5: Kick optimizer again if needed */
593 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) 591 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
594 kick_kprobe_optimizer(); 592 kick_kprobe_optimizer();
595 else
596 /* Wake up all waiters */
597 complete_all(&optimizer_comp);
598} 593}
599 594
600/* Wait for completing optimization and unoptimization */ 595/* Wait for completing optimization and unoptimization */
601static __kprobes void wait_for_kprobe_optimizer(void) 596static __kprobes void wait_for_kprobe_optimizer(void)
602{ 597{
603 if (delayed_work_pending(&optimizing_work)) 598 mutex_lock(&kprobe_mutex);
604 wait_for_completion(&optimizer_comp); 599
600 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
601 mutex_unlock(&kprobe_mutex);
602
603 /* this will also make optimizing_work execute immmediately */
604 flush_delayed_work(&optimizing_work);
605 /* @optimizing_work might not have been queued yet, relax */
606 cpu_relax();
607
608 mutex_lock(&kprobe_mutex);
609 }
610
611 mutex_unlock(&kprobe_mutex);
605} 612}
606 613
607/* Optimize kprobe if p is ready to be optimized */ 614/* Optimize kprobe if p is ready to be optimized */
@@ -919,7 +926,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
919} 926}
920#endif /* CONFIG_OPTPROBES */ 927#endif /* CONFIG_OPTPROBES */
921 928
922#ifdef KPROBES_CAN_USE_FTRACE 929#ifdef CONFIG_KPROBES_ON_FTRACE
923static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { 930static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
924 .func = kprobe_ftrace_handler, 931 .func = kprobe_ftrace_handler,
925 .flags = FTRACE_OPS_FL_SAVE_REGS, 932 .flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +971,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
964 (unsigned long)p->addr, 1, 0); 971 (unsigned long)p->addr, 1, 0);
965 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); 972 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
966} 973}
967#else /* !KPROBES_CAN_USE_FTRACE */ 974#else /* !CONFIG_KPROBES_ON_FTRACE */
968#define prepare_kprobe(p) arch_prepare_kprobe(p) 975#define prepare_kprobe(p) arch_prepare_kprobe(p)
969#define arm_kprobe_ftrace(p) do {} while (0) 976#define arm_kprobe_ftrace(p) do {} while (0)
970#define disarm_kprobe_ftrace(p) do {} while (0) 977#define disarm_kprobe_ftrace(p) do {} while (0)
@@ -1414,12 +1421,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
1414 */ 1421 */
1415 ftrace_addr = ftrace_location((unsigned long)p->addr); 1422 ftrace_addr = ftrace_location((unsigned long)p->addr);
1416 if (ftrace_addr) { 1423 if (ftrace_addr) {
1417#ifdef KPROBES_CAN_USE_FTRACE 1424#ifdef CONFIG_KPROBES_ON_FTRACE
1418 /* Given address is not on the instruction boundary */ 1425 /* Given address is not on the instruction boundary */
1419 if ((unsigned long)p->addr != ftrace_addr) 1426 if ((unsigned long)p->addr != ftrace_addr)
1420 return -EILSEQ; 1427 return -EILSEQ;
1421 p->flags |= KPROBE_FLAG_FTRACE; 1428 p->flags |= KPROBE_FLAG_FTRACE;
1422#else /* !KPROBES_CAN_USE_FTRACE */ 1429#else /* !CONFIG_KPROBES_ON_FTRACE */
1423 return -EINVAL; 1430 return -EINVAL;
1424#endif 1431#endif
1425 } 1432 }
diff --git a/kernel/module.c b/kernel/module.c
index b10b048367e1..eab08274ec9b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -188,6 +188,7 @@ struct load_info {
188 ongoing or failed initialization etc. */ 188 ongoing or failed initialization etc. */
189static inline int strong_try_module_get(struct module *mod) 189static inline int strong_try_module_get(struct module *mod)
190{ 190{
191 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
191 if (mod && mod->state == MODULE_STATE_COMING) 192 if (mod && mod->state == MODULE_STATE_COMING)
192 return -EBUSY; 193 return -EBUSY;
193 if (try_module_get(mod)) 194 if (try_module_get(mod))
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
343#endif 344#endif
344 }; 345 };
345 346
347 if (mod->state == MODULE_STATE_UNFORMED)
348 continue;
349
346 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) 350 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
347 return true; 351 return true;
348 } 352 }
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,
450EXPORT_SYMBOL_GPL(find_symbol); 454EXPORT_SYMBOL_GPL(find_symbol);
451 455
452/* Search for module by name: must hold module_mutex. */ 456/* Search for module by name: must hold module_mutex. */
453struct module *find_module(const char *name) 457static struct module *find_module_all(const char *name,
458 bool even_unformed)
454{ 459{
455 struct module *mod; 460 struct module *mod;
456 461
457 list_for_each_entry(mod, &modules, list) { 462 list_for_each_entry(mod, &modules, list) {
463 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
464 continue;
458 if (strcmp(mod->name, name) == 0) 465 if (strcmp(mod->name, name) == 0)
459 return mod; 466 return mod;
460 } 467 }
461 return NULL; 468 return NULL;
462} 469}
470
471struct module *find_module(const char *name)
472{
473 return find_module_all(name, false);
474}
463EXPORT_SYMBOL_GPL(find_module); 475EXPORT_SYMBOL_GPL(find_module);
464 476
465#ifdef CONFIG_SMP 477#ifdef CONFIG_SMP
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)
525 preempt_disable(); 537 preempt_disable();
526 538
527 list_for_each_entry_rcu(mod, &modules, list) { 539 list_for_each_entry_rcu(mod, &modules, list) {
540 if (mod->state == MODULE_STATE_UNFORMED)
541 continue;
528 if (!mod->percpu_size) 542 if (!mod->percpu_size)
529 continue; 543 continue;
530 for_each_possible_cpu(cpu) { 544 for_each_possible_cpu(cpu) {
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,
1048 case MODULE_STATE_GOING: 1062 case MODULE_STATE_GOING:
1049 state = "going"; 1063 state = "going";
1050 break; 1064 break;
1065 default:
1066 BUG();
1051 } 1067 }
1052 return sprintf(buffer, "%s\n", state); 1068 return sprintf(buffer, "%s\n", state);
1053} 1069}
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)
1786 1802
1787 mutex_lock(&module_mutex); 1803 mutex_lock(&module_mutex);
1788 list_for_each_entry_rcu(mod, &modules, list) { 1804 list_for_each_entry_rcu(mod, &modules, list) {
1805 if (mod->state == MODULE_STATE_UNFORMED)
1806 continue;
1789 if ((mod->module_core) && (mod->core_text_size)) { 1807 if ((mod->module_core) && (mod->core_text_size)) {
1790 set_page_attributes(mod->module_core, 1808 set_page_attributes(mod->module_core,
1791 mod->module_core + mod->core_text_size, 1809 mod->module_core + mod->core_text_size,
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)
1807 1825
1808 mutex_lock(&module_mutex); 1826 mutex_lock(&module_mutex);
1809 list_for_each_entry_rcu(mod, &modules, list) { 1827 list_for_each_entry_rcu(mod, &modules, list) {
1828 if (mod->state == MODULE_STATE_UNFORMED)
1829 continue;
1810 if ((mod->module_core) && (mod->core_text_size)) { 1830 if ((mod->module_core) && (mod->core_text_size)) {
1811 set_page_attributes(mod->module_core, 1831 set_page_attributes(mod->module_core,
1812 mod->module_core + mod->core_text_size, 1832 mod->module_core + mod->core_text_size,
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)
2527 err = -EFBIG; 2547 err = -EFBIG;
2528 goto out; 2548 goto out;
2529 } 2549 }
2550
2551 /* Don't hand 0 to vmalloc, it whines. */
2552 if (stat.size == 0) {
2553 err = -EINVAL;
2554 goto out;
2555 }
2556
2530 info->hdr = vmalloc(stat.size); 2557 info->hdr = vmalloc(stat.size);
2531 if (!info->hdr) { 2558 if (!info->hdr) {
2532 err = -ENOMEM; 2559 err = -ENOMEM;
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)
2990 bool ret; 3017 bool ret;
2991 3018
2992 mutex_lock(&module_mutex); 3019 mutex_lock(&module_mutex);
2993 mod = find_module(name); 3020 mod = find_module_all(name, true);
2994 ret = !mod || mod->state != MODULE_STATE_COMING; 3021 ret = !mod || mod->state == MODULE_STATE_LIVE
3022 || mod->state == MODULE_STATE_GOING;
2995 mutex_unlock(&module_mutex); 3023 mutex_unlock(&module_mutex);
2996 3024
2997 return ret; 3025 return ret;
@@ -3136,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,
3136 goto free_copy; 3164 goto free_copy;
3137 } 3165 }
3138 3166
3167 /*
3168 * We try to place it in the list now to make sure it's unique
3169 * before we dedicate too many resources. In particular,
3170 * temporary percpu memory exhaustion.
3171 */
3172 mod->state = MODULE_STATE_UNFORMED;
3173again:
3174 mutex_lock(&module_mutex);
3175 if ((old = find_module_all(mod->name, true)) != NULL) {
3176 if (old->state == MODULE_STATE_COMING
3177 || old->state == MODULE_STATE_UNFORMED) {
3178 /* Wait in case it fails to load. */
3179 mutex_unlock(&module_mutex);
3180 err = wait_event_interruptible(module_wq,
3181 finished_loading(mod->name));
3182 if (err)
3183 goto free_module;
3184 goto again;
3185 }
3186 err = -EEXIST;
3187 mutex_unlock(&module_mutex);
3188 goto free_module;
3189 }
3190 list_add_rcu(&mod->list, &modules);
3191 mutex_unlock(&module_mutex);
3192
3139#ifdef CONFIG_MODULE_SIG 3193#ifdef CONFIG_MODULE_SIG
3140 mod->sig_ok = info->sig_ok; 3194 mod->sig_ok = info->sig_ok;
3141 if (!mod->sig_ok) 3195 if (!mod->sig_ok)
@@ -3145,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3145 /* Now module is in final location, initialize linked lists, etc. */ 3199 /* Now module is in final location, initialize linked lists, etc. */
3146 err = module_unload_init(mod); 3200 err = module_unload_init(mod);
3147 if (err) 3201 if (err)
3148 goto free_module; 3202 goto unlink_mod;
3149 3203
3150 /* Now we've got everything in the final locations, we can 3204 /* Now we've got everything in the final locations, we can
3151 * find optional sections. */ 3205 * find optional sections. */
@@ -3180,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,
3180 goto free_arch_cleanup; 3234 goto free_arch_cleanup;
3181 } 3235 }
3182 3236
3183 /* Mark state as coming so strong_try_module_get() ignores us. */
3184 mod->state = MODULE_STATE_COMING;
3185
3186 /* Now sew it into the lists so we can get lockdep and oops
3187 * info during argument parsing. No one should access us, since
3188 * strong_try_module_get() will fail.
3189 * lockdep/oops can run asynchronous, so use the RCU list insertion
3190 * function to insert in a way safe to concurrent readers.
3191 * The mutex protects against concurrent writers.
3192 */
3193again:
3194 mutex_lock(&module_mutex);
3195 if ((old = find_module(mod->name)) != NULL) {
3196 if (old->state == MODULE_STATE_COMING) {
3197 /* Wait in case it fails to load. */
3198 mutex_unlock(&module_mutex);
3199 err = wait_event_interruptible(module_wq,
3200 finished_loading(mod->name));
3201 if (err)
3202 goto free_arch_cleanup;
3203 goto again;
3204 }
3205 err = -EEXIST;
3206 goto unlock;
3207 }
3208
3209 /* This has to be done once we're sure module name is unique. */
3210 dynamic_debug_setup(info->debug, info->num_debug); 3237 dynamic_debug_setup(info->debug, info->num_debug);
3211 3238
3212 /* Find duplicate symbols */ 3239 mutex_lock(&module_mutex);
3240 /* Find duplicate symbols (must be called under lock). */
3213 err = verify_export_symbols(mod); 3241 err = verify_export_symbols(mod);
3214 if (err < 0) 3242 if (err < 0)
3215 goto ddebug; 3243 goto ddebug_cleanup;
3216 3244
3245 /* This relies on module_mutex for list integrity. */
3217 module_bug_finalize(info->hdr, info->sechdrs, mod); 3246 module_bug_finalize(info->hdr, info->sechdrs, mod);
3218 list_add_rcu(&mod->list, &modules); 3247
3248 /* Mark state as coming so strong_try_module_get() ignores us,
3249 * but kallsyms etc. can see us. */
3250 mod->state = MODULE_STATE_COMING;
3251
3219 mutex_unlock(&module_mutex); 3252 mutex_unlock(&module_mutex);
3220 3253
3221 /* Module is ready to execute: parsing args may do that. */ 3254 /* Module is ready to execute: parsing args may do that. */
3222 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3255 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3223 -32768, 32767, &ddebug_dyndbg_module_param_cb); 3256 -32768, 32767, &ddebug_dyndbg_module_param_cb);
3224 if (err < 0) 3257 if (err < 0)
3225 goto unlink; 3258 goto bug_cleanup;
3226 3259
3227 /* Link in to syfs. */ 3260 /* Link in to syfs. */
3228 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); 3261 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3229 if (err < 0) 3262 if (err < 0)
3230 goto unlink; 3263 goto bug_cleanup;
3231 3264
3232 /* Get rid of temporary copy. */ 3265 /* Get rid of temporary copy. */
3233 free_copy(info); 3266 free_copy(info);
@@ -3237,16 +3270,13 @@ again:
3237 3270
3238 return do_init_module(mod); 3271 return do_init_module(mod);
3239 3272
3240 unlink: 3273 bug_cleanup:
3274 /* module_bug_cleanup needs module_mutex protection */
3241 mutex_lock(&module_mutex); 3275 mutex_lock(&module_mutex);
3242 /* Unlink carefully: kallsyms could be walking list. */
3243 list_del_rcu(&mod->list);
3244 module_bug_cleanup(mod); 3276 module_bug_cleanup(mod);
3245 wake_up_all(&module_wq); 3277 ddebug_cleanup:
3246 ddebug:
3247 dynamic_debug_remove(info->debug);
3248 unlock:
3249 mutex_unlock(&module_mutex); 3278 mutex_unlock(&module_mutex);
3279 dynamic_debug_remove(info->debug);
3250 synchronize_sched(); 3280 synchronize_sched();
3251 kfree(mod->args); 3281 kfree(mod->args);
3252 free_arch_cleanup: 3282 free_arch_cleanup:
@@ -3255,6 +3285,12 @@ again:
3255 free_modinfo(mod); 3285 free_modinfo(mod);
3256 free_unload: 3286 free_unload:
3257 module_unload_free(mod); 3287 module_unload_free(mod);
3288 unlink_mod:
3289 mutex_lock(&module_mutex);
3290 /* Unlink carefully: kallsyms could be walking list. */
3291 list_del_rcu(&mod->list);
3292 wake_up_all(&module_wq);
3293 mutex_unlock(&module_mutex);
3258 free_module: 3294 free_module:
3259 module_deallocate(mod, info); 3295 module_deallocate(mod, info);
3260 free_copy: 3296 free_copy:
@@ -3377,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,
3377 3413
3378 preempt_disable(); 3414 preempt_disable();
3379 list_for_each_entry_rcu(mod, &modules, list) { 3415 list_for_each_entry_rcu(mod, &modules, list) {
3416 if (mod->state == MODULE_STATE_UNFORMED)
3417 continue;
3380 if (within_module_init(addr, mod) || 3418 if (within_module_init(addr, mod) ||
3381 within_module_core(addr, mod)) { 3419 within_module_core(addr, mod)) {
3382 if (modname) 3420 if (modname)
@@ -3400,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
3400 3438
3401 preempt_disable(); 3439 preempt_disable();
3402 list_for_each_entry_rcu(mod, &modules, list) { 3440 list_for_each_entry_rcu(mod, &modules, list) {
3441 if (mod->state == MODULE_STATE_UNFORMED)
3442 continue;
3403 if (within_module_init(addr, mod) || 3443 if (within_module_init(addr, mod) ||
3404 within_module_core(addr, mod)) { 3444 within_module_core(addr, mod)) {
3405 const char *sym; 3445 const char *sym;
@@ -3424,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3424 3464
3425 preempt_disable(); 3465 preempt_disable();
3426 list_for_each_entry_rcu(mod, &modules, list) { 3466 list_for_each_entry_rcu(mod, &modules, list) {
3467 if (mod->state == MODULE_STATE_UNFORMED)
3468 continue;
3427 if (within_module_init(addr, mod) || 3469 if (within_module_init(addr, mod) ||
3428 within_module_core(addr, mod)) { 3470 within_module_core(addr, mod)) {
3429 const char *sym; 3471 const char *sym;
@@ -3451,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3451 3493
3452 preempt_disable(); 3494 preempt_disable();
3453 list_for_each_entry_rcu(mod, &modules, list) { 3495 list_for_each_entry_rcu(mod, &modules, list) {
3496 if (mod->state == MODULE_STATE_UNFORMED)
3497 continue;
3454 if (symnum < mod->num_symtab) { 3498 if (symnum < mod->num_symtab) {
3455 *value = mod->symtab[symnum].st_value; 3499 *value = mod->symtab[symnum].st_value;
3456 *type = mod->symtab[symnum].st_info; 3500 *type = mod->symtab[symnum].st_info;
@@ -3493,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
3493 ret = mod_find_symname(mod, colon+1); 3537 ret = mod_find_symname(mod, colon+1);
3494 *colon = ':'; 3538 *colon = ':';
3495 } else { 3539 } else {
3496 list_for_each_entry_rcu(mod, &modules, list) 3540 list_for_each_entry_rcu(mod, &modules, list) {
3541 if (mod->state == MODULE_STATE_UNFORMED)
3542 continue;
3497 if ((ret = mod_find_symname(mod, name)) != 0) 3543 if ((ret = mod_find_symname(mod, name)) != 0)
3498 break; 3544 break;
3545 }
3499 } 3546 }
3500 preempt_enable(); 3547 preempt_enable();
3501 return ret; 3548 return ret;
@@ -3510,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3510 int ret; 3557 int ret;
3511 3558
3512 list_for_each_entry(mod, &modules, list) { 3559 list_for_each_entry(mod, &modules, list) {
3560 if (mod->state == MODULE_STATE_UNFORMED)
3561 continue;
3513 for (i = 0; i < mod->num_symtab; i++) { 3562 for (i = 0; i < mod->num_symtab; i++) {
3514 ret = fn(data, mod->strtab + mod->symtab[i].st_name, 3563 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3515 mod, mod->symtab[i].st_value); 3564 mod, mod->symtab[i].st_value);
@@ -3525,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)
3525{ 3574{
3526 int bx = 0; 3575 int bx = 0;
3527 3576
3577 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3528 if (mod->taints || 3578 if (mod->taints ||
3529 mod->state == MODULE_STATE_GOING || 3579 mod->state == MODULE_STATE_GOING ||
3530 mod->state == MODULE_STATE_COMING) { 3580 mod->state == MODULE_STATE_COMING) {
@@ -3566,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)
3566 struct module *mod = list_entry(p, struct module, list); 3616 struct module *mod = list_entry(p, struct module, list);
3567 char buf[8]; 3617 char buf[8];
3568 3618
3619 /* We always ignore unformed modules. */
3620 if (mod->state == MODULE_STATE_UNFORMED)
3621 return 0;
3622
3569 seq_printf(m, "%s %u", 3623 seq_printf(m, "%s %u",
3570 mod->name, mod->init_size + mod->core_size); 3624 mod->name, mod->init_size + mod->core_size);
3571 print_unload_info(m, mod); 3625 print_unload_info(m, mod);
@@ -3626,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
3626 3680
3627 preempt_disable(); 3681 preempt_disable();
3628 list_for_each_entry_rcu(mod, &modules, list) { 3682 list_for_each_entry_rcu(mod, &modules, list) {
3683 if (mod->state == MODULE_STATE_UNFORMED)
3684 continue;
3629 if (mod->num_exentries == 0) 3685 if (mod->num_exentries == 0)
3630 continue; 3686 continue;
3631 3687
@@ -3674,10 +3730,13 @@ struct module *__module_address(unsigned long addr)
3674 if (addr < module_addr_min || addr > module_addr_max) 3730 if (addr < module_addr_min || addr > module_addr_max)
3675 return NULL; 3731 return NULL;
3676 3732
3677 list_for_each_entry_rcu(mod, &modules, list) 3733 list_for_each_entry_rcu(mod, &modules, list) {
3734 if (mod->state == MODULE_STATE_UNFORMED)
3735 continue;
3678 if (within_module_core(addr, mod) 3736 if (within_module_core(addr, mod)
3679 || within_module_init(addr, mod)) 3737 || within_module_init(addr, mod))
3680 return mod; 3738 return mod;
3739 }
3681 return NULL; 3740 return NULL;
3682} 3741}
3683EXPORT_SYMBOL_GPL(__module_address); 3742EXPORT_SYMBOL_GPL(__module_address);
@@ -3730,8 +3789,11 @@ void print_modules(void)
3730 printk(KERN_DEFAULT "Modules linked in:"); 3789 printk(KERN_DEFAULT "Modules linked in:");
3731 /* Most callers should already have preempt disabled, but make sure */ 3790 /* Most callers should already have preempt disabled, but make sure */
3732 preempt_disable(); 3791 preempt_disable();
3733 list_for_each_entry_rcu(mod, &modules, list) 3792 list_for_each_entry_rcu(mod, &modules, list) {
3793 if (mod->state == MODULE_STATE_UNFORMED)
3794 continue;
3734 printk(" %s%s", mod->name, module_flags(mod, buf)); 3795 printk(" %s%s", mod->name, module_flags(mod, buf));
3796 }
3735 preempt_enable(); 3797 preempt_enable();
3736 if (last_unloaded_module[0]) 3798 if (last_unloaded_module[0])
3737 printk(" [last unloaded: %s]", last_unloaded_module); 3799 printk(" [last unloaded: %s]", last_unloaded_module);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a307cc9c9526..52f23011b6e0 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -19,6 +19,7 @@
19 */ 19 */
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/sched/rt.h>
22#include <linux/export.h> 23#include <linux/export.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
diff --git a/kernel/pid.c b/kernel/pid.c
index de9af600006f..f2c6a6825098 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -331,7 +331,7 @@ out:
331 return pid; 331 return pid;
332 332
333out_unlock: 333out_unlock:
334 spin_unlock(&pidmap_lock); 334 spin_unlock_irq(&pidmap_lock);
335out_free: 335out_free:
336 while (++i <= ns->level) 336 while (++i <= ns->level)
337 free_pidmap(pid->numbers + i); 337 free_pidmap(pid->numbers + i);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index a278cad1d5d6..8fd709c9bb58 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -155,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer,
155 155
156static inline cputime_t prof_ticks(struct task_struct *p) 156static inline cputime_t prof_ticks(struct task_struct *p)
157{ 157{
158 return p->utime + p->stime; 158 cputime_t utime, stime;
159
160 task_cputime(p, &utime, &stime);
161
162 return utime + stime;
159} 163}
160static inline cputime_t virt_ticks(struct task_struct *p) 164static inline cputime_t virt_ticks(struct task_struct *p)
161{ 165{
162 return p->utime; 166 cputime_t utime;
167
168 task_cputime(p, &utime, NULL);
169
170 return utime;
163} 171}
164 172
165static int 173static int
@@ -471,18 +479,23 @@ static void cleanup_timers(struct list_head *head,
471 */ 479 */
472void posix_cpu_timers_exit(struct task_struct *tsk) 480void posix_cpu_timers_exit(struct task_struct *tsk)
473{ 481{
482 cputime_t utime, stime;
483
474 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, 484 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
475 sizeof(unsigned long long)); 485 sizeof(unsigned long long));
486 task_cputime(tsk, &utime, &stime);
476 cleanup_timers(tsk->cpu_timers, 487 cleanup_timers(tsk->cpu_timers,
477 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); 488 utime, stime, tsk->se.sum_exec_runtime);
478 489
479} 490}
480void posix_cpu_timers_exit_group(struct task_struct *tsk) 491void posix_cpu_timers_exit_group(struct task_struct *tsk)
481{ 492{
482 struct signal_struct *const sig = tsk->signal; 493 struct signal_struct *const sig = tsk->signal;
494 cputime_t utime, stime;
483 495
496 task_cputime(tsk, &utime, &stime);
484 cleanup_timers(tsk->signal->cpu_timers, 497 cleanup_timers(tsk->signal->cpu_timers,
485 tsk->utime + sig->utime, tsk->stime + sig->stime, 498 utime + sig->utime, stime + sig->stime,
486 tsk->se.sum_exec_runtime + sig->sum_sched_runtime); 499 tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
487} 500}
488 501
@@ -1226,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
1226static inline int fastpath_timer_check(struct task_struct *tsk) 1239static inline int fastpath_timer_check(struct task_struct *tsk)
1227{ 1240{
1228 struct signal_struct *sig; 1241 struct signal_struct *sig;
1242 cputime_t utime, stime;
1243
1244 task_cputime(tsk, &utime, &stime);
1229 1245
1230 if (!task_cputime_zero(&tsk->cputime_expires)) { 1246 if (!task_cputime_zero(&tsk->cputime_expires)) {
1231 struct task_cputime task_sample = { 1247 struct task_cputime task_sample = {
1232 .utime = tsk->utime, 1248 .utime = utime,
1233 .stime = tsk->stime, 1249 .stime = stime,
1234 .sum_exec_runtime = tsk->se.sum_exec_runtime 1250 .sum_exec_runtime = tsk->se.sum_exec_runtime
1235 }; 1251 };
1236 1252
@@ -1401,8 +1417,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1401 while (!signal_pending(current)) { 1417 while (!signal_pending(current)) {
1402 if (timer.it.cpu.expires.sched == 0) { 1418 if (timer.it.cpu.expires.sched == 0) {
1403 /* 1419 /*
1404 * Our timer fired and was reset. 1420 * Our timer fired and was reset, below
1421 * deletion can not fail.
1405 */ 1422 */
1423 posix_cpu_timer_del(&timer);
1406 spin_unlock_irq(&timer.it_lock); 1424 spin_unlock_irq(&timer.it_lock);
1407 return 0; 1425 return 0;
1408 } 1426 }
@@ -1420,9 +1438,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1420 * We were interrupted by a signal. 1438 * We were interrupted by a signal.
1421 */ 1439 */
1422 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); 1440 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
1423 posix_cpu_timer_set(&timer, 0, &zero_it, it); 1441 error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
1442 if (!error) {
1443 /*
1444 * Timer is now unarmed, deletion can not fail.
1445 */
1446 posix_cpu_timer_del(&timer);
1447 }
1424 spin_unlock_irq(&timer.it_lock); 1448 spin_unlock_irq(&timer.it_lock);
1425 1449
1450 while (error == TIMER_RETRY) {
1451 /*
1452 * We need to handle case when timer was or is in the
1453 * middle of firing. In other cases we already freed
1454 * resources.
1455 */
1456 spin_lock_irq(&timer.it_lock);
1457 error = posix_cpu_timer_del(&timer);
1458 spin_unlock_irq(&timer.it_lock);
1459 }
1460
1426 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { 1461 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
1427 /* 1462 /*
1428 * It actually did fire already. 1463 * It actually did fire already.
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 69185ae6b701..10349d5f2ec3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -997,7 +997,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
997 997
998 err = kc->clock_adj(which_clock, &ktx); 998 err = kc->clock_adj(which_clock, &ktx);
999 999
1000 if (!err && copy_to_user(utx, &ktx, sizeof(ktx))) 1000 if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1001 return -EFAULT; 1001 return -EFAULT;
1002 1002
1003 return err; 1003 return err;
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index ca304046d9e2..c6422ffeda9a 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -66,7 +66,7 @@ static DECLARE_WORK(suspend_work, try_to_suspend);
66 66
67void queue_up_suspend_work(void) 67void queue_up_suspend_work(void)
68{ 68{
69 if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON) 69 if (autosleep_state > PM_SUSPEND_ON)
70 queue_work(autosleep_wq, &suspend_work); 70 queue_work(autosleep_wq, &suspend_work);
71} 71}
72 72
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 1c16f9167de1..d77663bfedeb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -313,7 +313,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
313static suspend_state_t decode_state(const char *buf, size_t n) 313static suspend_state_t decode_state(const char *buf, size_t n)
314{ 314{
315#ifdef CONFIG_SUSPEND 315#ifdef CONFIG_SUSPEND
316 suspend_state_t state = PM_SUSPEND_STANDBY; 316 suspend_state_t state = PM_SUSPEND_MIN;
317 const char * const *s; 317 const char * const *s;
318#endif 318#endif
319 char *p; 319 char *p;
@@ -553,6 +553,30 @@ power_attr(pm_trace_dev_match);
553 553
554#endif /* CONFIG_PM_TRACE */ 554#endif /* CONFIG_PM_TRACE */
555 555
556#ifdef CONFIG_FREEZER
557static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
558 struct kobj_attribute *attr, char *buf)
559{
560 return sprintf(buf, "%u\n", freeze_timeout_msecs);
561}
562
563static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
564 struct kobj_attribute *attr,
565 const char *buf, size_t n)
566{
567 unsigned long val;
568
569 if (kstrtoul(buf, 10, &val))
570 return -EINVAL;
571
572 freeze_timeout_msecs = val;
573 return n;
574}
575
576power_attr(pm_freeze_timeout);
577
578#endif /* CONFIG_FREEZER*/
579
556static struct attribute * g[] = { 580static struct attribute * g[] = {
557 &state_attr.attr, 581 &state_attr.attr,
558#ifdef CONFIG_PM_TRACE 582#ifdef CONFIG_PM_TRACE
@@ -576,6 +600,9 @@ static struct attribute * g[] = {
576 &pm_print_times_attr.attr, 600 &pm_print_times_attr.attr,
577#endif 601#endif
578#endif 602#endif
603#ifdef CONFIG_FREEZER
604 &pm_freeze_timeout_attr.attr,
605#endif
579 NULL, 606 NULL,
580}; 607};
581 608
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d5a258b60c6f..98088e0e71e8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -21,7 +21,7 @@
21/* 21/*
22 * Timeout for stopping processes 22 * Timeout for stopping processes
23 */ 23 */
24#define TIMEOUT (20 * HZ) 24unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
25 25
26static int try_to_freeze_tasks(bool user_only) 26static int try_to_freeze_tasks(bool user_only)
27{ 27{
@@ -36,7 +36,7 @@ static int try_to_freeze_tasks(bool user_only)
36 36
37 do_gettimeofday(&start); 37 do_gettimeofday(&start);
38 38
39 end_time = jiffies + TIMEOUT; 39 end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
40 40
41 if (!user_only) 41 if (!user_only)
42 freeze_workqueues_begin(); 42 freeze_workqueues_begin();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 9322ff7eaad6..587dddeebf15 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -359,8 +359,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
359 return; 359 return;
360 } 360 }
361 361
362 if (delayed_work_pending(&req->work)) 362 cancel_delayed_work_sync(&req->work);
363 cancel_delayed_work_sync(&req->work);
364 363
365 if (new_value != req->node.prio) 364 if (new_value != req->node.prio)
366 pm_qos_update_target( 365 pm_qos_update_target(
@@ -386,8 +385,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
386 "%s called for unknown object.", __func__)) 385 "%s called for unknown object.", __func__))
387 return; 386 return;
388 387
389 if (delayed_work_pending(&req->work)) 388 cancel_delayed_work_sync(&req->work);
390 cancel_delayed_work_sync(&req->work);
391 389
392 if (new_value != req->node.prio) 390 if (new_value != req->node.prio)
393 pm_qos_update_target( 391 pm_qos_update_target(
@@ -416,8 +414,7 @@ void pm_qos_remove_request(struct pm_qos_request *req)
416 return; 414 return;
417 } 415 }
418 416
419 if (delayed_work_pending(&req->work)) 417 cancel_delayed_work_sync(&req->work);
420 cancel_delayed_work_sync(&req->work);
421 418
422 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, 419 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
423 &req->node, PM_QOS_REMOVE_REQ, 420 &req->node, PM_QOS_REMOVE_REQ,
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c8b7446b27df..d4feda084a3a 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -30,12 +30,38 @@
30#include "power.h" 30#include "power.h"
31 31
32const char *const pm_states[PM_SUSPEND_MAX] = { 32const char *const pm_states[PM_SUSPEND_MAX] = {
33 [PM_SUSPEND_FREEZE] = "freeze",
33 [PM_SUSPEND_STANDBY] = "standby", 34 [PM_SUSPEND_STANDBY] = "standby",
34 [PM_SUSPEND_MEM] = "mem", 35 [PM_SUSPEND_MEM] = "mem",
35}; 36};
36 37
37static const struct platform_suspend_ops *suspend_ops; 38static const struct platform_suspend_ops *suspend_ops;
38 39
40static bool need_suspend_ops(suspend_state_t state)
41{
42 return !!(state > PM_SUSPEND_FREEZE);
43}
44
45static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
46static bool suspend_freeze_wake;
47
48static void freeze_begin(void)
49{
50 suspend_freeze_wake = false;
51}
52
53static void freeze_enter(void)
54{
55 wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
56}
57
58void freeze_wake(void)
59{
60 suspend_freeze_wake = true;
61 wake_up(&suspend_freeze_wait_head);
62}
63EXPORT_SYMBOL_GPL(freeze_wake);
64
39/** 65/**
40 * suspend_set_ops - Set the global suspend method table. 66 * suspend_set_ops - Set the global suspend method table.
41 * @ops: Suspend operations to use. 67 * @ops: Suspend operations to use.
@@ -50,8 +76,11 @@ EXPORT_SYMBOL_GPL(suspend_set_ops);
50 76
51bool valid_state(suspend_state_t state) 77bool valid_state(suspend_state_t state)
52{ 78{
79 if (state == PM_SUSPEND_FREEZE)
80 return true;
53 /* 81 /*
54 * All states need lowlevel support and need to be valid to the lowlevel 82 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
83 * support and need to be valid to the lowlevel
55 * implementation, no valid callback implies that none are valid. 84 * implementation, no valid callback implies that none are valid.
56 */ 85 */
57 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); 86 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
@@ -89,11 +118,11 @@ static int suspend_test(int level)
89 * hibernation). Run suspend notifiers, allocate the "suspend" console and 118 * hibernation). Run suspend notifiers, allocate the "suspend" console and
90 * freeze processes. 119 * freeze processes.
91 */ 120 */
92static int suspend_prepare(void) 121static int suspend_prepare(suspend_state_t state)
93{ 122{
94 int error; 123 int error;
95 124
96 if (!suspend_ops || !suspend_ops->enter) 125 if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
97 return -EPERM; 126 return -EPERM;
98 127
99 pm_prepare_console(); 128 pm_prepare_console();
@@ -137,7 +166,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
137{ 166{
138 int error; 167 int error;
139 168
140 if (suspend_ops->prepare) { 169 if (need_suspend_ops(state) && suspend_ops->prepare) {
141 error = suspend_ops->prepare(); 170 error = suspend_ops->prepare();
142 if (error) 171 if (error)
143 goto Platform_finish; 172 goto Platform_finish;
@@ -149,12 +178,23 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
149 goto Platform_finish; 178 goto Platform_finish;
150 } 179 }
151 180
152 if (suspend_ops->prepare_late) { 181 if (need_suspend_ops(state) && suspend_ops->prepare_late) {
153 error = suspend_ops->prepare_late(); 182 error = suspend_ops->prepare_late();
154 if (error) 183 if (error)
155 goto Platform_wake; 184 goto Platform_wake;
156 } 185 }
157 186
187 /*
188 * PM_SUSPEND_FREEZE equals
189 * frozen processes + suspended devices + idle processors.
190 * Thus we should invoke freeze_enter() soon after
191 * all the devices are suspended.
192 */
193 if (state == PM_SUSPEND_FREEZE) {
194 freeze_enter();
195 goto Platform_wake;
196 }
197
158 if (suspend_test(TEST_PLATFORM)) 198 if (suspend_test(TEST_PLATFORM))
159 goto Platform_wake; 199 goto Platform_wake;
160 200
@@ -182,13 +222,13 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
182 enable_nonboot_cpus(); 222 enable_nonboot_cpus();
183 223
184 Platform_wake: 224 Platform_wake:
185 if (suspend_ops->wake) 225 if (need_suspend_ops(state) && suspend_ops->wake)
186 suspend_ops->wake(); 226 suspend_ops->wake();
187 227
188 dpm_resume_start(PMSG_RESUME); 228 dpm_resume_start(PMSG_RESUME);
189 229
190 Platform_finish: 230 Platform_finish:
191 if (suspend_ops->finish) 231 if (need_suspend_ops(state) && suspend_ops->finish)
192 suspend_ops->finish(); 232 suspend_ops->finish();
193 233
194 return error; 234 return error;
@@ -203,11 +243,11 @@ int suspend_devices_and_enter(suspend_state_t state)
203 int error; 243 int error;
204 bool wakeup = false; 244 bool wakeup = false;
205 245
206 if (!suspend_ops) 246 if (need_suspend_ops(state) && !suspend_ops)
207 return -ENOSYS; 247 return -ENOSYS;
208 248
209 trace_machine_suspend(state); 249 trace_machine_suspend(state);
210 if (suspend_ops->begin) { 250 if (need_suspend_ops(state) && suspend_ops->begin) {
211 error = suspend_ops->begin(state); 251 error = suspend_ops->begin(state);
212 if (error) 252 if (error)
213 goto Close; 253 goto Close;
@@ -226,7 +266,7 @@ int suspend_devices_and_enter(suspend_state_t state)
226 266
227 do { 267 do {
228 error = suspend_enter(state, &wakeup); 268 error = suspend_enter(state, &wakeup);
229 } while (!error && !wakeup 269 } while (!error && !wakeup && need_suspend_ops(state)
230 && suspend_ops->suspend_again && suspend_ops->suspend_again()); 270 && suspend_ops->suspend_again && suspend_ops->suspend_again());
231 271
232 Resume_devices: 272 Resume_devices:
@@ -236,13 +276,13 @@ int suspend_devices_and_enter(suspend_state_t state)
236 ftrace_start(); 276 ftrace_start();
237 resume_console(); 277 resume_console();
238 Close: 278 Close:
239 if (suspend_ops->end) 279 if (need_suspend_ops(state) && suspend_ops->end)
240 suspend_ops->end(); 280 suspend_ops->end();
241 trace_machine_suspend(PWR_EVENT_EXIT); 281 trace_machine_suspend(PWR_EVENT_EXIT);
242 return error; 282 return error;
243 283
244 Recover_platform: 284 Recover_platform:
245 if (suspend_ops->recover) 285 if (need_suspend_ops(state) && suspend_ops->recover)
246 suspend_ops->recover(); 286 suspend_ops->recover();
247 goto Resume_devices; 287 goto Resume_devices;
248} 288}
@@ -278,12 +318,15 @@ static int enter_state(suspend_state_t state)
278 if (!mutex_trylock(&pm_mutex)) 318 if (!mutex_trylock(&pm_mutex))
279 return -EBUSY; 319 return -EBUSY;
280 320
321 if (state == PM_SUSPEND_FREEZE)
322 freeze_begin();
323
281 printk(KERN_INFO "PM: Syncing filesystems ... "); 324 printk(KERN_INFO "PM: Syncing filesystems ... ");
282 sys_sync(); 325 sys_sync();
283 printk("done.\n"); 326 printk("done.\n");
284 327
285 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 328 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
286 error = suspend_prepare(); 329 error = suspend_prepare(state);
287 if (error) 330 if (error)
288 goto Unlock; 331 goto Unlock;
289 332
diff --git a/kernel/printk.c b/kernel/printk.c
index 357f714ddd49..f24633afa46a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -42,6 +42,7 @@
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rculist.h> 43#include <linux/rculist.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/irq_work.h>
45 46
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
47 48
@@ -87,12 +88,6 @@ static DEFINE_SEMAPHORE(console_sem);
87struct console *console_drivers; 88struct console *console_drivers;
88EXPORT_SYMBOL_GPL(console_drivers); 89EXPORT_SYMBOL_GPL(console_drivers);
89 90
90#ifdef CONFIG_LOCKDEP
91static struct lockdep_map console_lock_dep_map = {
92 .name = "console_lock"
93};
94#endif
95
96/* 91/*
97 * This is used for debugging the mess that is the VT code by 92 * This is used for debugging the mess that is the VT code by
98 * keeping track if we have the console semaphore held. It's 93 * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1919,6 @@ void console_lock(void)
1924 return; 1919 return;
1925 console_locked = 1; 1920 console_locked = 1;
1926 console_may_schedule = 1; 1921 console_may_schedule = 1;
1927 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
1928} 1922}
1929EXPORT_SYMBOL(console_lock); 1923EXPORT_SYMBOL(console_lock);
1930 1924
@@ -1946,7 +1940,6 @@ int console_trylock(void)
1946 } 1940 }
1947 console_locked = 1; 1941 console_locked = 1;
1948 console_may_schedule = 0; 1942 console_may_schedule = 0;
1949 mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
1950 return 1; 1943 return 1;
1951} 1944}
1952EXPORT_SYMBOL(console_trylock); 1945EXPORT_SYMBOL(console_trylock);
@@ -1967,30 +1960,32 @@ int is_console_locked(void)
1967static DEFINE_PER_CPU(int, printk_pending); 1960static DEFINE_PER_CPU(int, printk_pending);
1968static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); 1961static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
1969 1962
1970void printk_tick(void) 1963static void wake_up_klogd_work_func(struct irq_work *irq_work)
1971{ 1964{
1972 if (__this_cpu_read(printk_pending)) { 1965 int pending = __this_cpu_xchg(printk_pending, 0);
1973 int pending = __this_cpu_xchg(printk_pending, 0); 1966
1974 if (pending & PRINTK_PENDING_SCHED) { 1967 if (pending & PRINTK_PENDING_SCHED) {
1975 char *buf = __get_cpu_var(printk_sched_buf); 1968 char *buf = __get_cpu_var(printk_sched_buf);
1976 printk(KERN_WARNING "[sched_delayed] %s", buf); 1969 printk(KERN_WARNING "[sched_delayed] %s", buf);
1977 }
1978 if (pending & PRINTK_PENDING_WAKEUP)
1979 wake_up_interruptible(&log_wait);
1980 } 1970 }
1981}
1982 1971
1983int printk_needs_cpu(int cpu) 1972 if (pending & PRINTK_PENDING_WAKEUP)
1984{ 1973 wake_up_interruptible(&log_wait);
1985 if (cpu_is_offline(cpu))
1986 printk_tick();
1987 return __this_cpu_read(printk_pending);
1988} 1974}
1989 1975
1976static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
1977 .func = wake_up_klogd_work_func,
1978 .flags = IRQ_WORK_LAZY,
1979};
1980
1990void wake_up_klogd(void) 1981void wake_up_klogd(void)
1991{ 1982{
1992 if (waitqueue_active(&log_wait)) 1983 preempt_disable();
1984 if (waitqueue_active(&log_wait)) {
1993 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); 1985 this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
1986 irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
1987 }
1988 preempt_enable();
1994} 1989}
1995 1990
1996static void console_cont_flush(char *text, size_t size) 1991static void console_cont_flush(char *text, size_t size)
@@ -2107,7 +2102,6 @@ skip:
2107 local_irq_restore(flags); 2102 local_irq_restore(flags);
2108 } 2103 }
2109 console_locked = 0; 2104 console_locked = 0;
2110 mutex_release(&console_lock_dep_map, 1, _RET_IP_);
2111 2105
2112 /* Release the exclusive_console once it is used */ 2106 /* Release the exclusive_console once it is used */
2113 if (unlikely(exclusive_console)) 2107 if (unlikely(exclusive_console))
@@ -2471,6 +2465,7 @@ int printk_sched(const char *fmt, ...)
2471 va_end(args); 2465 va_end(args);
2472 2466
2473 __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED); 2467 __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
2468 irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
2474 local_irq_restore(flags); 2469 local_irq_restore(flags);
2475 2470
2476 return r; 2471 return r;
diff --git a/kernel/profile.c b/kernel/profile.c
index 1f391819c42f..dc3384ee874e 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -37,9 +37,6 @@ struct profile_hit {
37#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit)) 37#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
38#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ) 38#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
39 39
40/* Oprofile timer tick hook */
41static int (*timer_hook)(struct pt_regs *) __read_mostly;
42
43static atomic_t *prof_buffer; 40static atomic_t *prof_buffer;
44static unsigned long prof_len, prof_shift; 41static unsigned long prof_len, prof_shift;
45 42
@@ -208,25 +205,6 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
208} 205}
209EXPORT_SYMBOL_GPL(profile_event_unregister); 206EXPORT_SYMBOL_GPL(profile_event_unregister);
210 207
211int register_timer_hook(int (*hook)(struct pt_regs *))
212{
213 if (timer_hook)
214 return -EBUSY;
215 timer_hook = hook;
216 return 0;
217}
218EXPORT_SYMBOL_GPL(register_timer_hook);
219
220void unregister_timer_hook(int (*hook)(struct pt_regs *))
221{
222 WARN_ON(hook != timer_hook);
223 timer_hook = NULL;
224 /* make sure all CPUs see the NULL hook */
225 synchronize_sched(); /* Allow ongoing interrupts to complete. */
226}
227EXPORT_SYMBOL_GPL(unregister_timer_hook);
228
229
230#ifdef CONFIG_SMP 208#ifdef CONFIG_SMP
231/* 209/*
232 * Each cpu has a pair of open-addressed hashtables for pending 210 * Each cpu has a pair of open-addressed hashtables for pending
@@ -436,8 +414,6 @@ void profile_tick(int type)
436{ 414{
437 struct pt_regs *regs = get_irq_regs(); 415 struct pt_regs *regs = get_irq_regs();
438 416
439 if (type == CPU_PROFILING && timer_hook)
440 timer_hook(regs);
441 if (!user_mode(regs) && prof_cpu_mask != NULL && 417 if (!user_mode(regs) && prof_cpu_mask != NULL &&
442 cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) 418 cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
443 profile_hit(type, (void *)profile_pc(regs)); 419 profile_hit(type, (void *)profile_pc(regs));
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1599157336a6..acbd28424d81 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
117 * TASK_KILLABLE sleeps. 117 * TASK_KILLABLE sleeps.
118 */ 118 */
119 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) 119 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
120 signal_wake_up(child, task_is_traced(child)); 120 ptrace_signal_wake_up(child, true);
121 121
122 spin_unlock(&child->sighand->siglock); 122 spin_unlock(&child->sighand->siglock);
123} 123}
124 124
125/* Ensure that nothing can wake it up, even SIGKILL */
126static bool ptrace_freeze_traced(struct task_struct *task)
127{
128 bool ret = false;
129
130 /* Lockless, nobody but us can set this flag */
131 if (task->jobctl & JOBCTL_LISTENING)
132 return ret;
133
134 spin_lock_irq(&task->sighand->siglock);
135 if (task_is_traced(task) && !__fatal_signal_pending(task)) {
136 task->state = __TASK_TRACED;
137 ret = true;
138 }
139 spin_unlock_irq(&task->sighand->siglock);
140
141 return ret;
142}
143
144static void ptrace_unfreeze_traced(struct task_struct *task)
145{
146 if (task->state != __TASK_TRACED)
147 return;
148
149 WARN_ON(!task->ptrace || task->parent != current);
150
151 spin_lock_irq(&task->sighand->siglock);
152 if (__fatal_signal_pending(task))
153 wake_up_state(task, __TASK_TRACED);
154 else
155 task->state = TASK_TRACED;
156 spin_unlock_irq(&task->sighand->siglock);
157}
158
125/** 159/**
126 * ptrace_check_attach - check whether ptracee is ready for ptrace operation 160 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
127 * @child: ptracee to check for 161 * @child: ptracee to check for
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)
139 * RETURNS: 173 * RETURNS:
140 * 0 on success, -ESRCH if %child is not ready. 174 * 0 on success, -ESRCH if %child is not ready.
141 */ 175 */
142int ptrace_check_attach(struct task_struct *child, bool ignore_state) 176static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
143{ 177{
144 int ret = -ESRCH; 178 int ret = -ESRCH;
145 179
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
151 * be changed by us so it's not changing right after this. 185 * be changed by us so it's not changing right after this.
152 */ 186 */
153 read_lock(&tasklist_lock); 187 read_lock(&tasklist_lock);
154 if ((child->ptrace & PT_PTRACED) && child->parent == current) { 188 if (child->ptrace && child->parent == current) {
189 WARN_ON(child->state == __TASK_TRACED);
155 /* 190 /*
156 * child->sighand can't be NULL, release_task() 191 * child->sighand can't be NULL, release_task()
157 * does ptrace_unlink() before __exit_signal(). 192 * does ptrace_unlink() before __exit_signal().
158 */ 193 */
159 spin_lock_irq(&child->sighand->siglock); 194 if (ignore_state || ptrace_freeze_traced(child))
160 WARN_ON_ONCE(task_is_stopped(child));
161 if (ignore_state || (task_is_traced(child) &&
162 !(child->jobctl & JOBCTL_LISTENING)))
163 ret = 0; 195 ret = 0;
164 spin_unlock_irq(&child->sighand->siglock);
165 } 196 }
166 read_unlock(&tasklist_lock); 197 read_unlock(&tasklist_lock);
167 198
168 if (!ret && !ignore_state) 199 if (!ret && !ignore_state) {
169 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; 200 if (!wait_task_inactive(child, __TASK_TRACED)) {
201 /*
202 * This can only happen if may_ptrace_stop() fails and
203 * ptrace_stop() changes ->state back to TASK_RUNNING,
204 * so we should not worry about leaking __TASK_TRACED.
205 */
206 WARN_ON(child->state == __TASK_TRACED);
207 ret = -ESRCH;
208 }
209 }
170 210
171 /* All systems go.. */
172 return ret; 211 return ret;
173} 212}
174 213
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
317 */ 356 */
318 if (task_is_stopped(task) && 357 if (task_is_stopped(task) &&
319 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) 358 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
320 signal_wake_up(task, 1); 359 signal_wake_up_state(task, __TASK_STOPPED);
321 360
322 spin_unlock(&task->sighand->siglock); 361 spin_unlock(&task->sighand->siglock);
323 362
@@ -673,6 +712,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
673 kiov->iov_len, kiov->iov_base); 712 kiov->iov_len, kiov->iov_base);
674} 713}
675 714
715/*
716 * This is declared in linux/regset.h and defined in machine-dependent
717 * code. We put the export here, near the primary machine-neutral use,
718 * to ensure no machine forgets it.
719 */
720EXPORT_SYMBOL_GPL(task_user_regset_view);
676#endif 721#endif
677 722
678int ptrace_request(struct task_struct *child, long request, 723int ptrace_request(struct task_struct *child, long request,
@@ -737,7 +782,7 @@ int ptrace_request(struct task_struct *child, long request,
737 * tracee into STOP. 782 * tracee into STOP.
738 */ 783 */
739 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) 784 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
740 signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); 785 ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
741 786
742 unlock_task_sighand(child, &flags); 787 unlock_task_sighand(child, &flags);
743 ret = 0; 788 ret = 0;
@@ -763,7 +808,7 @@ int ptrace_request(struct task_struct *child, long request,
763 * start of this trap and now. Trigger re-trap. 808 * start of this trap and now. Trigger re-trap.
764 */ 809 */
765 if (child->jobctl & JOBCTL_TRAP_NOTIFY) 810 if (child->jobctl & JOBCTL_TRAP_NOTIFY)
766 signal_wake_up(child, true); 811 ptrace_signal_wake_up(child, true);
767 ret = 0; 812 ret = 0;
768 } 813 }
769 unlock_task_sighand(child, &flags); 814 unlock_task_sighand(child, &flags);
@@ -900,6 +945,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
900 goto out_put_task_struct; 945 goto out_put_task_struct;
901 946
902 ret = arch_ptrace(child, request, addr, data); 947 ret = arch_ptrace(child, request, addr, data);
948 if (ret || request != PTRACE_DETACH)
949 ptrace_unfreeze_traced(child);
903 950
904 out_put_task_struct: 951 out_put_task_struct:
905 put_task_struct(child); 952 put_task_struct(child);
@@ -1039,8 +1086,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
1039 1086
1040 ret = ptrace_check_attach(child, request == PTRACE_KILL || 1087 ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1041 request == PTRACE_INTERRUPT); 1088 request == PTRACE_INTERRUPT);
1042 if (!ret) 1089 if (!ret) {
1043 ret = compat_arch_ptrace(child, request, addr, data); 1090 ret = compat_arch_ptrace(child, request, addr, data);
1091 if (ret || request != PTRACE_DETACH)
1092 ptrace_unfreeze_traced(child);
1093 }
1044 1094
1045 out_put_task_struct: 1095 out_put_task_struct:
1046 put_task_struct(child); 1096 put_task_struct(child);
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 20dfba576c2b..7f8e7590e3e5 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -111,4 +111,11 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
111 111
112extern int rcu_expedited; 112extern int rcu_expedited;
113 113
114#ifdef CONFIG_RCU_STALL_COMMON
115
116extern int rcu_cpu_stall_suppress;
117int rcu_jiffies_till_stall_check(void);
118
119#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
120
114#endif /* __LINUX_RCU_H */ 121#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a2cf76177b44..48ab70384a4c 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -404,11 +404,65 @@ EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
404#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 404#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
405 405
406#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE) 406#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
407void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp) 407void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
408 unsigned long secs,
409 unsigned long c_old, unsigned long c)
408{ 410{
409 trace_rcu_torture_read(rcutorturename, rhp); 411 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
410} 412}
411EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read); 413EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
412#else 414#else
413#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) 415#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
416 do { } while (0)
414#endif 417#endif
418
419#ifdef CONFIG_RCU_STALL_COMMON
420
421#ifdef CONFIG_PROVE_RCU
422#define RCU_STALL_DELAY_DELTA (5 * HZ)
423#else
424#define RCU_STALL_DELAY_DELTA 0
425#endif
426
427int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
428int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
429
430module_param(rcu_cpu_stall_suppress, int, 0644);
431module_param(rcu_cpu_stall_timeout, int, 0644);
432
433int rcu_jiffies_till_stall_check(void)
434{
435 int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
436
437 /*
438 * Limit check must be consistent with the Kconfig limits
439 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
440 */
441 if (till_stall_check < 3) {
442 ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
443 till_stall_check = 3;
444 } else if (till_stall_check > 300) {
445 ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
446 till_stall_check = 300;
447 }
448 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
449}
450
451static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
452{
453 rcu_cpu_stall_suppress = 1;
454 return NOTIFY_DONE;
455}
456
457static struct notifier_block rcu_panic_block = {
458 .notifier_call = rcu_panic,
459};
460
461static int __init check_cpu_stall_init(void)
462{
463 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
464 return 0;
465}
466early_initcall(check_cpu_stall_init);
467
468#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index e7dce58f9c2a..a0714a51b6d7 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -51,10 +51,10 @@ static void __call_rcu(struct rcu_head *head,
51 void (*func)(struct rcu_head *rcu), 51 void (*func)(struct rcu_head *rcu),
52 struct rcu_ctrlblk *rcp); 52 struct rcu_ctrlblk *rcp);
53 53
54#include "rcutiny_plugin.h"
55
56static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; 54static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
57 55
56#include "rcutiny_plugin.h"
57
58/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ 58/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
59static void rcu_idle_enter_common(long long newval) 59static void rcu_idle_enter_common(long long newval)
60{ 60{
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);
193 * interrupts don't count, we must be running at the first interrupt 193 * interrupts don't count, we must be running at the first interrupt
194 * level. 194 * level.
195 */ 195 */
196int rcu_is_cpu_rrupt_from_idle(void) 196static int rcu_is_cpu_rrupt_from_idle(void)
197{ 197{
198 return rcu_dynticks_nesting <= 1; 198 return rcu_dynticks_nesting <= 1;
199} 199}
@@ -205,6 +205,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
205 */ 205 */
206static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) 206static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
207{ 207{
208 reset_cpu_stall_ticks(rcp);
208 if (rcp->rcucblist != NULL && 209 if (rcp->rcucblist != NULL &&
209 rcp->donetail != rcp->curtail) { 210 rcp->donetail != rcp->curtail) {
210 rcp->donetail = rcp->curtail; 211 rcp->donetail = rcp->curtail;
@@ -251,6 +252,7 @@ void rcu_bh_qs(int cpu)
251 */ 252 */
252void rcu_check_callbacks(int cpu, int user) 253void rcu_check_callbacks(int cpu, int user)
253{ 254{
255 check_cpu_stalls();
254 if (user || rcu_is_cpu_rrupt_from_idle()) 256 if (user || rcu_is_cpu_rrupt_from_idle())
255 rcu_sched_qs(cpu); 257 rcu_sched_qs(cpu);
256 else if (!in_softirq()) 258 else if (!in_softirq())
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index f85016a2309b..8a233002faeb 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -33,6 +33,9 @@ struct rcu_ctrlblk {
33 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ 33 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
34 struct rcu_head **curtail; /* ->next pointer of last CB. */ 34 struct rcu_head **curtail; /* ->next pointer of last CB. */
35 RCU_TRACE(long qlen); /* Number of pending CBs. */ 35 RCU_TRACE(long qlen); /* Number of pending CBs. */
36 RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
37 RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
38 RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
36 RCU_TRACE(char *name); /* Name of RCU type. */ 39 RCU_TRACE(char *name); /* Name of RCU type. */
37}; 40};
38 41
@@ -54,6 +57,51 @@ int rcu_scheduler_active __read_mostly;
54EXPORT_SYMBOL_GPL(rcu_scheduler_active); 57EXPORT_SYMBOL_GPL(rcu_scheduler_active);
55#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 58#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
56 59
60#ifdef CONFIG_RCU_TRACE
61
62static void check_cpu_stall(struct rcu_ctrlblk *rcp)
63{
64 unsigned long j;
65 unsigned long js;
66
67 if (rcu_cpu_stall_suppress)
68 return;
69 rcp->ticks_this_gp++;
70 j = jiffies;
71 js = rcp->jiffies_stall;
72 if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
73 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
74 rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
75 jiffies - rcp->gp_start, rcp->qlen);
76 dump_stack();
77 }
78 if (*rcp->curtail && ULONG_CMP_GE(j, js))
79 rcp->jiffies_stall = jiffies +
80 3 * rcu_jiffies_till_stall_check() + 3;
81 else if (ULONG_CMP_GE(j, js))
82 rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
83}
84
85static void check_cpu_stall_preempt(void);
86
87#endif /* #ifdef CONFIG_RCU_TRACE */
88
89static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
90{
91#ifdef CONFIG_RCU_TRACE
92 rcp->ticks_this_gp = 0;
93 rcp->gp_start = jiffies;
94 rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
95#endif /* #ifdef CONFIG_RCU_TRACE */
96}
97
98static void check_cpu_stalls(void)
99{
100 RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
101 RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
102 RCU_TRACE(check_cpu_stall_preempt());
103}
104
57#ifdef CONFIG_TINY_PREEMPT_RCU 105#ifdef CONFIG_TINY_PREEMPT_RCU
58 106
59#include <linux/delay.h> 107#include <linux/delay.h>
@@ -448,6 +496,7 @@ static void rcu_preempt_start_gp(void)
448 /* Official start of GP. */ 496 /* Official start of GP. */
449 rcu_preempt_ctrlblk.gpnum++; 497 rcu_preempt_ctrlblk.gpnum++;
450 RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++); 498 RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
499 reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);
451 500
452 /* Any blocked RCU readers block new GP. */ 501 /* Any blocked RCU readers block new GP. */
453 if (rcu_preempt_blocked_readers_any()) 502 if (rcu_preempt_blocked_readers_any())
@@ -1054,4 +1103,11 @@ MODULE_AUTHOR("Paul E. McKenney");
1054MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation"); 1103MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
1055MODULE_LICENSE("GPL"); 1104MODULE_LICENSE("GPL");
1056 1105
1106static void check_cpu_stall_preempt(void)
1107{
1108#ifdef CONFIG_TINY_PREEMPT_RCU
1109 check_cpu_stall(&rcu_preempt_ctrlblk.rcb);
1110#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */
1111}
1112
1057#endif /* #ifdef CONFIG_RCU_TRACE */ 1113#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 31dea01c85fd..e1f3a8c96724 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -46,6 +46,7 @@
46#include <linux/stat.h> 46#include <linux/stat.h>
47#include <linux/srcu.h> 47#include <linux/srcu.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <linux/trace_clock.h>
49#include <asm/byteorder.h> 50#include <asm/byteorder.h>
50 51
51MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
@@ -207,6 +208,20 @@ MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
207#define rcu_can_boost() 0 208#define rcu_can_boost() 0
208#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 209#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
209 210
211#ifdef CONFIG_RCU_TRACE
212static u64 notrace rcu_trace_clock_local(void)
213{
214 u64 ts = trace_clock_local();
215 unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
216 return ts;
217}
218#else /* #ifdef CONFIG_RCU_TRACE */
219static u64 notrace rcu_trace_clock_local(void)
220{
221 return 0ULL;
222}
223#endif /* #else #ifdef CONFIG_RCU_TRACE */
224
210static unsigned long shutdown_time; /* jiffies to system shutdown. */ 225static unsigned long shutdown_time; /* jiffies to system shutdown. */
211static unsigned long boost_starttime; /* jiffies of next boost test start. */ 226static unsigned long boost_starttime; /* jiffies of next boost test start. */
212DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 227DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
@@ -845,7 +860,7 @@ static int rcu_torture_boost(void *arg)
845 /* Wait for the next test interval. */ 860 /* Wait for the next test interval. */
846 oldstarttime = boost_starttime; 861 oldstarttime = boost_starttime;
847 while (ULONG_CMP_LT(jiffies, oldstarttime)) { 862 while (ULONG_CMP_LT(jiffies, oldstarttime)) {
848 schedule_timeout_uninterruptible(1); 863 schedule_timeout_interruptible(oldstarttime - jiffies);
849 rcu_stutter_wait("rcu_torture_boost"); 864 rcu_stutter_wait("rcu_torture_boost");
850 if (kthread_should_stop() || 865 if (kthread_should_stop() ||
851 fullstop != FULLSTOP_DONTSTOP) 866 fullstop != FULLSTOP_DONTSTOP)
@@ -1028,7 +1043,6 @@ void rcutorture_trace_dump(void)
1028 return; 1043 return;
1029 if (atomic_xchg(&beenhere, 1) != 0) 1044 if (atomic_xchg(&beenhere, 1) != 0)
1030 return; 1045 return;
1031 do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
1032 ftrace_dump(DUMP_ALL); 1046 ftrace_dump(DUMP_ALL);
1033} 1047}
1034 1048
@@ -1042,13 +1056,16 @@ static void rcu_torture_timer(unsigned long unused)
1042{ 1056{
1043 int idx; 1057 int idx;
1044 int completed; 1058 int completed;
1059 int completed_end;
1045 static DEFINE_RCU_RANDOM(rand); 1060 static DEFINE_RCU_RANDOM(rand);
1046 static DEFINE_SPINLOCK(rand_lock); 1061 static DEFINE_SPINLOCK(rand_lock);
1047 struct rcu_torture *p; 1062 struct rcu_torture *p;
1048 int pipe_count; 1063 int pipe_count;
1064 unsigned long long ts;
1049 1065
1050 idx = cur_ops->readlock(); 1066 idx = cur_ops->readlock();
1051 completed = cur_ops->completed(); 1067 completed = cur_ops->completed();
1068 ts = rcu_trace_clock_local();
1052 p = rcu_dereference_check(rcu_torture_current, 1069 p = rcu_dereference_check(rcu_torture_current,
1053 rcu_read_lock_bh_held() || 1070 rcu_read_lock_bh_held() ||
1054 rcu_read_lock_sched_held() || 1071 rcu_read_lock_sched_held() ||
@@ -1058,7 +1075,6 @@ static void rcu_torture_timer(unsigned long unused)
1058 cur_ops->readunlock(idx); 1075 cur_ops->readunlock(idx);
1059 return; 1076 return;
1060 } 1077 }
1061 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1062 if (p->rtort_mbtest == 0) 1078 if (p->rtort_mbtest == 0)
1063 atomic_inc(&n_rcu_torture_mberror); 1079 atomic_inc(&n_rcu_torture_mberror);
1064 spin_lock(&rand_lock); 1080 spin_lock(&rand_lock);
@@ -1071,10 +1087,14 @@ static void rcu_torture_timer(unsigned long unused)
1071 /* Should not happen, but... */ 1087 /* Should not happen, but... */
1072 pipe_count = RCU_TORTURE_PIPE_LEN; 1088 pipe_count = RCU_TORTURE_PIPE_LEN;
1073 } 1089 }
1074 if (pipe_count > 1) 1090 completed_end = cur_ops->completed();
1091 if (pipe_count > 1) {
1092 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
1093 completed, completed_end);
1075 rcutorture_trace_dump(); 1094 rcutorture_trace_dump();
1095 }
1076 __this_cpu_inc(rcu_torture_count[pipe_count]); 1096 __this_cpu_inc(rcu_torture_count[pipe_count]);
1077 completed = cur_ops->completed() - completed; 1097 completed = completed_end - completed;
1078 if (completed > RCU_TORTURE_PIPE_LEN) { 1098 if (completed > RCU_TORTURE_PIPE_LEN) {
1079 /* Should not happen, but... */ 1099 /* Should not happen, but... */
1080 completed = RCU_TORTURE_PIPE_LEN; 1100 completed = RCU_TORTURE_PIPE_LEN;
@@ -1094,11 +1114,13 @@ static int
1094rcu_torture_reader(void *arg) 1114rcu_torture_reader(void *arg)
1095{ 1115{
1096 int completed; 1116 int completed;
1117 int completed_end;
1097 int idx; 1118 int idx;
1098 DEFINE_RCU_RANDOM(rand); 1119 DEFINE_RCU_RANDOM(rand);
1099 struct rcu_torture *p; 1120 struct rcu_torture *p;
1100 int pipe_count; 1121 int pipe_count;
1101 struct timer_list t; 1122 struct timer_list t;
1123 unsigned long long ts;
1102 1124
1103 VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); 1125 VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
1104 set_user_nice(current, 19); 1126 set_user_nice(current, 19);
@@ -1112,6 +1134,7 @@ rcu_torture_reader(void *arg)
1112 } 1134 }
1113 idx = cur_ops->readlock(); 1135 idx = cur_ops->readlock();
1114 completed = cur_ops->completed(); 1136 completed = cur_ops->completed();
1137 ts = rcu_trace_clock_local();
1115 p = rcu_dereference_check(rcu_torture_current, 1138 p = rcu_dereference_check(rcu_torture_current,
1116 rcu_read_lock_bh_held() || 1139 rcu_read_lock_bh_held() ||
1117 rcu_read_lock_sched_held() || 1140 rcu_read_lock_sched_held() ||
@@ -1122,7 +1145,6 @@ rcu_torture_reader(void *arg)
1122 schedule_timeout_interruptible(HZ); 1145 schedule_timeout_interruptible(HZ);
1123 continue; 1146 continue;
1124 } 1147 }
1125 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1126 if (p->rtort_mbtest == 0) 1148 if (p->rtort_mbtest == 0)
1127 atomic_inc(&n_rcu_torture_mberror); 1149 atomic_inc(&n_rcu_torture_mberror);
1128 cur_ops->read_delay(&rand); 1150 cur_ops->read_delay(&rand);
@@ -1132,10 +1154,14 @@ rcu_torture_reader(void *arg)
1132 /* Should not happen, but... */ 1154 /* Should not happen, but... */
1133 pipe_count = RCU_TORTURE_PIPE_LEN; 1155 pipe_count = RCU_TORTURE_PIPE_LEN;
1134 } 1156 }
1135 if (pipe_count > 1) 1157 completed_end = cur_ops->completed();
1158 if (pipe_count > 1) {
1159 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1160 ts, completed, completed_end);
1136 rcutorture_trace_dump(); 1161 rcutorture_trace_dump();
1162 }
1137 __this_cpu_inc(rcu_torture_count[pipe_count]); 1163 __this_cpu_inc(rcu_torture_count[pipe_count]);
1138 completed = cur_ops->completed() - completed; 1164 completed = completed_end - completed;
1139 if (completed > RCU_TORTURE_PIPE_LEN) { 1165 if (completed > RCU_TORTURE_PIPE_LEN) {
1140 /* Should not happen, but... */ 1166 /* Should not happen, but... */
1141 completed = RCU_TORTURE_PIPE_LEN; 1167 completed = RCU_TORTURE_PIPE_LEN;
@@ -1301,19 +1327,35 @@ static void rcu_torture_shuffle_tasks(void)
1301 set_cpus_allowed_ptr(reader_tasks[i], 1327 set_cpus_allowed_ptr(reader_tasks[i],
1302 shuffle_tmp_mask); 1328 shuffle_tmp_mask);
1303 } 1329 }
1304
1305 if (fakewriter_tasks) { 1330 if (fakewriter_tasks) {
1306 for (i = 0; i < nfakewriters; i++) 1331 for (i = 0; i < nfakewriters; i++)
1307 if (fakewriter_tasks[i]) 1332 if (fakewriter_tasks[i])
1308 set_cpus_allowed_ptr(fakewriter_tasks[i], 1333 set_cpus_allowed_ptr(fakewriter_tasks[i],
1309 shuffle_tmp_mask); 1334 shuffle_tmp_mask);
1310 } 1335 }
1311
1312 if (writer_task) 1336 if (writer_task)
1313 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); 1337 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
1314
1315 if (stats_task) 1338 if (stats_task)
1316 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); 1339 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
1340 if (stutter_task)
1341 set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
1342 if (fqs_task)
1343 set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
1344 if (shutdown_task)
1345 set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
1346#ifdef CONFIG_HOTPLUG_CPU
1347 if (onoff_task)
1348 set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
1349#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1350 if (stall_task)
1351 set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
1352 if (barrier_cbs_tasks)
1353 for (i = 0; i < n_barrier_cbs; i++)
1354 if (barrier_cbs_tasks[i])
1355 set_cpus_allowed_ptr(barrier_cbs_tasks[i],
1356 shuffle_tmp_mask);
1357 if (barrier_task)
1358 set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
1317 1359
1318 if (rcu_idle_cpu == -1) 1360 if (rcu_idle_cpu == -1)
1319 rcu_idle_cpu = num_online_cpus() - 1; 1361 rcu_idle_cpu = num_online_cpus() - 1;
@@ -1749,7 +1791,7 @@ static int rcu_torture_barrier_init(void)
1749 barrier_cbs_wq = 1791 barrier_cbs_wq =
1750 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]), 1792 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
1751 GFP_KERNEL); 1793 GFP_KERNEL);
1752 if (barrier_cbs_tasks == NULL || barrier_cbs_wq == 0) 1794 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
1753 return -ENOMEM; 1795 return -ENOMEM;
1754 for (i = 0; i < n_barrier_cbs; i++) { 1796 for (i = 0; i < n_barrier_cbs; i++) {
1755 init_waitqueue_head(&barrier_cbs_wq[i]); 1797 init_waitqueue_head(&barrier_cbs_wq[i]);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e441b77b614e..5b8ad827fd86 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -105,7 +105,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
105 * The rcu_scheduler_active variable transitions from zero to one just 105 * The rcu_scheduler_active variable transitions from zero to one just
106 * before the first task is spawned. So when this variable is zero, RCU 106 * before the first task is spawned. So when this variable is zero, RCU
107 * can assume that there is but one task, allowing RCU to (for example) 107 * can assume that there is but one task, allowing RCU to (for example)
108 * optimized synchronize_sched() to a simple barrier(). When this variable 108 * optimize synchronize_sched() to a simple barrier(). When this variable
109 * is one, RCU must actually do all the hard work required to detect real 109 * is one, RCU must actually do all the hard work required to detect real
110 * grace periods. This variable is also used to suppress boot-time false 110 * grace periods. This variable is also used to suppress boot-time false
111 * positives from lockdep-RCU error checking. 111 * positives from lockdep-RCU error checking.
@@ -217,12 +217,6 @@ module_param(blimit, long, 0444);
217module_param(qhimark, long, 0444); 217module_param(qhimark, long, 0444);
218module_param(qlowmark, long, 0444); 218module_param(qlowmark, long, 0444);
219 219
220int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
221int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
222
223module_param(rcu_cpu_stall_suppress, int, 0644);
224module_param(rcu_cpu_stall_timeout, int, 0644);
225
226static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS; 220static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
227static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; 221static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
228 222
@@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
305} 299}
306 300
307/* 301/*
308 * Does the current CPU require a yet-as-unscheduled grace period? 302 * Does the current CPU require a not-yet-started grace period?
303 * The caller must have disabled interrupts to prevent races with
304 * normal callback registry.
309 */ 305 */
310static int 306static int
311cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 307cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
312{ 308{
313 struct rcu_head **ntp; 309 int i;
314 310
315 ntp = rdp->nxttail[RCU_DONE_TAIL + 311 if (rcu_gp_in_progress(rsp))
316 (ACCESS_ONCE(rsp->completed) != rdp->completed)]; 312 return 0; /* No, a grace period is already in progress. */
317 return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp && 313 if (!rdp->nxttail[RCU_NEXT_TAIL])
318 !rcu_gp_in_progress(rsp); 314 return 0; /* No, this is a no-CBs (or offline) CPU. */
315 if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
316 return 1; /* Yes, this CPU has newly registered callbacks. */
317 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
318 if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
319 ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
320 rdp->nxtcompleted[i]))
321 return 1; /* Yes, CBs for future grace period. */
322 return 0; /* No grace period needed. */
319} 323}
320 324
321/* 325/*
@@ -336,7 +340,7 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
336static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, 340static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
337 bool user) 341 bool user)
338{ 342{
339 trace_rcu_dyntick("Start", oldval, 0); 343 trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
340 if (!user && !is_idle_task(current)) { 344 if (!user && !is_idle_task(current)) {
341 struct task_struct *idle = idle_task(smp_processor_id()); 345 struct task_struct *idle = idle_task(smp_processor_id());
342 346
@@ -727,7 +731,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
727 * interrupt from idle, return true. The caller must have at least 731 * interrupt from idle, return true. The caller must have at least
728 * disabled preemption. 732 * disabled preemption.
729 */ 733 */
730int rcu_is_cpu_rrupt_from_idle(void) 734static int rcu_is_cpu_rrupt_from_idle(void)
731{ 735{
732 return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1; 736 return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
733} 737}
@@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
793 return 0; 797 return 0;
794} 798}
795 799
796static int jiffies_till_stall_check(void)
797{
798 int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
799
800 /*
801 * Limit check must be consistent with the Kconfig limits
802 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
803 */
804 if (till_stall_check < 3) {
805 ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
806 till_stall_check = 3;
807 } else if (till_stall_check > 300) {
808 ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
809 till_stall_check = 300;
810 }
811 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
812}
813
814static void record_gp_stall_check_time(struct rcu_state *rsp) 800static void record_gp_stall_check_time(struct rcu_state *rsp)
815{ 801{
816 rsp->gp_start = jiffies; 802 rsp->gp_start = jiffies;
817 rsp->jiffies_stall = jiffies + jiffies_till_stall_check(); 803 rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
818} 804}
819 805
820/* 806/*
@@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
857 raw_spin_unlock_irqrestore(&rnp->lock, flags); 843 raw_spin_unlock_irqrestore(&rnp->lock, flags);
858 return; 844 return;
859 } 845 }
860 rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3; 846 rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
861 raw_spin_unlock_irqrestore(&rnp->lock, flags); 847 raw_spin_unlock_irqrestore(&rnp->lock, flags);
862 848
863 /* 849 /*
@@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
935 raw_spin_lock_irqsave(&rnp->lock, flags); 921 raw_spin_lock_irqsave(&rnp->lock, flags);
936 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall)) 922 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
937 rsp->jiffies_stall = jiffies + 923 rsp->jiffies_stall = jiffies +
938 3 * jiffies_till_stall_check() + 3; 924 3 * rcu_jiffies_till_stall_check() + 3;
939 raw_spin_unlock_irqrestore(&rnp->lock, flags); 925 raw_spin_unlock_irqrestore(&rnp->lock, flags);
940 926
941 set_need_resched(); /* kick ourselves to get things going. */ 927 set_need_resched(); /* kick ourselves to get things going. */
@@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
966 } 952 }
967} 953}
968 954
969static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
970{
971 rcu_cpu_stall_suppress = 1;
972 return NOTIFY_DONE;
973}
974
975/** 955/**
976 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period 956 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
977 * 957 *
@@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void)
989 rsp->jiffies_stall = jiffies + ULONG_MAX / 2; 969 rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
990} 970}
991 971
992static struct notifier_block rcu_panic_block = {
993 .notifier_call = rcu_panic,
994};
995
996static void __init check_cpu_stall_init(void)
997{
998 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
999}
1000
1001/* 972/*
1002 * Update CPU-local rcu_data state to record the newly noticed grace period. 973 * Update CPU-local rcu_data state to record the newly noticed grace period.
1003 * This is used both when we started the grace period and when we notice 974 * This is used both when we started the grace period and when we notice
@@ -1071,6 +1042,145 @@ static void init_callback_list(struct rcu_data *rdp)
1071} 1042}
1072 1043
1073/* 1044/*
1045 * Determine the value that ->completed will have at the end of the
1046 * next subsequent grace period. This is used to tag callbacks so that
1047 * a CPU can invoke callbacks in a timely fashion even if that CPU has
1048 * been dyntick-idle for an extended period with callbacks under the
1049 * influence of RCU_FAST_NO_HZ.
1050 *
1051 * The caller must hold rnp->lock with interrupts disabled.
1052 */
1053static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1054 struct rcu_node *rnp)
1055{
1056 /*
1057 * If RCU is idle, we just wait for the next grace period.
1058 * But we can only be sure that RCU is idle if we are looking
1059 * at the root rcu_node structure -- otherwise, a new grace
1060 * period might have started, but just not yet gotten around
1061 * to initializing the current non-root rcu_node structure.
1062 */
1063 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1064 return rnp->completed + 1;
1065
1066 /*
1067 * Otherwise, wait for a possible partial grace period and
1068 * then the subsequent full grace period.
1069 */
1070 return rnp->completed + 2;
1071}
1072
1073/*
1074 * If there is room, assign a ->completed number to any callbacks on
1075 * this CPU that have not already been assigned. Also accelerate any
1076 * callbacks that were previously assigned a ->completed number that has
1077 * since proven to be too conservative, which can happen if callbacks get
1078 * assigned a ->completed number while RCU is idle, but with reference to
1079 * a non-root rcu_node structure. This function is idempotent, so it does
1080 * not hurt to call it repeatedly.
1081 *
1082 * The caller must hold rnp->lock with interrupts disabled.
1083 */
1084static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1085 struct rcu_data *rdp)
1086{
1087 unsigned long c;
1088 int i;
1089
1090 /* If the CPU has no callbacks, nothing to do. */
1091 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1092 return;
1093
1094 /*
1095 * Starting from the sublist containing the callbacks most
1096 * recently assigned a ->completed number and working down, find the
1097 * first sublist that is not assignable to an upcoming grace period.
1098 * Such a sublist has something in it (first two tests) and has
1099 * a ->completed number assigned that will complete sooner than
1100 * the ->completed number for newly arrived callbacks (last test).
1101 *
1102 * The key point is that any later sublist can be assigned the
1103 * same ->completed number as the newly arrived callbacks, which
1104 * means that the callbacks in any of these later sublist can be
1105 * grouped into a single sublist, whether or not they have already
1106 * been assigned a ->completed number.
1107 */
1108 c = rcu_cbs_completed(rsp, rnp);
1109 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1110 if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1111 !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1112 break;
1113
1114 /*
1115 * If there are no sublist for unassigned callbacks, leave.
1116 * At the same time, advance "i" one sublist, so that "i" will
1117 * index into the sublist where all the remaining callbacks should
1118 * be grouped into.
1119 */
1120 if (++i >= RCU_NEXT_TAIL)
1121 return;
1122
1123 /*
1124 * Assign all subsequent callbacks' ->completed number to the next
1125 * full grace period and group them all in the sublist initially
1126 * indexed by "i".
1127 */
1128 for (; i <= RCU_NEXT_TAIL; i++) {
1129 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1130 rdp->nxtcompleted[i] = c;
1131 }
1132
1133 /* Trace depending on how much we were able to accelerate. */
1134 if (!*rdp->nxttail[RCU_WAIT_TAIL])
1135 trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
1136 else
1137 trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
1138}
1139
1140/*
1141 * Move any callbacks whose grace period has completed to the
1142 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1143 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1144 * sublist. This function is idempotent, so it does not hurt to
1145 * invoke it repeatedly. As long as it is not invoked -too- often...
1146 *
1147 * The caller must hold rnp->lock with interrupts disabled.
1148 */
1149static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1150 struct rcu_data *rdp)
1151{
1152 int i, j;
1153
1154 /* If the CPU has no callbacks, nothing to do. */
1155 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1156 return;
1157
1158 /*
1159 * Find all callbacks whose ->completed numbers indicate that they
1160 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1161 */
1162 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1163 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1164 break;
1165 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1166 }
1167 /* Clean up any sublist tail pointers that were misordered above. */
1168 for (j = RCU_WAIT_TAIL; j < i; j++)
1169 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1170
1171 /* Copy down callbacks to fill in empty sublists. */
1172 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1173 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1174 break;
1175 rdp->nxttail[j] = rdp->nxttail[i];
1176 rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1177 }
1178
1179 /* Classify any remaining callbacks. */
1180 rcu_accelerate_cbs(rsp, rnp, rdp);
1181}
1182
1183/*
1074 * Advance this CPU's callbacks, but only if the current grace period 1184 * Advance this CPU's callbacks, but only if the current grace period
1075 * has ended. This may be called only from the CPU to whom the rdp 1185 * has ended. This may be called only from the CPU to whom the rdp
1076 * belongs. In addition, the corresponding leaf rcu_node structure's 1186 * belongs. In addition, the corresponding leaf rcu_node structure's
@@ -1080,12 +1190,15 @@ static void
1080__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) 1190__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
1081{ 1191{
1082 /* Did another grace period end? */ 1192 /* Did another grace period end? */
1083 if (rdp->completed != rnp->completed) { 1193 if (rdp->completed == rnp->completed) {
1084 1194
1085 /* Advance callbacks. No harm if list empty. */ 1195 /* No, so just accelerate recent callbacks. */
1086 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; 1196 rcu_accelerate_cbs(rsp, rnp, rdp);
1087 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; 1197
1088 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 1198 } else {
1199
1200 /* Advance callbacks. */
1201 rcu_advance_cbs(rsp, rnp, rdp);
1089 1202
1090 /* Remember that we saw this grace-period completion. */ 1203 /* Remember that we saw this grace-period completion. */
1091 rdp->completed = rnp->completed; 1204 rdp->completed = rnp->completed;
@@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
1392 /* 1505 /*
1393 * Because there is no grace period in progress right now, 1506 * Because there is no grace period in progress right now,
1394 * any callbacks we have up to this point will be satisfied 1507 * any callbacks we have up to this point will be satisfied
1395 * by the next grace period. So promote all callbacks to be 1508 * by the next grace period. So this is a good place to
1396 * handled after the end of the next grace period. If the 1509 * assign a grace period number to recently posted callbacks.
1397 * CPU is not yet aware of the end of the previous grace period,
1398 * we need to allow for the callback advancement that will
1399 * occur when it does become aware. Deadlock prevents us from
1400 * making it aware at this point: We cannot acquire a leaf
1401 * rcu_node ->lock while holding the root rcu_node ->lock.
1402 */ 1510 */
1403 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 1511 rcu_accelerate_cbs(rsp, rnp, rdp);
1404 if (rdp->completed == rsp->completed)
1405 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
1406 1512
1407 rsp->gp_flags = RCU_GP_FLAG_INIT; 1513 rsp->gp_flags = RCU_GP_FLAG_INIT;
1408 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ 1514 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
@@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
1527 * This GP can't end until cpu checks in, so all of our 1633 * This GP can't end until cpu checks in, so all of our
1528 * callbacks can be processed during the next GP. 1634 * callbacks can be processed during the next GP.
1529 */ 1635 */
1530 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 1636 rcu_accelerate_cbs(rsp, rnp, rdp);
1531 1637
1532 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ 1638 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
1533 } 1639 }
@@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1779 long bl, count, count_lazy; 1885 long bl, count, count_lazy;
1780 int i; 1886 int i;
1781 1887
1782 /* If no callbacks are ready, just return.*/ 1888 /* If no callbacks are ready, just return. */
1783 if (!cpu_has_callbacks_ready_to_invoke(rdp)) { 1889 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
1784 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); 1890 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
1785 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), 1891 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
@@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp)
2008 2114
2009 WARN_ON_ONCE(rdp->beenonline == 0); 2115 WARN_ON_ONCE(rdp->beenonline == 0);
2010 2116
2011 /* 2117 /* Handle the end of a grace period that some other CPU ended. */
2012 * Advance callbacks in response to end of earlier grace
2013 * period that some other CPU ended.
2014 */
2015 rcu_process_gp_end(rsp, rdp); 2118 rcu_process_gp_end(rsp, rdp);
2016 2119
2017 /* Update RCU state based on any recent quiescent states. */ 2120 /* Update RCU state based on any recent quiescent states. */
2018 rcu_check_quiescent_state(rsp, rdp); 2121 rcu_check_quiescent_state(rsp, rdp);
2019 2122
2020 /* Does this CPU require a not-yet-started grace period? */ 2123 /* Does this CPU require a not-yet-started grace period? */
2124 local_irq_save(flags);
2021 if (cpu_needs_another_gp(rsp, rdp)) { 2125 if (cpu_needs_another_gp(rsp, rdp)) {
2022 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); 2126 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2023 rcu_start_gp(rsp, flags); /* releases above lock */ 2127 rcu_start_gp(rsp, flags); /* releases above lock */
2128 } else {
2129 local_irq_restore(flags);
2024 } 2130 }
2025 2131
2026 /* If there are callbacks ready, invoke them. */ 2132 /* If there are callbacks ready, invoke them. */
@@ -2719,9 +2825,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2719 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 2825 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2720 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); 2826 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
2721 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 2827 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2722#ifdef CONFIG_RCU_USER_QS
2723 WARN_ON_ONCE(rdp->dynticks->in_user);
2724#endif
2725 rdp->cpu = cpu; 2828 rdp->cpu = cpu;
2726 rdp->rsp = rsp; 2829 rdp->rsp = rsp;
2727 rcu_boot_init_nocb_percpu_data(rdp); 2830 rcu_boot_init_nocb_percpu_data(rdp);
@@ -2938,6 +3041,10 @@ static void __init rcu_init_one(struct rcu_state *rsp,
2938 3041
2939 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 3042 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
2940 3043
3044 /* Silence gcc 4.8 warning about array index out of range. */
3045 if (rcu_num_lvls > RCU_NUM_LVLS)
3046 panic("rcu_init_one: rcu_num_lvls overflow");
3047
2941 /* Initialize the level-tracking arrays. */ 3048 /* Initialize the level-tracking arrays. */
2942 3049
2943 for (i = 0; i < rcu_num_lvls; i++) 3050 for (i = 0; i < rcu_num_lvls; i++)
@@ -3074,7 +3181,6 @@ void __init rcu_init(void)
3074 cpu_notifier(rcu_cpu_notify, 0); 3181 cpu_notifier(rcu_cpu_notify, 0);
3075 for_each_online_cpu(cpu) 3182 for_each_online_cpu(cpu)
3076 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); 3183 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3077 check_cpu_stall_init();
3078} 3184}
3079 3185
3080#include "rcutree_plugin.h" 3186#include "rcutree_plugin.h"
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4b69291b093d..c896b5045d9d 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -102,10 +102,6 @@ struct rcu_dynticks {
102 /* idle-period nonlazy_posted snapshot. */ 102 /* idle-period nonlazy_posted snapshot. */
103 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ 103 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
104#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 104#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
105#ifdef CONFIG_RCU_USER_QS
106 bool ignore_user_qs; /* Treat userspace as extended QS or not */
107 bool in_user; /* Is the CPU in userland from RCU POV? */
108#endif
109}; 105};
110 106
111/* RCU's kthread states for tracing. */ 107/* RCU's kthread states for tracing. */
@@ -282,6 +278,8 @@ struct rcu_data {
282 */ 278 */
283 struct rcu_head *nxtlist; 279 struct rcu_head *nxtlist;
284 struct rcu_head **nxttail[RCU_NEXT_SIZE]; 280 struct rcu_head **nxttail[RCU_NEXT_SIZE];
281 unsigned long nxtcompleted[RCU_NEXT_SIZE];
282 /* grace periods for sublists. */
285 long qlen_lazy; /* # of lazy queued callbacks */ 283 long qlen_lazy; /* # of lazy queued callbacks */
286 long qlen; /* # of queued callbacks, incl lazy */ 284 long qlen; /* # of queued callbacks, incl lazy */
287 long qlen_last_fqs_check; 285 long qlen_last_fqs_check;
@@ -343,11 +341,6 @@ struct rcu_data {
343 341
344#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ 342#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
345 343
346#ifdef CONFIG_PROVE_RCU
347#define RCU_STALL_DELAY_DELTA (5 * HZ)
348#else
349#define RCU_STALL_DELAY_DELTA 0
350#endif
351#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ 344#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
352 /* to take at least one */ 345 /* to take at least one */
353 /* scheduling clock irq */ 346 /* scheduling clock irq */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b4..c1cc7e17ff9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
40#ifdef CONFIG_RCU_NOCB_CPU 40#ifdef CONFIG_RCU_NOCB_CPU
41static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ 41static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
42static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ 42static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
43static bool rcu_nocb_poll; /* Offload kthread are to poll. */ 43static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
44module_param(rcu_nocb_poll, bool, 0444);
45static char __initdata nocb_buf[NR_CPUS * 5]; 44static char __initdata nocb_buf[NR_CPUS * 5];
46#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 45#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
47 46
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
2159} 2158}
2160__setup("rcu_nocbs=", rcu_nocb_setup); 2159__setup("rcu_nocbs=", rcu_nocb_setup);
2161 2160
2161static int __init parse_rcu_nocb_poll(char *arg)
2162{
2163 rcu_nocb_poll = 1;
2164 return 0;
2165}
2166early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
2167
2162/* Is the specified CPU a no-CPUs CPU? */ 2168/* Is the specified CPU a no-CPUs CPU? */
2163static bool is_nocb_cpu(int cpu) 2169static bool is_nocb_cpu(int cpu)
2164{ 2170{
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
2366 for (;;) { 2372 for (;;) {
2367 /* If not polling, wait for next batch of callbacks. */ 2373 /* If not polling, wait for next batch of callbacks. */
2368 if (!rcu_nocb_poll) 2374 if (!rcu_nocb_poll)
2369 wait_event(rdp->nocb_wq, rdp->nocb_head); 2375 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2370 list = ACCESS_ONCE(rdp->nocb_head); 2376 list = ACCESS_ONCE(rdp->nocb_head);
2371 if (!list) { 2377 if (!list) {
2372 schedule_timeout_interruptible(1); 2378 schedule_timeout_interruptible(1);
2379 flush_signals(current);
2373 continue; 2380 continue;
2374 } 2381 }
2375 2382
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 16502d3a71c8..13b243a323fa 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -17,6 +17,7 @@
17 * See rt.c in preempt-rt for proper credits and further information 17 * See rt.c in preempt-rt for proper credits and further information
18 */ 18 */
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/sched/rt.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/export.h> 22#include <linux/export.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 98ec49475460..7890b10084a7 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -10,6 +10,7 @@
10#include <linux/kthread.h> 10#include <linux/kthread.h>
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/sched/rt.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/timer.h> 15#include <linux/timer.h>
15#include <linux/freezer.h> 16#include <linux/freezer.h>
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a242e691c993..1e09308bf2a1 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -13,6 +13,7 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/export.h> 14#include <linux/export.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/sched/rt.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
17 18
18#include "rtmutex_common.h" 19#include "rtmutex_common.h"
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21076a3..64de5f8b0c9e 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -35,6 +35,7 @@ static inline void autogroup_destroy(struct kref *kref)
35 ag->tg->rt_se = NULL; 35 ag->tg->rt_se = NULL;
36 ag->tg->rt_rq = NULL; 36 ag->tg->rt_rq = NULL;
37#endif 37#endif
38 sched_offline_group(ag->tg);
38 sched_destroy_group(ag->tg); 39 sched_destroy_group(ag->tg);
39} 40}
40 41
@@ -76,6 +77,8 @@ static inline struct autogroup *autogroup_create(void)
76 if (IS_ERR(tg)) 77 if (IS_ERR(tg))
77 goto out_free; 78 goto out_free;
78 79
80 sched_online_group(tg, &root_task_group);
81
79 kref_init(&ag->kref); 82 kref_init(&ag->kref);
80 init_rwsem(&ag->lock); 83 init_rwsem(&ag->lock);
81 ag->id = atomic_inc_return(&autogroup_seq_nr); 84 ag->id = atomic_inc_return(&autogroup_seq_nr);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..3a673a3b0c6b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -83,7 +83,7 @@
83#endif 83#endif
84 84
85#include "sched.h" 85#include "sched.h"
86#include "../workqueue_sched.h" 86#include "../workqueue_internal.h"
87#include "../smpboot.h" 87#include "../smpboot.h"
88 88
89#define CREATE_TRACE_POINTS 89#define CREATE_TRACE_POINTS
@@ -1523,7 +1523,8 @@ out:
1523 */ 1523 */
1524int wake_up_process(struct task_struct *p) 1524int wake_up_process(struct task_struct *p)
1525{ 1525{
1526 return try_to_wake_up(p, TASK_ALL, 0); 1526 WARN_ON(task_is_stopped_or_traced(p));
1527 return try_to_wake_up(p, TASK_NORMAL, 0);
1527} 1528}
1528EXPORT_SYMBOL(wake_up_process); 1529EXPORT_SYMBOL(wake_up_process);
1529 1530
@@ -4370,7 +4371,7 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
4370 struct task_struct *curr = current; 4371 struct task_struct *curr = current;
4371 struct rq *rq, *p_rq; 4372 struct rq *rq, *p_rq;
4372 unsigned long flags; 4373 unsigned long flags;
4373 bool yielded = 0; 4374 int yielded = 0;
4374 4375
4375 local_irq_save(flags); 4376 local_irq_save(flags);
4376 rq = this_rq(); 4377 rq = this_rq();
@@ -4666,6 +4667,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
4666 */ 4667 */
4667 idle->sched_class = &idle_sched_class; 4668 idle->sched_class = &idle_sched_class;
4668 ftrace_graph_init_idle_task(idle, cpu); 4669 ftrace_graph_init_idle_task(idle, cpu);
4670 vtime_init_idle(idle);
4669#if defined(CONFIG_SMP) 4671#if defined(CONFIG_SMP)
4670 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4672 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4671#endif 4673#endif
@@ -7159,7 +7161,6 @@ static void free_sched_group(struct task_group *tg)
7159struct task_group *sched_create_group(struct task_group *parent) 7161struct task_group *sched_create_group(struct task_group *parent)
7160{ 7162{
7161 struct task_group *tg; 7163 struct task_group *tg;
7162 unsigned long flags;
7163 7164
7164 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7165 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7165 if (!tg) 7166 if (!tg)
@@ -7171,6 +7172,17 @@ struct task_group *sched_create_group(struct task_group *parent)
7171 if (!alloc_rt_sched_group(tg, parent)) 7172 if (!alloc_rt_sched_group(tg, parent))
7172 goto err; 7173 goto err;
7173 7174
7175 return tg;
7176
7177err:
7178 free_sched_group(tg);
7179 return ERR_PTR(-ENOMEM);
7180}
7181
7182void sched_online_group(struct task_group *tg, struct task_group *parent)
7183{
7184 unsigned long flags;
7185
7174 spin_lock_irqsave(&task_group_lock, flags); 7186 spin_lock_irqsave(&task_group_lock, flags);
7175 list_add_rcu(&tg->list, &task_groups); 7187 list_add_rcu(&tg->list, &task_groups);
7176 7188
@@ -7180,12 +7192,6 @@ struct task_group *sched_create_group(struct task_group *parent)
7180 INIT_LIST_HEAD(&tg->children); 7192 INIT_LIST_HEAD(&tg->children);
7181 list_add_rcu(&tg->siblings, &parent->children); 7193 list_add_rcu(&tg->siblings, &parent->children);
7182 spin_unlock_irqrestore(&task_group_lock, flags); 7194 spin_unlock_irqrestore(&task_group_lock, flags);
7183
7184 return tg;
7185
7186err:
7187 free_sched_group(tg);
7188 return ERR_PTR(-ENOMEM);
7189} 7195}
7190 7196
7191/* rcu callback to free various structures associated with a task group */ 7197/* rcu callback to free various structures associated with a task group */
@@ -7198,6 +7204,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
7198/* Destroy runqueue etc associated with a task group */ 7204/* Destroy runqueue etc associated with a task group */
7199void sched_destroy_group(struct task_group *tg) 7205void sched_destroy_group(struct task_group *tg)
7200{ 7206{
7207 /* wait for possible concurrent references to cfs_rqs complete */
7208 call_rcu(&tg->rcu, free_sched_group_rcu);
7209}
7210
7211void sched_offline_group(struct task_group *tg)
7212{
7201 unsigned long flags; 7213 unsigned long flags;
7202 int i; 7214 int i;
7203 7215
@@ -7209,9 +7221,6 @@ void sched_destroy_group(struct task_group *tg)
7209 list_del_rcu(&tg->list); 7221 list_del_rcu(&tg->list);
7210 list_del_rcu(&tg->siblings); 7222 list_del_rcu(&tg->siblings);
7211 spin_unlock_irqrestore(&task_group_lock, flags); 7223 spin_unlock_irqrestore(&task_group_lock, flags);
7212
7213 /* wait for possible concurrent references to cfs_rqs complete */
7214 call_rcu(&tg->rcu, free_sched_group_rcu);
7215} 7224}
7216 7225
7217/* change task's runqueue when it moves between groups. 7226/* change task's runqueue when it moves between groups.
@@ -7507,6 +7516,25 @@ static int sched_rt_global_constraints(void)
7507} 7516}
7508#endif /* CONFIG_RT_GROUP_SCHED */ 7517#endif /* CONFIG_RT_GROUP_SCHED */
7509 7518
7519int sched_rr_handler(struct ctl_table *table, int write,
7520 void __user *buffer, size_t *lenp,
7521 loff_t *ppos)
7522{
7523 int ret;
7524 static DEFINE_MUTEX(mutex);
7525
7526 mutex_lock(&mutex);
7527 ret = proc_dointvec(table, write, buffer, lenp, ppos);
7528 /* make sure that internally we keep jiffies */
7529 /* also, writing zero resets timeslice to default */
7530 if (!ret && write) {
7531 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
7532 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
7533 }
7534 mutex_unlock(&mutex);
7535 return ret;
7536}
7537
7510int sched_rt_handler(struct ctl_table *table, int write, 7538int sched_rt_handler(struct ctl_table *table, int write,
7511 void __user *buffer, size_t *lenp, 7539 void __user *buffer, size_t *lenp,
7512 loff_t *ppos) 7540 loff_t *ppos)
@@ -7563,6 +7591,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
7563 return &tg->css; 7591 return &tg->css;
7564} 7592}
7565 7593
7594static int cpu_cgroup_css_online(struct cgroup *cgrp)
7595{
7596 struct task_group *tg = cgroup_tg(cgrp);
7597 struct task_group *parent;
7598
7599 if (!cgrp->parent)
7600 return 0;
7601
7602 parent = cgroup_tg(cgrp->parent);
7603 sched_online_group(tg, parent);
7604 return 0;
7605}
7606
7566static void cpu_cgroup_css_free(struct cgroup *cgrp) 7607static void cpu_cgroup_css_free(struct cgroup *cgrp)
7567{ 7608{
7568 struct task_group *tg = cgroup_tg(cgrp); 7609 struct task_group *tg = cgroup_tg(cgrp);
@@ -7570,6 +7611,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
7570 sched_destroy_group(tg); 7611 sched_destroy_group(tg);
7571} 7612}
7572 7613
7614static void cpu_cgroup_css_offline(struct cgroup *cgrp)
7615{
7616 struct task_group *tg = cgroup_tg(cgrp);
7617
7618 sched_offline_group(tg);
7619}
7620
7573static int cpu_cgroup_can_attach(struct cgroup *cgrp, 7621static int cpu_cgroup_can_attach(struct cgroup *cgrp,
7574 struct cgroup_taskset *tset) 7622 struct cgroup_taskset *tset)
7575{ 7623{
@@ -7925,6 +7973,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
7925 .name = "cpu", 7973 .name = "cpu",
7926 .css_alloc = cpu_cgroup_css_alloc, 7974 .css_alloc = cpu_cgroup_css_alloc,
7927 .css_free = cpu_cgroup_css_free, 7975 .css_free = cpu_cgroup_css_free,
7976 .css_online = cpu_cgroup_css_online,
7977 .css_offline = cpu_cgroup_css_offline,
7928 .can_attach = cpu_cgroup_can_attach, 7978 .can_attach = cpu_cgroup_can_attach,
7929 .attach = cpu_cgroup_attach, 7979 .attach = cpu_cgroup_attach,
7930 .exit = cpu_cgroup_exit, 7980 .exit = cpu_cgroup_exit,
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 23aa789c53ee..1095e878a46f 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -28,6 +28,8 @@
28 */ 28 */
29 29
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/sched.h>
32#include <linux/sched/rt.h>
31#include "cpupri.h" 33#include "cpupri.h"
32 34
33/* Convert between a 140 based task->prio, and our 102 based cpupri */ 35/* Convert between a 140 based task->prio, and our 102 based cpupri */
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 293b202fcf79..9857329ed280 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,7 @@
3#include <linux/tsacct_kern.h> 3#include <linux/tsacct_kern.h>
4#include <linux/kernel_stat.h> 4#include <linux/kernel_stat.h>
5#include <linux/static_key.h> 5#include <linux/static_key.h>
6#include <linux/context_tracking.h>
6#include "sched.h" 7#include "sched.h"
7 8
8 9
@@ -163,7 +164,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
163 task_group_account_field(p, index, (__force u64) cputime); 164 task_group_account_field(p, index, (__force u64) cputime);
164 165
165 /* Account for user time used */ 166 /* Account for user time used */
166 acct_update_integrals(p); 167 acct_account_cputime(p);
167} 168}
168 169
169/* 170/*
@@ -213,7 +214,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
213 task_group_account_field(p, index, (__force u64) cputime); 214 task_group_account_field(p, index, (__force u64) cputime);
214 215
215 /* Account for system time used */ 216 /* Account for system time used */
216 acct_update_integrals(p); 217 acct_account_cputime(p);
217} 218}
218 219
219/* 220/*
@@ -295,6 +296,7 @@ static __always_inline bool steal_account_process_tick(void)
295void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) 296void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
296{ 297{
297 struct signal_struct *sig = tsk->signal; 298 struct signal_struct *sig = tsk->signal;
299 cputime_t utime, stime;
298 struct task_struct *t; 300 struct task_struct *t;
299 301
300 times->utime = sig->utime; 302 times->utime = sig->utime;
@@ -308,16 +310,15 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
308 310
309 t = tsk; 311 t = tsk;
310 do { 312 do {
311 times->utime += t->utime; 313 task_cputime(tsk, &utime, &stime);
312 times->stime += t->stime; 314 times->utime += utime;
315 times->stime += stime;
313 times->sum_exec_runtime += task_sched_runtime(t); 316 times->sum_exec_runtime += task_sched_runtime(t);
314 } while_each_thread(tsk, t); 317 } while_each_thread(tsk, t);
315out: 318out:
316 rcu_read_unlock(); 319 rcu_read_unlock();
317} 320}
318 321
319#ifndef CONFIG_VIRT_CPU_ACCOUNTING
320
321#ifdef CONFIG_IRQ_TIME_ACCOUNTING 322#ifdef CONFIG_IRQ_TIME_ACCOUNTING
322/* 323/*
323 * Account a tick to a process and cpustat 324 * Account a tick to a process and cpustat
@@ -382,11 +383,12 @@ static void irqtime_account_idle_ticks(int ticks)
382 irqtime_account_process_tick(current, 0, rq); 383 irqtime_account_process_tick(current, 0, rq);
383} 384}
384#else /* CONFIG_IRQ_TIME_ACCOUNTING */ 385#else /* CONFIG_IRQ_TIME_ACCOUNTING */
385static void irqtime_account_idle_ticks(int ticks) {} 386static inline void irqtime_account_idle_ticks(int ticks) {}
386static void irqtime_account_process_tick(struct task_struct *p, int user_tick, 387static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
387 struct rq *rq) {} 388 struct rq *rq) {}
388#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 389#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
389 390
391#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
390/* 392/*
391 * Account a single tick of cpu time. 393 * Account a single tick of cpu time.
392 * @p: the process that the cpu time gets accounted to 394 * @p: the process that the cpu time gets accounted to
@@ -397,6 +399,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
397 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); 399 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
398 struct rq *rq = this_rq(); 400 struct rq *rq = this_rq();
399 401
402 if (vtime_accounting_enabled())
403 return;
404
400 if (sched_clock_irqtime) { 405 if (sched_clock_irqtime) {
401 irqtime_account_process_tick(p, user_tick, rq); 406 irqtime_account_process_tick(p, user_tick, rq);
402 return; 407 return;
@@ -438,8 +443,7 @@ void account_idle_ticks(unsigned long ticks)
438 443
439 account_idle_time(jiffies_to_cputime(ticks)); 444 account_idle_time(jiffies_to_cputime(ticks));
440} 445}
441 446#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
442#endif
443 447
444/* 448/*
445 * Use precise platform statistics if available: 449 * Use precise platform statistics if available:
@@ -461,25 +465,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
461 *st = cputime.stime; 465 *st = cputime.stime;
462} 466}
463 467
464void vtime_account_system_irqsafe(struct task_struct *tsk)
465{
466 unsigned long flags;
467
468 local_irq_save(flags);
469 vtime_account_system(tsk);
470 local_irq_restore(flags);
471}
472EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
473
474#ifndef __ARCH_HAS_VTIME_TASK_SWITCH 468#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
475void vtime_task_switch(struct task_struct *prev) 469void vtime_task_switch(struct task_struct *prev)
476{ 470{
471 if (!vtime_accounting_enabled())
472 return;
473
477 if (is_idle_task(prev)) 474 if (is_idle_task(prev))
478 vtime_account_idle(prev); 475 vtime_account_idle(prev);
479 else 476 else
480 vtime_account_system(prev); 477 vtime_account_system(prev);
481 478
479#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
482 vtime_account_user(prev); 480 vtime_account_user(prev);
481#endif
483 arch_vtime_task_switch(prev); 482 arch_vtime_task_switch(prev);
484} 483}
485#endif 484#endif
@@ -493,27 +492,40 @@ void vtime_task_switch(struct task_struct *prev)
493 * vtime_account(). 492 * vtime_account().
494 */ 493 */
495#ifndef __ARCH_HAS_VTIME_ACCOUNT 494#ifndef __ARCH_HAS_VTIME_ACCOUNT
496void vtime_account(struct task_struct *tsk) 495void vtime_account_irq_enter(struct task_struct *tsk)
497{ 496{
498 if (in_interrupt() || !is_idle_task(tsk)) 497 if (!vtime_accounting_enabled())
499 vtime_account_system(tsk); 498 return;
500 else 499
501 vtime_account_idle(tsk); 500 if (!in_interrupt()) {
501 /*
502 * If we interrupted user, context_tracking_in_user()
503 * is 1 because the context tracking don't hook
504 * on irq entry/exit. This way we know if
505 * we need to flush user time on kernel entry.
506 */
507 if (context_tracking_in_user()) {
508 vtime_account_user(tsk);
509 return;
510 }
511
512 if (is_idle_task(tsk)) {
513 vtime_account_idle(tsk);
514 return;
515 }
516 }
517 vtime_account_system(tsk);
502} 518}
503EXPORT_SYMBOL_GPL(vtime_account); 519EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
504#endif /* __ARCH_HAS_VTIME_ACCOUNT */ 520#endif /* __ARCH_HAS_VTIME_ACCOUNT */
505 521
506#else 522#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
507
508#ifndef nsecs_to_cputime
509# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
510#endif
511 523
512static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) 524static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
513{ 525{
514 u64 temp = (__force u64) rtime; 526 u64 temp = (__force u64) rtime;
515 527
516 temp *= (__force u64) utime; 528 temp *= (__force u64) stime;
517 529
518 if (sizeof(cputime_t) == 4) 530 if (sizeof(cputime_t) == 4)
519 temp = div_u64(temp, (__force u32) total); 531 temp = div_u64(temp, (__force u32) total);
@@ -531,10 +543,10 @@ static void cputime_adjust(struct task_cputime *curr,
531 struct cputime *prev, 543 struct cputime *prev,
532 cputime_t *ut, cputime_t *st) 544 cputime_t *ut, cputime_t *st)
533{ 545{
534 cputime_t rtime, utime, total; 546 cputime_t rtime, stime, total;
535 547
536 utime = curr->utime; 548 stime = curr->stime;
537 total = utime + curr->stime; 549 total = stime + curr->utime;
538 550
539 /* 551 /*
540 * Tick based cputime accounting depend on random scheduling 552 * Tick based cputime accounting depend on random scheduling
@@ -549,17 +561,17 @@ static void cputime_adjust(struct task_cputime *curr,
549 rtime = nsecs_to_cputime(curr->sum_exec_runtime); 561 rtime = nsecs_to_cputime(curr->sum_exec_runtime);
550 562
551 if (total) 563 if (total)
552 utime = scale_utime(utime, rtime, total); 564 stime = scale_stime(stime, rtime, total);
553 else 565 else
554 utime = rtime; 566 stime = rtime;
555 567
556 /* 568 /*
557 * If the tick based count grows faster than the scheduler one, 569 * If the tick based count grows faster than the scheduler one,
558 * the result of the scaling may go backward. 570 * the result of the scaling may go backward.
559 * Let's enforce monotonicity. 571 * Let's enforce monotonicity.
560 */ 572 */
561 prev->utime = max(prev->utime, utime); 573 prev->stime = max(prev->stime, stime);
562 prev->stime = max(prev->stime, rtime - prev->utime); 574 prev->utime = max(prev->utime, rtime - prev->stime);
563 575
564 *ut = prev->utime; 576 *ut = prev->utime;
565 *st = prev->stime; 577 *st = prev->stime;
@@ -568,11 +580,10 @@ static void cputime_adjust(struct task_cputime *curr,
568void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) 580void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
569{ 581{
570 struct task_cputime cputime = { 582 struct task_cputime cputime = {
571 .utime = p->utime,
572 .stime = p->stime,
573 .sum_exec_runtime = p->se.sum_exec_runtime, 583 .sum_exec_runtime = p->se.sum_exec_runtime,
574 }; 584 };
575 585
586 task_cputime(p, &cputime.utime, &cputime.stime);
576 cputime_adjust(&cputime, &p->prev_cputime, ut, st); 587 cputime_adjust(&cputime, &p->prev_cputime, ut, st);
577} 588}
578 589
@@ -586,4 +597,221 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
586 thread_group_cputime(p, &cputime); 597 thread_group_cputime(p, &cputime);
587 cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st); 598 cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
588} 599}
589#endif 600#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
601
602#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
603static unsigned long long vtime_delta(struct task_struct *tsk)
604{
605 unsigned long long clock;
606
607 clock = sched_clock();
608 if (clock < tsk->vtime_snap)
609 return 0;
610
611 return clock - tsk->vtime_snap;
612}
613
614static cputime_t get_vtime_delta(struct task_struct *tsk)
615{
616 unsigned long long delta = vtime_delta(tsk);
617
618 WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
619 tsk->vtime_snap += delta;
620
621 /* CHECKME: always safe to convert nsecs to cputime? */
622 return nsecs_to_cputime(delta);
623}
624
625static void __vtime_account_system(struct task_struct *tsk)
626{
627 cputime_t delta_cpu = get_vtime_delta(tsk);
628
629 account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
630}
631
632void vtime_account_system(struct task_struct *tsk)
633{
634 if (!vtime_accounting_enabled())
635 return;
636
637 write_seqlock(&tsk->vtime_seqlock);
638 __vtime_account_system(tsk);
639 write_sequnlock(&tsk->vtime_seqlock);
640}
641
642void vtime_account_irq_exit(struct task_struct *tsk)
643{
644 if (!vtime_accounting_enabled())
645 return;
646
647 write_seqlock(&tsk->vtime_seqlock);
648 if (context_tracking_in_user())
649 tsk->vtime_snap_whence = VTIME_USER;
650 __vtime_account_system(tsk);
651 write_sequnlock(&tsk->vtime_seqlock);
652}
653
654void vtime_account_user(struct task_struct *tsk)
655{
656 cputime_t delta_cpu;
657
658 if (!vtime_accounting_enabled())
659 return;
660
661 delta_cpu = get_vtime_delta(tsk);
662
663 write_seqlock(&tsk->vtime_seqlock);
664 tsk->vtime_snap_whence = VTIME_SYS;
665 account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
666 write_sequnlock(&tsk->vtime_seqlock);
667}
668
669void vtime_user_enter(struct task_struct *tsk)
670{
671 if (!vtime_accounting_enabled())
672 return;
673
674 write_seqlock(&tsk->vtime_seqlock);
675 tsk->vtime_snap_whence = VTIME_USER;
676 __vtime_account_system(tsk);
677 write_sequnlock(&tsk->vtime_seqlock);
678}
679
680void vtime_guest_enter(struct task_struct *tsk)
681{
682 write_seqlock(&tsk->vtime_seqlock);
683 __vtime_account_system(tsk);
684 current->flags |= PF_VCPU;
685 write_sequnlock(&tsk->vtime_seqlock);
686}
687
688void vtime_guest_exit(struct task_struct *tsk)
689{
690 write_seqlock(&tsk->vtime_seqlock);
691 __vtime_account_system(tsk);
692 current->flags &= ~PF_VCPU;
693 write_sequnlock(&tsk->vtime_seqlock);
694}
695
696void vtime_account_idle(struct task_struct *tsk)
697{
698 cputime_t delta_cpu = get_vtime_delta(tsk);
699
700 account_idle_time(delta_cpu);
701}
702
703bool vtime_accounting_enabled(void)
704{
705 return context_tracking_active();
706}
707
708void arch_vtime_task_switch(struct task_struct *prev)
709{
710 write_seqlock(&prev->vtime_seqlock);
711 prev->vtime_snap_whence = VTIME_SLEEPING;
712 write_sequnlock(&prev->vtime_seqlock);
713
714 write_seqlock(&current->vtime_seqlock);
715 current->vtime_snap_whence = VTIME_SYS;
716 current->vtime_snap = sched_clock();
717 write_sequnlock(&current->vtime_seqlock);
718}
719
720void vtime_init_idle(struct task_struct *t)
721{
722 unsigned long flags;
723
724 write_seqlock_irqsave(&t->vtime_seqlock, flags);
725 t->vtime_snap_whence = VTIME_SYS;
726 t->vtime_snap = sched_clock();
727 write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
728}
729
730cputime_t task_gtime(struct task_struct *t)
731{
732 unsigned int seq;
733 cputime_t gtime;
734
735 do {
736 seq = read_seqbegin(&t->vtime_seqlock);
737
738 gtime = t->gtime;
739 if (t->flags & PF_VCPU)
740 gtime += vtime_delta(t);
741
742 } while (read_seqretry(&t->vtime_seqlock, seq));
743
744 return gtime;
745}
746
747/*
748 * Fetch cputime raw values from fields of task_struct and
749 * add up the pending nohz execution time since the last
750 * cputime snapshot.
751 */
752static void
753fetch_task_cputime(struct task_struct *t,
754 cputime_t *u_dst, cputime_t *s_dst,
755 cputime_t *u_src, cputime_t *s_src,
756 cputime_t *udelta, cputime_t *sdelta)
757{
758 unsigned int seq;
759 unsigned long long delta;
760
761 do {
762 *udelta = 0;
763 *sdelta = 0;
764
765 seq = read_seqbegin(&t->vtime_seqlock);
766
767 if (u_dst)
768 *u_dst = *u_src;
769 if (s_dst)
770 *s_dst = *s_src;
771
772 /* Task is sleeping, nothing to add */
773 if (t->vtime_snap_whence == VTIME_SLEEPING ||
774 is_idle_task(t))
775 continue;
776
777 delta = vtime_delta(t);
778
779 /*
780 * Task runs either in user or kernel space, add pending nohz time to
781 * the right place.
782 */
783 if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
784 *udelta = delta;
785 } else {
786 if (t->vtime_snap_whence == VTIME_SYS)
787 *sdelta = delta;
788 }
789 } while (read_seqretry(&t->vtime_seqlock, seq));
790}
791
792
793void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
794{
795 cputime_t udelta, sdelta;
796
797 fetch_task_cputime(t, utime, stime, &t->utime,
798 &t->stime, &udelta, &sdelta);
799 if (utime)
800 *utime += udelta;
801 if (stime)
802 *stime += sdelta;
803}
804
805void task_cputime_scaled(struct task_struct *t,
806 cputime_t *utimescaled, cputime_t *stimescaled)
807{
808 cputime_t udelta, sdelta;
809
810 fetch_task_cputime(t, utimescaled, stimescaled,
811 &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
812 if (utimescaled)
813 *utimescaled += cputime_to_scaled(udelta);
814 if (stimescaled)
815 *stimescaled += cputime_to_scaled(sdelta);
816}
817#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e582..557e7b53b323 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -110,13 +110,6 @@ static char *task_group_path(struct task_group *tg)
110 if (autogroup_path(tg, group_path, PATH_MAX)) 110 if (autogroup_path(tg, group_path, PATH_MAX))
111 return group_path; 111 return group_path;
112 112
113 /*
114 * May be NULL if the underlying cgroup isn't fully-created yet
115 */
116 if (!tg->css.cgroup) {
117 group_path[0] = '\0';
118 return group_path;
119 }
120 cgroup_path(tg->css.cgroup, group_path, PATH_MAX); 113 cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
121 return group_path; 114 return group_path;
122} 115}
@@ -222,8 +215,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
222 cfs_rq->runnable_load_avg); 215 cfs_rq->runnable_load_avg);
223 SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", 216 SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg",
224 cfs_rq->blocked_load_avg); 217 cfs_rq->blocked_load_avg);
225 SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", 218 SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg",
226 atomic64_read(&cfs_rq->tg->load_avg)); 219 (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
227 SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", 220 SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib",
228 cfs_rq->tg_load_contrib); 221 cfs_rq->tg_load_contrib);
229 SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", 222 SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..7a33e5986fc5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1680 } 1680 }
1681 1681
1682 /* ensure we never gain time by being placed backwards. */ 1682 /* ensure we never gain time by being placed backwards. */
1683 vruntime = max_vruntime(se->vruntime, vruntime); 1683 se->vruntime = max_vruntime(se->vruntime, vruntime);
1684
1685 se->vruntime = vruntime;
1686} 1684}
1687 1685
1688static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 1686static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -2663,7 +2661,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2663 hrtimer_cancel(&cfs_b->slack_timer); 2661 hrtimer_cancel(&cfs_b->slack_timer);
2664} 2662}
2665 2663
2666static void unthrottle_offline_cfs_rqs(struct rq *rq) 2664static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
2667{ 2665{
2668 struct cfs_rq *cfs_rq; 2666 struct cfs_rq *cfs_rq;
2669 2667
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3254 */ 3252 */
3255static int select_idle_sibling(struct task_struct *p, int target) 3253static int select_idle_sibling(struct task_struct *p, int target)
3256{ 3254{
3257 int cpu = smp_processor_id();
3258 int prev_cpu = task_cpu(p);
3259 struct sched_domain *sd; 3255 struct sched_domain *sd;
3260 struct sched_group *sg; 3256 struct sched_group *sg;
3261 int i; 3257 int i = task_cpu(p);
3262 3258
3263 /* 3259 if (idle_cpu(target))
3264 * If the task is going to be woken-up on this cpu and if it is 3260 return target;
3265 * already idle, then it is the right target.
3266 */
3267 if (target == cpu && idle_cpu(cpu))
3268 return cpu;
3269 3261
3270 /* 3262 /*
3271 * If the task is going to be woken-up on the cpu where it previously 3263 * If the prevous cpu is cache affine and idle, don't be stupid.
3272 * ran and if it is currently idle, then it the right target.
3273 */ 3264 */
3274 if (target == prev_cpu && idle_cpu(prev_cpu)) 3265 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3275 return prev_cpu; 3266 return i;
3276 3267
3277 /* 3268 /*
3278 * Otherwise, iterate the domains and find an elegible idle cpu. 3269 * Otherwise, iterate the domains and find an elegible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
3286 goto next; 3277 goto next;
3287 3278
3288 for_each_cpu(i, sched_group_cpus(sg)) { 3279 for_each_cpu(i, sched_group_cpus(sg)) {
3289 if (!idle_cpu(i)) 3280 if (i == target || !idle_cpu(i))
3290 goto next; 3281 goto next;
3291 } 3282 }
3292 3283
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
6101 * idle runqueue: 6092 * idle runqueue:
6102 */ 6093 */
6103 if (rq->cfs.load.weight) 6094 if (rq->cfs.load.weight)
6104 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); 6095 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
6105 6096
6106 return rr_interval; 6097 return rr_interval;
6107} 6098}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344e..127a2c4cf4ab 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
7 7
8#include <linux/slab.h> 8#include <linux/slab.h>
9 9
10int sched_rr_timeslice = RR_TIMESLICE;
11
10static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); 12static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
11 13
12struct rt_bandwidth def_rt_bandwidth; 14struct rt_bandwidth def_rt_bandwidth;
@@ -566,7 +568,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
566static int do_balance_runtime(struct rt_rq *rt_rq) 568static int do_balance_runtime(struct rt_rq *rt_rq)
567{ 569{
568 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 570 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
569 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 571 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
570 int i, weight, more = 0; 572 int i, weight, more = 0;
571 u64 rt_period; 573 u64 rt_period;
572 574
@@ -925,8 +927,8 @@ static void update_curr_rt(struct rq *rq)
925 return; 927 return;
926 928
927 delta_exec = rq->clock_task - curr->se.exec_start; 929 delta_exec = rq->clock_task - curr->se.exec_start;
928 if (unlikely((s64)delta_exec < 0)) 930 if (unlikely((s64)delta_exec <= 0))
929 delta_exec = 0; 931 return;
930 932
931 schedstat_set(curr->se.statistics.exec_max, 933 schedstat_set(curr->se.statistics.exec_max,
932 max(curr->se.statistics.exec_max, delta_exec)); 934 max(curr->se.statistics.exec_max, delta_exec));
@@ -1427,8 +1429,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1427static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1429static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1428{ 1430{
1429 if (!task_running(rq, p) && 1431 if (!task_running(rq, p) &&
1430 (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && 1432 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1431 (p->nr_cpus_allowed > 1))
1432 return 1; 1433 return 1;
1433 return 0; 1434 return 0;
1434} 1435}
@@ -1889,8 +1890,11 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
1889 * we may need to handle the pulling of RT tasks 1890 * we may need to handle the pulling of RT tasks
1890 * now. 1891 * now.
1891 */ 1892 */
1892 if (p->on_rq && !rq->rt.rt_nr_running) 1893 if (!p->on_rq || rq->rt.rt_nr_running)
1893 pull_rt_task(rq); 1894 return;
1895
1896 if (pull_rt_task(rq))
1897 resched_task(rq->curr);
1894} 1898}
1895 1899
1896void init_sched_rt_class(void) 1900void init_sched_rt_class(void)
@@ -1985,7 +1989,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
1985 if (soft != RLIM_INFINITY) { 1989 if (soft != RLIM_INFINITY) {
1986 unsigned long next; 1990 unsigned long next;
1987 1991
1988 p->rt.timeout++; 1992 if (p->rt.watchdog_stamp != jiffies) {
1993 p->rt.timeout++;
1994 p->rt.watchdog_stamp = jiffies;
1995 }
1996
1989 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 1997 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1990 if (p->rt.timeout > next) 1998 if (p->rt.timeout > next)
1991 p->cputime_expires.sched_exp = p->se.sum_exec_runtime; 1999 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
@@ -2010,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2010 if (--p->rt.time_slice) 2018 if (--p->rt.time_slice)
2011 return; 2019 return;
2012 2020
2013 p->rt.time_slice = RR_TIMESLICE; 2021 p->rt.time_slice = sched_rr_timeslice;
2014 2022
2015 /* 2023 /*
2016 * Requeue to the end of queue if we (and all of our ancestors) are the 2024 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2041 * Time slice is 0 for SCHED_FIFO tasks 2049 * Time slice is 0 for SCHED_FIFO tasks
2042 */ 2050 */
2043 if (task->policy == SCHED_RR) 2051 if (task->policy == SCHED_RR)
2044 return RR_TIMESLICE; 2052 return sched_rr_timeslice;
2045 else 2053 else
2046 return 0; 2054 return 0;
2047} 2055}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fc886441436a..cc03cfdf469f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1,5 +1,7 @@
1 1
2#include <linux/sched.h> 2#include <linux/sched.h>
3#include <linux/sched/sysctl.h>
4#include <linux/sched/rt.h>
3#include <linux/mutex.h> 5#include <linux/mutex.h>
4#include <linux/spinlock.h> 6#include <linux/spinlock.h>
5#include <linux/stop_machine.h> 7#include <linux/stop_machine.h>
diff --git a/kernel/signal.c b/kernel/signal.c
index 372771e948c2..7f82adbad480 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
680 * No need to set need_resched since signal event passing 680 * No need to set need_resched since signal event passing
681 * goes through ->blocked 681 * goes through ->blocked
682 */ 682 */
683void signal_wake_up(struct task_struct *t, int resume) 683void signal_wake_up_state(struct task_struct *t, unsigned int state)
684{ 684{
685 unsigned int mask;
686
687 set_tsk_thread_flag(t, TIF_SIGPENDING); 685 set_tsk_thread_flag(t, TIF_SIGPENDING);
688
689 /* 686 /*
690 * For SIGKILL, we want to wake it up in the stopped/traced/killable 687 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
691 * case. We don't check t->state here because there is a race with it 688 * case. We don't check t->state here because there is a race with it
692 * executing another processor and just now entering stopped state. 689 * executing another processor and just now entering stopped state.
693 * By using wake_up_state, we ensure the process will wake up and 690 * By using wake_up_state, we ensure the process will wake up and
694 * handle its death signal. 691 * handle its death signal.
695 */ 692 */
696 mask = TASK_INTERRUPTIBLE; 693 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
697 if (resume)
698 mask |= TASK_WAKEKILL;
699 if (!wake_up_state(t, mask))
700 kick_process(t); 694 kick_process(t);
701} 695}
702 696
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
844 assert_spin_locked(&t->sighand->siglock); 838 assert_spin_locked(&t->sighand->siglock);
845 839
846 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); 840 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
847 signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); 841 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
848} 842}
849 843
850/* 844/*
@@ -1638,6 +1632,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
1638 unsigned long flags; 1632 unsigned long flags;
1639 struct sighand_struct *psig; 1633 struct sighand_struct *psig;
1640 bool autoreap = false; 1634 bool autoreap = false;
1635 cputime_t utime, stime;
1641 1636
1642 BUG_ON(sig == -1); 1637 BUG_ON(sig == -1);
1643 1638
@@ -1675,8 +1670,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
1675 task_uid(tsk)); 1670 task_uid(tsk));
1676 rcu_read_unlock(); 1671 rcu_read_unlock();
1677 1672
1678 info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); 1673 task_cputime(tsk, &utime, &stime);
1679 info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); 1674 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1675 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1680 1676
1681 info.si_status = tsk->exit_code & 0x7f; 1677 info.si_status = tsk->exit_code & 0x7f;
1682 if (tsk->exit_code & 0x80) 1678 if (tsk->exit_code & 0x80)
@@ -1740,6 +1736,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
1740 unsigned long flags; 1736 unsigned long flags;
1741 struct task_struct *parent; 1737 struct task_struct *parent;
1742 struct sighand_struct *sighand; 1738 struct sighand_struct *sighand;
1739 cputime_t utime, stime;
1743 1740
1744 if (for_ptracer) { 1741 if (for_ptracer) {
1745 parent = tsk->parent; 1742 parent = tsk->parent;
@@ -1758,8 +1755,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
1758 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); 1755 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1759 rcu_read_unlock(); 1756 rcu_read_unlock();
1760 1757
1761 info.si_utime = cputime_to_clock_t(tsk->utime); 1758 task_cputime(tsk, &utime, &stime);
1762 info.si_stime = cputime_to_clock_t(tsk->stime); 1759 info.si_utime = cputime_to_clock_t(utime);
1760 info.si_stime = cputime_to_clock_t(stime);
1763 1761
1764 info.si_code = why; 1762 info.si_code = why;
1765 switch (why) { 1763 switch (why) {
@@ -1800,6 +1798,10 @@ static inline int may_ptrace_stop(void)
1800 * If SIGKILL was already sent before the caller unlocked 1798 * If SIGKILL was already sent before the caller unlocked
1801 * ->siglock we must see ->core_state != NULL. Otherwise it 1799 * ->siglock we must see ->core_state != NULL. Otherwise it
1802 * is safe to enter schedule(). 1800 * is safe to enter schedule().
1801 *
1802 * This is almost outdated, a task with the pending SIGKILL can't
1803 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1804 * after SIGKILL was already dequeued.
1803 */ 1805 */
1804 if (unlikely(current->mm->core_state) && 1806 if (unlikely(current->mm->core_state) &&
1805 unlikely(current->mm == current->parent->mm)) 1807 unlikely(current->mm == current->parent->mm))
@@ -1925,6 +1927,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1925 if (gstop_done) 1927 if (gstop_done)
1926 do_notify_parent_cldstop(current, false, why); 1928 do_notify_parent_cldstop(current, false, why);
1927 1929
1930 /* tasklist protects us from ptrace_freeze_traced() */
1928 __set_current_state(TASK_RUNNING); 1931 __set_current_state(TASK_RUNNING);
1929 if (clear_code) 1932 if (clear_code)
1930 current->exit_code = 0; 1933 current->exit_code = 0;
@@ -3116,8 +3119,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
3116 3119
3117#ifdef CONFIG_COMPAT 3120#ifdef CONFIG_COMPAT
3118#ifdef CONFIG_GENERIC_SIGALTSTACK 3121#ifdef CONFIG_GENERIC_SIGALTSTACK
3119asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, 3122COMPAT_SYSCALL_DEFINE2(sigaltstack,
3120 compat_stack_t __user *uoss_ptr) 3123 const compat_stack_t __user *, uss_ptr,
3124 compat_stack_t __user *, uoss_ptr)
3121{ 3125{
3122 stack_t uss, uoss; 3126 stack_t uss, uoss;
3123 int ret; 3127 int ret;
diff --git a/kernel/smp.c b/kernel/smp.c
index 29dd40a9f2f4..69f38bd98b42 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,6 +33,7 @@ struct call_function_data {
33 struct call_single_data csd; 33 struct call_single_data csd;
34 atomic_t refs; 34 atomic_t refs;
35 cpumask_var_t cpumask; 35 cpumask_var_t cpumask;
36 cpumask_var_t cpumask_ipi;
36}; 37};
37 38
38static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); 39static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
56 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 57 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
57 cpu_to_node(cpu))) 58 cpu_to_node(cpu)))
58 return notifier_from_errno(-ENOMEM); 59 return notifier_from_errno(-ENOMEM);
60 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
61 cpu_to_node(cpu)))
62 return notifier_from_errno(-ENOMEM);
59 break; 63 break;
60 64
61#ifdef CONFIG_HOTPLUG_CPU 65#ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
65 case CPU_DEAD: 69 case CPU_DEAD:
66 case CPU_DEAD_FROZEN: 70 case CPU_DEAD_FROZEN:
67 free_cpumask_var(cfd->cpumask); 71 free_cpumask_var(cfd->cpumask);
72 free_cpumask_var(cfd->cpumask_ipi);
68 break; 73 break;
69#endif 74#endif
70 }; 75 };
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
526 return; 531 return;
527 } 532 }
528 533
534 /*
535 * After we put an entry into the list, data->cpumask
536 * may be cleared again when another CPU sends another IPI for
537 * a SMP function call, so data->cpumask will be zero.
538 */
539 cpumask_copy(data->cpumask_ipi, data->cpumask);
529 raw_spin_lock_irqsave(&call_function.lock, flags); 540 raw_spin_lock_irqsave(&call_function.lock, flags);
530 /* 541 /*
531 * Place entry at the _HEAD_ of the list, so that any cpu still 542 * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
549 smp_mb(); 560 smp_mb();
550 561
551 /* Send a message to all CPUs in the map */ 562 /* Send a message to all CPUs in the map */
552 arch_send_call_function_ipi_mask(data->cpumask); 563 arch_send_call_function_ipi_mask(data->cpumask_ipi);
553 564
554 /* Optionally wait for the CPUs to complete */ 565 /* Optionally wait for the CPUs to complete */
555 if (wait) 566 if (wait)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d6c5fc054242..d4abac261779 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -183,9 +183,10 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
183 kfree(td); 183 kfree(td);
184 return PTR_ERR(tsk); 184 return PTR_ERR(tsk);
185 } 185 }
186
187 get_task_struct(tsk); 186 get_task_struct(tsk);
188 *per_cpu_ptr(ht->store, cpu) = tsk; 187 *per_cpu_ptr(ht->store, cpu) = tsk;
188 if (ht->create)
189 ht->create(cpu);
189 return 0; 190 return 0;
190} 191}
191 192
@@ -225,7 +226,7 @@ static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
225{ 226{
226 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); 227 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
227 228
228 if (tsk) 229 if (tsk && !ht->selfparking)
229 kthread_park(tsk); 230 kthread_park(tsk);
230} 231}
231 232
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ed567babe789..f5cc25f147a6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
221 current->flags &= ~PF_MEMALLOC; 221 current->flags &= ~PF_MEMALLOC;
222 222
223 pending = local_softirq_pending(); 223 pending = local_softirq_pending();
224 vtime_account_irq_enter(current); 224 account_irq_enter_time(current);
225 225
226 __local_bh_disable((unsigned long)__builtin_return_address(0), 226 __local_bh_disable((unsigned long)__builtin_return_address(0),
227 SOFTIRQ_OFFSET); 227 SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
272 272
273 lockdep_softirq_exit(); 273 lockdep_softirq_exit();
274 274
275 vtime_account_irq_exit(current); 275 account_irq_exit_time(current);
276 __local_bh_enable(SOFTIRQ_OFFSET); 276 __local_bh_enable(SOFTIRQ_OFFSET);
277 tsk_restore_flags(current, old_flags, PF_MEMALLOC); 277 tsk_restore_flags(current, old_flags, PF_MEMALLOC);
278} 278}
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
341 */ 341 */
342void irq_exit(void) 342void irq_exit(void)
343{ 343{
344 vtime_account_irq_exit(current); 344 account_irq_exit_time(current);
345 trace_hardirq_exit(); 345 trace_hardirq_exit();
346 sub_preempt_count(IRQ_EXIT_OFFSET); 346 sub_preempt_count(IRQ_EXIT_OFFSET);
347 if (!in_interrupt() && local_softirq_pending()) 347 if (!in_interrupt() && local_softirq_pending())
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2b859828cdc3..01d5ccb8bfe3 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
282 */ 282 */
283void cleanup_srcu_struct(struct srcu_struct *sp) 283void cleanup_srcu_struct(struct srcu_struct *sp)
284{ 284{
285 int sum; 285 if (WARN_ON(srcu_readers_active(sp)))
286 286 return; /* Leakage unless caller handles error. */
287 sum = srcu_readers_active(sp);
288 WARN_ON(sum); /* Leakage unless caller handles error. */
289 if (sum != 0)
290 return;
291 free_percpu(sp->per_cpu_ref); 287 free_percpu(sp->per_cpu_ref);
292 sp->per_cpu_ref = NULL; 288 sp->per_cpu_ref = NULL;
293} 289}
@@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
302{ 298{
303 int idx; 299 int idx;
304 300
301 idx = ACCESS_ONCE(sp->completed) & 0x1;
305 preempt_disable(); 302 preempt_disable();
306 idx = rcu_dereference_index_check(sp->completed,
307 rcu_read_lock_sched_held()) & 0x1;
308 ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; 303 ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
309 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 304 smp_mb(); /* B */ /* Avoid leaking the critical section. */
310 ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; 305 ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
@@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
321 */ 316 */
322void __srcu_read_unlock(struct srcu_struct *sp, int idx) 317void __srcu_read_unlock(struct srcu_struct *sp, int idx)
323{ 318{
324 preempt_disable();
325 smp_mb(); /* C */ /* Avoid leaking the critical section. */ 319 smp_mb(); /* C */ /* Avoid leaking the critical section. */
326 ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1; 320 this_cpu_dec(sp->per_cpu_ref->c[idx]);
327 preempt_enable();
328} 321}
329EXPORT_SYMBOL_GPL(__srcu_read_unlock); 322EXPORT_SYMBOL_GPL(__srcu_read_unlock);
330 323
@@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
423 !lock_is_held(&rcu_sched_lock_map), 416 !lock_is_held(&rcu_sched_lock_map),
424 "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); 417 "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
425 418
419 might_sleep();
426 init_completion(&rcu.completion); 420 init_completion(&rcu.completion);
427 421
428 head->next = NULL; 422 head->next = NULL;
@@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
455 * synchronize_srcu - wait for prior SRCU read-side critical-section completion 449 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
456 * @sp: srcu_struct with which to synchronize. 450 * @sp: srcu_struct with which to synchronize.
457 * 451 *
458 * Flip the completed counter, and wait for the old count to drain to zero. 452 * Wait for the count to drain to zero of both indexes. To avoid the
459 * As with classic RCU, the updater must use some separate means of 453 * possible starvation of synchronize_srcu(), it waits for the count of
460 * synchronizing concurrent updates. Can block; must be called from 454 * the index=((->completed & 1) ^ 1) to drain to zero at first,
461 * process context. 455 * and then flip the completed and wait for the count of the other index.
456 *
457 * Can block; must be called from process context.
462 * 458 *
463 * Note that it is illegal to call synchronize_srcu() from the corresponding 459 * Note that it is illegal to call synchronize_srcu() from the corresponding
464 * SRCU read-side critical section; doing so will result in deadlock. 460 * SRCU read-side critical section; doing so will result in deadlock.
@@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
480 * Wait for an SRCU grace period to elapse, but be more aggressive about 476 * Wait for an SRCU grace period to elapse, but be more aggressive about
481 * spinning rather than blocking when waiting. 477 * spinning rather than blocking when waiting.
482 * 478 *
483 * Note that it is illegal to call this function while holding any lock 479 * Note that it is also illegal to call synchronize_srcu_expedited()
484 * that is acquired by a CPU-hotplug notifier. It is also illegal to call 480 * from the corresponding SRCU read-side critical section;
485 * synchronize_srcu_expedited() from the corresponding SRCU read-side 481 * doing so will result in deadlock. However, it is perfectly legal
486 * critical section; doing so will result in deadlock. However, it is 482 * to call synchronize_srcu_expedited() on one srcu_struct from some
487 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct 483 * other srcu_struct's read-side critical section, as long as
488 * from some other srcu_struct's read-side critical section, as long as
489 * the resulting graph of srcu_structs is acyclic. 484 * the resulting graph of srcu_structs is acyclic.
490 */ 485 */
491void synchronize_srcu_expedited(struct srcu_struct *sp) 486void synchronize_srcu_expedited(struct srcu_struct *sp)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e965715..95d178c62d5a 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -18,7 +18,7 @@
18#include <linux/stop_machine.h> 18#include <linux/stop_machine.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/kallsyms.h> 20#include <linux/kallsyms.h>
21 21#include <linux/smpboot.h>
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23 23
24/* 24/*
@@ -37,10 +37,10 @@ struct cpu_stopper {
37 spinlock_t lock; 37 spinlock_t lock;
38 bool enabled; /* is this stopper enabled? */ 38 bool enabled; /* is this stopper enabled? */
39 struct list_head works; /* list of pending works */ 39 struct list_head works; /* list of pending works */
40 struct task_struct *thread; /* stopper thread */
41}; 40};
42 41
43static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); 42static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
43static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
44static bool stop_machine_initialized = false; 44static bool stop_machine_initialized = false;
45 45
46static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) 46static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
62} 62}
63 63
64/* queue @work to @stopper. if offline, @work is completed immediately */ 64/* queue @work to @stopper. if offline, @work is completed immediately */
65static void cpu_stop_queue_work(struct cpu_stopper *stopper, 65static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
66 struct cpu_stop_work *work)
67{ 66{
67 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
68 struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
69
68 unsigned long flags; 70 unsigned long flags;
69 71
70 spin_lock_irqsave(&stopper->lock, flags); 72 spin_lock_irqsave(&stopper->lock, flags);
71 73
72 if (stopper->enabled) { 74 if (stopper->enabled) {
73 list_add_tail(&work->list, &stopper->works); 75 list_add_tail(&work->list, &stopper->works);
74 wake_up_process(stopper->thread); 76 wake_up_process(p);
75 } else 77 } else
76 cpu_stop_signal_done(work->done, false); 78 cpu_stop_signal_done(work->done, false);
77 79
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
108 struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; 110 struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
109 111
110 cpu_stop_init_done(&done, 1); 112 cpu_stop_init_done(&done, 1);
111 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work); 113 cpu_stop_queue_work(cpu, &work);
112 wait_for_completion(&done.completion); 114 wait_for_completion(&done.completion);
113 return done.executed ? done.ret : -ENOENT; 115 return done.executed ? done.ret : -ENOENT;
114} 116}
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
130 struct cpu_stop_work *work_buf) 132 struct cpu_stop_work *work_buf)
131{ 133{
132 *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, }; 134 *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
133 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf); 135 cpu_stop_queue_work(cpu, work_buf);
134} 136}
135 137
136/* static data for stop_cpus */ 138/* static data for stop_cpus */
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
159 */ 161 */
160 preempt_disable(); 162 preempt_disable();
161 for_each_cpu(cpu, cpumask) 163 for_each_cpu(cpu, cpumask)
162 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), 164 cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
163 &per_cpu(stop_cpus_work, cpu));
164 preempt_enable(); 165 preempt_enable();
165} 166}
166 167
@@ -244,20 +245,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
244 return ret; 245 return ret;
245} 246}
246 247
247static int cpu_stopper_thread(void *data) 248static int cpu_stop_should_run(unsigned int cpu)
249{
250 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
251 unsigned long flags;
252 int run;
253
254 spin_lock_irqsave(&stopper->lock, flags);
255 run = !list_empty(&stopper->works);
256 spin_unlock_irqrestore(&stopper->lock, flags);
257 return run;
258}
259
260static void cpu_stopper_thread(unsigned int cpu)
248{ 261{
249 struct cpu_stopper *stopper = data; 262 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
250 struct cpu_stop_work *work; 263 struct cpu_stop_work *work;
251 int ret; 264 int ret;
252 265
253repeat: 266repeat:
254 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
255
256 if (kthread_should_stop()) {
257 __set_current_state(TASK_RUNNING);
258 return 0;
259 }
260
261 work = NULL; 267 work = NULL;
262 spin_lock_irq(&stopper->lock); 268 spin_lock_irq(&stopper->lock);
263 if (!list_empty(&stopper->works)) { 269 if (!list_empty(&stopper->works)) {
@@ -273,8 +279,6 @@ repeat:
273 struct cpu_stop_done *done = work->done; 279 struct cpu_stop_done *done = work->done;
274 char ksym_buf[KSYM_NAME_LEN] __maybe_unused; 280 char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
275 281
276 __set_current_state(TASK_RUNNING);
277
278 /* cpu stop callbacks are not allowed to sleep */ 282 /* cpu stop callbacks are not allowed to sleep */
279 preempt_disable(); 283 preempt_disable();
280 284
@@ -290,88 +294,55 @@ repeat:
290 ksym_buf), arg); 294 ksym_buf), arg);
291 295
292 cpu_stop_signal_done(done, true); 296 cpu_stop_signal_done(done, true);
293 } else 297 goto repeat;
294 schedule(); 298 }
295
296 goto repeat;
297} 299}
298 300
299extern void sched_set_stop_task(int cpu, struct task_struct *stop); 301extern void sched_set_stop_task(int cpu, struct task_struct *stop);
300 302
301/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */ 303static void cpu_stop_create(unsigned int cpu)
302static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, 304{
303 unsigned long action, void *hcpu) 305 sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
306}
307
308static void cpu_stop_park(unsigned int cpu)
304{ 309{
305 unsigned int cpu = (unsigned long)hcpu;
306 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); 310 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
307 struct task_struct *p; 311 struct cpu_stop_work *work;
308 312 unsigned long flags;
309 switch (action & ~CPU_TASKS_FROZEN) {
310 case CPU_UP_PREPARE:
311 BUG_ON(stopper->thread || stopper->enabled ||
312 !list_empty(&stopper->works));
313 p = kthread_create_on_node(cpu_stopper_thread,
314 stopper,
315 cpu_to_node(cpu),
316 "migration/%d", cpu);
317 if (IS_ERR(p))
318 return notifier_from_errno(PTR_ERR(p));
319 get_task_struct(p);
320 kthread_bind(p, cpu);
321 sched_set_stop_task(cpu, p);
322 stopper->thread = p;
323 break;
324
325 case CPU_ONLINE:
326 /* strictly unnecessary, as first user will wake it */
327 wake_up_process(stopper->thread);
328 /* mark enabled */
329 spin_lock_irq(&stopper->lock);
330 stopper->enabled = true;
331 spin_unlock_irq(&stopper->lock);
332 break;
333
334#ifdef CONFIG_HOTPLUG_CPU
335 case CPU_UP_CANCELED:
336 case CPU_POST_DEAD:
337 {
338 struct cpu_stop_work *work;
339
340 sched_set_stop_task(cpu, NULL);
341 /* kill the stopper */
342 kthread_stop(stopper->thread);
343 /* drain remaining works */
344 spin_lock_irq(&stopper->lock);
345 list_for_each_entry(work, &stopper->works, list)
346 cpu_stop_signal_done(work->done, false);
347 stopper->enabled = false;
348 spin_unlock_irq(&stopper->lock);
349 /* release the stopper */
350 put_task_struct(stopper->thread);
351 stopper->thread = NULL;
352 break;
353 }
354#endif
355 }
356 313
357 return NOTIFY_OK; 314 /* drain remaining works */
315 spin_lock_irqsave(&stopper->lock, flags);
316 list_for_each_entry(work, &stopper->works, list)
317 cpu_stop_signal_done(work->done, false);
318 stopper->enabled = false;
319 spin_unlock_irqrestore(&stopper->lock, flags);
358} 320}
359 321
360/* 322static void cpu_stop_unpark(unsigned int cpu)
361 * Give it a higher priority so that cpu stopper is available to other 323{
362 * cpu notifiers. It currently shares the same priority as sched 324 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
363 * migration_notifier. 325
364 */ 326 spin_lock_irq(&stopper->lock);
365static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = { 327 stopper->enabled = true;
366 .notifier_call = cpu_stop_cpu_callback, 328 spin_unlock_irq(&stopper->lock);
367 .priority = 10, 329}
330
331static struct smp_hotplug_thread cpu_stop_threads = {
332 .store = &cpu_stopper_task,
333 .thread_should_run = cpu_stop_should_run,
334 .thread_fn = cpu_stopper_thread,
335 .thread_comm = "migration/%u",
336 .create = cpu_stop_create,
337 .setup = cpu_stop_unpark,
338 .park = cpu_stop_park,
339 .unpark = cpu_stop_unpark,
340 .selfparking = true,
368}; 341};
369 342
370static int __init cpu_stop_init(void) 343static int __init cpu_stop_init(void)
371{ 344{
372 void *bcpu = (void *)(long)smp_processor_id();
373 unsigned int cpu; 345 unsigned int cpu;
374 int err;
375 346
376 for_each_possible_cpu(cpu) { 347 for_each_possible_cpu(cpu) {
377 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); 348 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
380 INIT_LIST_HEAD(&stopper->works); 351 INIT_LIST_HEAD(&stopper->works);
381 } 352 }
382 353
383 /* start one for the boot cpu */ 354 BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
384 err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
385 bcpu);
386 BUG_ON(err != NOTIFY_OK);
387 cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
388 register_cpu_notifier(&cpu_stop_cpu_notifier);
389
390 stop_machine_initialized = true; 355 stop_machine_initialized = true;
391
392 return 0; 356 return 0;
393} 357}
394early_initcall(cpu_stop_init); 358early_initcall(cpu_stop_init);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c88878db491e..4fc9be955c71 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -61,6 +61,7 @@
61#include <linux/kmod.h> 61#include <linux/kmod.h>
62#include <linux/capability.h> 62#include <linux/capability.h>
63#include <linux/binfmts.h> 63#include <linux/binfmts.h>
64#include <linux/sched/sysctl.h>
64 65
65#include <asm/uaccess.h> 66#include <asm/uaccess.h>
66#include <asm/processor.h> 67#include <asm/processor.h>
@@ -403,6 +404,13 @@ static struct ctl_table kern_table[] = {
403 .mode = 0644, 404 .mode = 0644,
404 .proc_handler = sched_rt_handler, 405 .proc_handler = sched_rt_handler,
405 }, 406 },
407 {
408 .procname = "sched_rr_timeslice_ms",
409 .data = &sched_rr_timeslice,
410 .maxlen = sizeof(int),
411 .mode = 0644,
412 .proc_handler = sched_rr_handler,
413 },
406#ifdef CONFIG_SCHED_AUTOGROUP 414#ifdef CONFIG_SCHED_AUTOGROUP
407 { 415 {
408 .procname = "sched_autogroup_enabled", 416 .procname = "sched_autogroup_enabled",
diff --git a/kernel/time.c b/kernel/time.c
index d226c6a3fd28..c2a27dd93142 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -115,6 +115,12 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
115} 115}
116 116
117/* 117/*
118 * Indicates if there is an offset between the system clock and the hardware
119 * clock/persistent clock/rtc.
120 */
121int persistent_clock_is_local;
122
123/*
118 * Adjust the time obtained from the CMOS to be UTC time instead of 124 * Adjust the time obtained from the CMOS to be UTC time instead of
119 * local time. 125 * local time.
120 * 126 *
@@ -135,6 +141,8 @@ static inline void warp_clock(void)
135 struct timespec adjust; 141 struct timespec adjust;
136 142
137 adjust = current_kernel_time(); 143 adjust = current_kernel_time();
144 if (sys_tz.tz_minuteswest != 0)
145 persistent_clock_is_local = 1;
138 adjust.tv_sec += sys_tz.tz_minuteswest * 60; 146 adjust.tv_sec += sys_tz.tz_minuteswest * 60;
139 do_settimeofday(&adjust); 147 do_settimeofday(&adjust);
140} 148}
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8601f0db1261..24510d84efd7 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
12config ARCH_CLOCKSOURCE_DATA 12config ARCH_CLOCKSOURCE_DATA
13 bool 13 bool
14 14
15# Platforms has a persistent clock
16config ALWAYS_USE_PERSISTENT_CLOCK
17 bool
18 default n
19
15# Timekeeping vsyscall support 20# Timekeeping vsyscall support
16config GENERIC_TIME_VSYSCALL 21config GENERIC_TIME_VSYSCALL
17 bool 22 bool
@@ -38,6 +43,10 @@ config GENERIC_CLOCKEVENTS_BUILD
38 default y 43 default y
39 depends on GENERIC_CLOCKEVENTS 44 depends on GENERIC_CLOCKEVENTS
40 45
46# Architecture can handle broadcast in a driver-agnostic way
47config ARCH_HAS_TICK_BROADCAST
48 bool
49
41# Clockevents broadcasting infrastructure 50# Clockevents broadcasting infrastructure
42config GENERIC_CLOCKEVENTS_BROADCAST 51config GENERIC_CLOCKEVENTS_BROADCAST
43 bool 52 bool
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 24174b4d669b..b10a42bb0165 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -15,6 +15,7 @@
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/rtc.h>
18 19
19#include "tick-internal.h" 20#include "tick-internal.h"
20 21
@@ -483,8 +484,7 @@ out:
483 return leap; 484 return leap;
484} 485}
485 486
486#ifdef CONFIG_GENERIC_CMOS_UPDATE 487#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
487
488static void sync_cmos_clock(struct work_struct *work); 488static void sync_cmos_clock(struct work_struct *work);
489 489
490static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); 490static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -510,14 +510,26 @@ static void sync_cmos_clock(struct work_struct *work)
510 } 510 }
511 511
512 getnstimeofday(&now); 512 getnstimeofday(&now);
513 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 513 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
514 fail = update_persistent_clock(now); 514 struct timespec adjust = now;
515
516 fail = -ENODEV;
517 if (persistent_clock_is_local)
518 adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
519#ifdef CONFIG_GENERIC_CMOS_UPDATE
520 fail = update_persistent_clock(adjust);
521#endif
522#ifdef CONFIG_RTC_SYSTOHC
523 if (fail == -ENODEV)
524 fail = rtc_set_ntp_time(adjust);
525#endif
526 }
515 527
516 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2); 528 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
517 if (next.tv_nsec <= 0) 529 if (next.tv_nsec <= 0)
518 next.tv_nsec += NSEC_PER_SEC; 530 next.tv_nsec += NSEC_PER_SEC;
519 531
520 if (!fail) 532 if (!fail || fail == -ENODEV)
521 next.tv_sec = 659; 533 next.tv_sec = 659;
522 else 534 else
523 next.tv_sec = 0; 535 next.tv_sec = 0;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f113755695e2..2fb8cb88df8d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -18,6 +18,7 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/profile.h> 19#include <linux/profile.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/smp.h>
21 22
22#include "tick-internal.h" 23#include "tick-internal.h"
23 24
@@ -86,6 +87,22 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
86 return (dev && tick_broadcast_device.evtdev == dev); 87 return (dev && tick_broadcast_device.evtdev == dev);
87} 88}
88 89
90static void err_broadcast(const struct cpumask *mask)
91{
92 pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
93}
94
95static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
96{
97 if (!dev->broadcast)
98 dev->broadcast = tick_broadcast;
99 if (!dev->broadcast) {
100 pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
101 dev->name);
102 dev->broadcast = err_broadcast;
103 }
104}
105
89/* 106/*
90 * Check, if the device is disfunctional and a place holder, which 107 * Check, if the device is disfunctional and a place holder, which
91 * needs to be handled by the broadcast device. 108 * needs to be handled by the broadcast device.
@@ -105,6 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
105 */ 122 */
106 if (!tick_device_is_functional(dev)) { 123 if (!tick_device_is_functional(dev)) {
107 dev->event_handler = tick_handle_periodic; 124 dev->event_handler = tick_handle_periodic;
125 tick_device_setup_broadcast_func(dev);
108 cpumask_set_cpu(cpu, tick_get_broadcast_mask()); 126 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
109 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 127 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
110 ret = 1; 128 ret = 1;
@@ -116,15 +134,33 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
116 */ 134 */
117 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 135 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
118 int cpu = smp_processor_id(); 136 int cpu = smp_processor_id();
119
120 cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 137 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
121 tick_broadcast_clear_oneshot(cpu); 138 tick_broadcast_clear_oneshot(cpu);
139 } else {
140 tick_device_setup_broadcast_func(dev);
122 } 141 }
123 } 142 }
124 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 143 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
125 return ret; 144 return ret;
126} 145}
127 146
147#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
148int tick_receive_broadcast(void)
149{
150 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
151 struct clock_event_device *evt = td->evtdev;
152
153 if (!evt)
154 return -ENODEV;
155
156 if (!evt->event_handler)
157 return -EINVAL;
158
159 evt->event_handler(evt);
160 return 0;
161}
162#endif
163
128/* 164/*
129 * Broadcast the event to the cpus, which are set in the mask (mangled). 165 * Broadcast the event to the cpus, which are set in the mask (mangled).
130 */ 166 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d58e552d9fd1..314b9ee07edf 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
20#include <linux/profile.h> 20#include <linux/profile.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/irq_work.h>
23 24
24#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
25 26
@@ -28,7 +29,7 @@
28/* 29/*
29 * Per cpu nohz control structure 30 * Per cpu nohz control structure
30 */ 31 */
31static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 32DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
32 33
33/* 34/*
34 * The time, when the last jiffy update happened. Protected by jiffies_lock. 35 * The time, when the last jiffy update happened. Protected by jiffies_lock.
@@ -331,8 +332,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
331 time_delta = timekeeping_max_deferment(); 332 time_delta = timekeeping_max_deferment();
332 } while (read_seqretry(&jiffies_lock, seq)); 333 } while (read_seqretry(&jiffies_lock, seq));
333 334
334 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || 335 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
335 arch_needs_cpu(cpu)) { 336 arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
336 next_jiffies = last_jiffies + 1; 337 next_jiffies = last_jiffies + 1;
337 delta_jiffies = 1; 338 delta_jiffies = 1;
338 } else { 339 } else {
@@ -631,8 +632,11 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
631 632
632static void tick_nohz_account_idle_ticks(struct tick_sched *ts) 633static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
633{ 634{
634#ifndef CONFIG_VIRT_CPU_ACCOUNTING 635#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
635 unsigned long ticks; 636 unsigned long ticks;
637
638 if (vtime_accounting_enabled())
639 return;
636 /* 640 /*
637 * We stopped the tick in idle. Update process times would miss the 641 * We stopped the tick in idle. Update process times would miss the
638 * time we slept as update_process_times does only a 1 tick 642 * time we slept as update_process_times does only a 1 tick
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cbc6acb0db3f..1e35515a875e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -29,6 +29,9 @@ static struct timekeeper timekeeper;
29/* flag for if timekeeping is suspended */ 29/* flag for if timekeeping is suspended */
30int __read_mostly timekeeping_suspended; 30int __read_mostly timekeeping_suspended;
31 31
32/* Flag for if there is a persistent clock on this platform */
33bool __read_mostly persistent_clock_exist = false;
34
32static inline void tk_normalize_xtime(struct timekeeper *tk) 35static inline void tk_normalize_xtime(struct timekeeper *tk)
33{ 36{
34 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { 37 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
@@ -264,19 +267,18 @@ static void timekeeping_forward_now(struct timekeeper *tk)
264} 267}
265 268
266/** 269/**
267 * getnstimeofday - Returns the time of day in a timespec 270 * __getnstimeofday - Returns the time of day in a timespec.
268 * @ts: pointer to the timespec to be set 271 * @ts: pointer to the timespec to be set
269 * 272 *
270 * Returns the time of day in a timespec. 273 * Updates the time of day in the timespec.
274 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
271 */ 275 */
272void getnstimeofday(struct timespec *ts) 276int __getnstimeofday(struct timespec *ts)
273{ 277{
274 struct timekeeper *tk = &timekeeper; 278 struct timekeeper *tk = &timekeeper;
275 unsigned long seq; 279 unsigned long seq;
276 s64 nsecs = 0; 280 s64 nsecs = 0;
277 281
278 WARN_ON(timekeeping_suspended);
279
280 do { 282 do {
281 seq = read_seqbegin(&tk->lock); 283 seq = read_seqbegin(&tk->lock);
282 284
@@ -287,6 +289,26 @@ void getnstimeofday(struct timespec *ts)
287 289
288 ts->tv_nsec = 0; 290 ts->tv_nsec = 0;
289 timespec_add_ns(ts, nsecs); 291 timespec_add_ns(ts, nsecs);
292
293 /*
294 * Do not bail out early, in case there were callers still using
295 * the value, even in the face of the WARN_ON.
296 */
297 if (unlikely(timekeeping_suspended))
298 return -EAGAIN;
299 return 0;
300}
301EXPORT_SYMBOL(__getnstimeofday);
302
303/**
304 * getnstimeofday - Returns the time of day in a timespec.
305 * @ts: pointer to the timespec to be set
306 *
307 * Returns the time of day in a timespec (WARN if suspended).
308 */
309void getnstimeofday(struct timespec *ts)
310{
311 WARN_ON(__getnstimeofday(ts));
290} 312}
291EXPORT_SYMBOL(getnstimeofday); 313EXPORT_SYMBOL(getnstimeofday);
292 314
@@ -640,12 +662,14 @@ void __init timekeeping_init(void)
640 struct timespec now, boot, tmp; 662 struct timespec now, boot, tmp;
641 663
642 read_persistent_clock(&now); 664 read_persistent_clock(&now);
665
643 if (!timespec_valid_strict(&now)) { 666 if (!timespec_valid_strict(&now)) {
644 pr_warn("WARNING: Persistent clock returned invalid value!\n" 667 pr_warn("WARNING: Persistent clock returned invalid value!\n"
645 " Check your CMOS/BIOS settings.\n"); 668 " Check your CMOS/BIOS settings.\n");
646 now.tv_sec = 0; 669 now.tv_sec = 0;
647 now.tv_nsec = 0; 670 now.tv_nsec = 0;
648 } 671 } else if (now.tv_sec || now.tv_nsec)
672 persistent_clock_exist = true;
649 673
650 read_boot_clock(&boot); 674 read_boot_clock(&boot);
651 if (!timespec_valid_strict(&boot)) { 675 if (!timespec_valid_strict(&boot)) {
@@ -718,11 +742,12 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
718{ 742{
719 struct timekeeper *tk = &timekeeper; 743 struct timekeeper *tk = &timekeeper;
720 unsigned long flags; 744 unsigned long flags;
721 struct timespec ts;
722 745
723 /* Make sure we don't set the clock twice */ 746 /*
724 read_persistent_clock(&ts); 747 * Make sure we don't set the clock twice, as timekeeping_resume()
725 if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) 748 * already did it
749 */
750 if (has_persistent_clock())
726 return; 751 return;
727 752
728 write_seqlock_irqsave(&tk->lock, flags); 753 write_seqlock_irqsave(&tk->lock, flags);
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
index eb51d76e058a..3f42652a6a37 100644
--- a/kernel/timeconst.pl
+++ b/kernel/timeconst.pl
@@ -369,10 +369,8 @@ if ($hz eq '--can') {
369 die "Usage: $0 HZ\n"; 369 die "Usage: $0 HZ\n";
370 } 370 }
371 371
372 @val = @{$canned_values{$hz}}; 372 $cv = $canned_values{$hz};
373 if (!defined(@val)) { 373 @val = defined($cv) ? @$cv : compute_values($hz);
374 @val = compute_values($hz);
375 }
376 output($hz, @val); 374 output($hz, @val);
377} 375}
378exit 0; 376exit 0;
diff --git a/kernel/timer.c b/kernel/timer.c
index 367d00858482..dbf7a78a1ef1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -39,6 +39,7 @@
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/irq_work.h> 40#include <linux/irq_work.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/sched/sysctl.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43 44
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
@@ -1351,7 +1352,6 @@ void update_process_times(int user_tick)
1351 account_process_tick(p, user_tick); 1352 account_process_tick(p, user_tick);
1352 run_local_timers(); 1353 run_local_timers();
1353 rcu_check_callbacks(cpu, user_tick); 1354 rcu_check_callbacks(cpu, user_tick);
1354 printk_tick();
1355#ifdef CONFIG_IRQ_WORK 1355#ifdef CONFIG_IRQ_WORK
1356 if (in_irq()) 1356 if (in_irq())
1357 irq_work_run(); 1357 irq_work_run();
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5d89335a485f..192473b22799 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -39,6 +39,9 @@ config HAVE_DYNAMIC_FTRACE
39 help 39 help
40 See Documentation/trace/ftrace-design.txt 40 See Documentation/trace/ftrace-design.txt
41 41
42config HAVE_DYNAMIC_FTRACE_WITH_REGS
43 bool
44
42config HAVE_FTRACE_MCOUNT_RECORD 45config HAVE_FTRACE_MCOUNT_RECORD
43 bool 46 bool
44 help 47 help
@@ -78,21 +81,6 @@ config EVENT_TRACING
78 select CONTEXT_SWITCH_TRACER 81 select CONTEXT_SWITCH_TRACER
79 bool 82 bool
80 83
81config EVENT_POWER_TRACING_DEPRECATED
82 depends on EVENT_TRACING
83 bool "Deprecated power event trace API, to be removed"
84 default y
85 help
86 Provides old power event types:
87 C-state/idle accounting events:
88 power:power_start
89 power:power_end
90 and old cpufreq accounting event:
91 power:power_frequency
92 This is for userspace compatibility
93 and will vanish after 5 kernel iterations,
94 namely 3.1.
95
96config CONTEXT_SWITCH_TRACER 84config CONTEXT_SWITCH_TRACER
97 bool 85 bool
98 86
@@ -250,6 +238,16 @@ config FTRACE_SYSCALLS
250 help 238 help
251 Basic tracer to catch the syscall entry and exit events. 239 Basic tracer to catch the syscall entry and exit events.
252 240
241config TRACER_SNAPSHOT
242 bool "Create a snapshot trace buffer"
243 select TRACER_MAX_TRACE
244 help
245 Allow tracing users to take snapshot of the current buffer using the
246 ftrace interface, e.g.:
247
248 echo 1 > /sys/kernel/debug/tracing/snapshot
249 cat snapshot
250
253config TRACE_BRANCH_PROFILING 251config TRACE_BRANCH_PROFILING
254 bool 252 bool
255 select GENERIC_TRACER 253 select GENERIC_TRACER
@@ -434,6 +432,11 @@ config DYNAMIC_FTRACE
434 were made. If so, it runs stop_machine (stops all CPUS) 432 were made. If so, it runs stop_machine (stops all CPUS)
435 and modifies the code to jump over the call to ftrace. 433 and modifies the code to jump over the call to ftrace.
436 434
435config DYNAMIC_FTRACE_WITH_REGS
436 def_bool y
437 depends on DYNAMIC_FTRACE
438 depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
439
437config FUNCTION_PROFILER 440config FUNCTION_PROFILER
438 bool "Kernel function profiler" 441 bool "Kernel function profiler"
439 depends on FUNCTION_TRACER 442 depends on FUNCTION_TRACER
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c0bd0308741c..71259e2b6b61 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -147,7 +147,7 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
147 return; 147 return;
148 148
149 local_irq_save(flags); 149 local_irq_save(flags);
150 buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); 150 buf = this_cpu_ptr(bt->msg_data);
151 va_start(args, fmt); 151 va_start(args, fmt);
152 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); 152 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
153 va_end(args); 153 va_end(args);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ffe4c5ad3f3..ce8c3d68292f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
111#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) 111#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
112#endif 112#endif
113 113
114/*
115 * Traverse the ftrace_global_list, invoking all entries. The reason that we
116 * can use rcu_dereference_raw() is that elements removed from this list
117 * are simply leaked, so there is no need to interact with a grace-period
118 * mechanism. The rcu_dereference_raw() calls are needed to handle
119 * concurrent insertions into the ftrace_global_list.
120 *
121 * Silly Alpha and silly pointer-speculation compiler optimizations!
122 */
123#define do_for_each_ftrace_op(op, list) \
124 op = rcu_dereference_raw(list); \
125 do
126
127/*
128 * Optimized for just a single item in the list (as that is the normal case).
129 */
130#define while_for_each_ftrace_op(op) \
131 while (likely(op = rcu_dereference_raw((op)->next)) && \
132 unlikely((op) != &ftrace_list_end))
133
114/** 134/**
115 * ftrace_nr_registered_ops - return number of ops registered 135 * ftrace_nr_registered_ops - return number of ops registered
116 * 136 *
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
132 return cnt; 152 return cnt;
133} 153}
134 154
135/*
136 * Traverse the ftrace_global_list, invoking all entries. The reason that we
137 * can use rcu_dereference_raw() is that elements removed from this list
138 * are simply leaked, so there is no need to interact with a grace-period
139 * mechanism. The rcu_dereference_raw() calls are needed to handle
140 * concurrent insertions into the ftrace_global_list.
141 *
142 * Silly Alpha and silly pointer-speculation compiler optimizations!
143 */
144static void 155static void
145ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, 156ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
146 struct ftrace_ops *op, struct pt_regs *regs) 157 struct ftrace_ops *op, struct pt_regs *regs)
147{ 158{
148 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) 159 int bit;
160
161 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
162 if (bit < 0)
149 return; 163 return;
150 164
151 trace_recursion_set(TRACE_GLOBAL_BIT); 165 do_for_each_ftrace_op(op, ftrace_global_list) {
152 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
153 while (op != &ftrace_list_end) {
154 op->func(ip, parent_ip, op, regs); 166 op->func(ip, parent_ip, op, regs);
155 op = rcu_dereference_raw(op->next); /*see above*/ 167 } while_for_each_ftrace_op(op);
156 }; 168
157 trace_recursion_clear(TRACE_GLOBAL_BIT); 169 trace_clear_recursion(bit);
158} 170}
159 171
160static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 172static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -221,10 +233,24 @@ static void update_global_ops(void)
221 * registered callers. 233 * registered callers.
222 */ 234 */
223 if (ftrace_global_list == &ftrace_list_end || 235 if (ftrace_global_list == &ftrace_list_end ||
224 ftrace_global_list->next == &ftrace_list_end) 236 ftrace_global_list->next == &ftrace_list_end) {
225 func = ftrace_global_list->func; 237 func = ftrace_global_list->func;
226 else 238 /*
239 * As we are calling the function directly.
240 * If it does not have recursion protection,
241 * the function_trace_op needs to be updated
242 * accordingly.
243 */
244 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
245 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
246 else
247 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
248 } else {
227 func = ftrace_global_list_func; 249 func = ftrace_global_list_func;
250 /* The list has its own recursion protection. */
251 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
252 }
253
228 254
229 /* If we filter on pids, update to use the pid function */ 255 /* If we filter on pids, update to use the pid function */
230 if (!list_empty(&ftrace_pids)) { 256 if (!list_empty(&ftrace_pids)) {
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
337 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) 363 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
338 return -EINVAL; 364 return -EINVAL;
339 365
340#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS 366#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
341 /* 367 /*
342 * If the ftrace_ops specifies SAVE_REGS, then it only can be used 368 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
343 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. 369 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
@@ -3998,7 +4024,7 @@ static int ftrace_module_notify(struct notifier_block *self,
3998 4024
3999struct notifier_block ftrace_module_nb = { 4025struct notifier_block ftrace_module_nb = {
4000 .notifier_call = ftrace_module_notify, 4026 .notifier_call = ftrace_module_notify,
4001 .priority = 0, 4027 .priority = INT_MAX, /* Run before anything that can use kprobes */
4002}; 4028};
4003 4029
4004extern unsigned long __start_mcount_loc[]; 4030extern unsigned long __start_mcount_loc[];
@@ -4090,14 +4116,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4090 */ 4116 */
4091 preempt_disable_notrace(); 4117 preempt_disable_notrace();
4092 trace_recursion_set(TRACE_CONTROL_BIT); 4118 trace_recursion_set(TRACE_CONTROL_BIT);
4093 op = rcu_dereference_raw(ftrace_control_list); 4119 do_for_each_ftrace_op(op, ftrace_control_list) {
4094 while (op != &ftrace_list_end) {
4095 if (!ftrace_function_local_disabled(op) && 4120 if (!ftrace_function_local_disabled(op) &&
4096 ftrace_ops_test(op, ip)) 4121 ftrace_ops_test(op, ip))
4097 op->func(ip, parent_ip, op, regs); 4122 op->func(ip, parent_ip, op, regs);
4098 4123 } while_for_each_ftrace_op(op);
4099 op = rcu_dereference_raw(op->next);
4100 };
4101 trace_recursion_clear(TRACE_CONTROL_BIT); 4124 trace_recursion_clear(TRACE_CONTROL_BIT);
4102 preempt_enable_notrace(); 4125 preempt_enable_notrace();
4103} 4126}
@@ -4112,27 +4135,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4112 struct ftrace_ops *ignored, struct pt_regs *regs) 4135 struct ftrace_ops *ignored, struct pt_regs *regs)
4113{ 4136{
4114 struct ftrace_ops *op; 4137 struct ftrace_ops *op;
4138 int bit;
4115 4139
4116 if (function_trace_stop) 4140 if (function_trace_stop)
4117 return; 4141 return;
4118 4142
4119 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) 4143 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4144 if (bit < 0)
4120 return; 4145 return;
4121 4146
4122 trace_recursion_set(TRACE_INTERNAL_BIT);
4123 /* 4147 /*
4124 * Some of the ops may be dynamically allocated, 4148 * Some of the ops may be dynamically allocated,
4125 * they must be freed after a synchronize_sched(). 4149 * they must be freed after a synchronize_sched().
4126 */ 4150 */
4127 preempt_disable_notrace(); 4151 preempt_disable_notrace();
4128 op = rcu_dereference_raw(ftrace_ops_list); 4152 do_for_each_ftrace_op(op, ftrace_ops_list) {
4129 while (op != &ftrace_list_end) {
4130 if (ftrace_ops_test(op, ip)) 4153 if (ftrace_ops_test(op, ip))
4131 op->func(ip, parent_ip, op, regs); 4154 op->func(ip, parent_ip, op, regs);
4132 op = rcu_dereference_raw(op->next); 4155 } while_for_each_ftrace_op(op);
4133 };
4134 preempt_enable_notrace(); 4156 preempt_enable_notrace();
4135 trace_recursion_clear(TRACE_INTERNAL_BIT); 4157 trace_clear_recursion(bit);
4136} 4158}
4137 4159
4138/* 4160/*
@@ -4143,8 +4165,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4143 * Archs are to support both the regs and ftrace_ops at the same time. 4165 * Archs are to support both the regs and ftrace_ops at the same time.
4144 * If they support ftrace_ops, it is assumed they support regs. 4166 * If they support ftrace_ops, it is assumed they support regs.
4145 * If call backs want to use regs, they must either check for regs 4167 * If call backs want to use regs, they must either check for regs
4146 * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS. 4168 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4147 * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved. 4169 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4148 * An architecture can pass partial regs with ftrace_ops and still 4170 * An architecture can pass partial regs with ftrace_ops and still
4149 * set the ARCH_SUPPORT_FTARCE_OPS. 4171 * set the ARCH_SUPPORT_FTARCE_OPS.
4150 */ 4172 */
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index f55fcf61b223..1c71382b283d 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -13,8 +13,5 @@
13#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
14#include <trace/events/power.h> 14#include <trace/events/power.h>
15 15
16#ifdef EVENT_POWER_TRACING_DEPRECATED
17EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
18#endif
19EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); 16EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
20 17
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ce8514feedcd..7244acde77b0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3,8 +3,10 @@
3 * 3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */ 5 */
6#include <linux/ftrace_event.h>
6#include <linux/ring_buffer.h> 7#include <linux/ring_buffer.h>
7#include <linux/trace_clock.h> 8#include <linux/trace_clock.h>
9#include <linux/trace_seq.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/debugfs.h> 11#include <linux/debugfs.h>
10#include <linux/uaccess.h> 12#include <linux/uaccess.h>
@@ -21,7 +23,6 @@
21#include <linux/fs.h> 23#include <linux/fs.h>
22 24
23#include <asm/local.h> 25#include <asm/local.h>
24#include "trace.h"
25 26
26static void update_pages_handler(struct work_struct *work); 27static void update_pages_handler(struct work_struct *work);
27 28
@@ -2432,41 +2433,76 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2432 2433
2433#ifdef CONFIG_TRACING 2434#ifdef CONFIG_TRACING
2434 2435
2435#define TRACE_RECURSIVE_DEPTH 16 2436/*
2437 * The lock and unlock are done within a preempt disable section.
2438 * The current_context per_cpu variable can only be modified
2439 * by the current task between lock and unlock. But it can
2440 * be modified more than once via an interrupt. To pass this
2441 * information from the lock to the unlock without having to
2442 * access the 'in_interrupt()' functions again (which do show
2443 * a bit of overhead in something as critical as function tracing,
2444 * we use a bitmask trick.
2445 *
2446 * bit 0 = NMI context
2447 * bit 1 = IRQ context
2448 * bit 2 = SoftIRQ context
2449 * bit 3 = normal context.
2450 *
2451 * This works because this is the order of contexts that can
2452 * preempt other contexts. A SoftIRQ never preempts an IRQ
2453 * context.
2454 *
2455 * When the context is determined, the corresponding bit is
2456 * checked and set (if it was set, then a recursion of that context
2457 * happened).
2458 *
2459 * On unlock, we need to clear this bit. To do so, just subtract
2460 * 1 from the current_context and AND it to itself.
2461 *
2462 * (binary)
2463 * 101 - 1 = 100
2464 * 101 & 100 = 100 (clearing bit zero)
2465 *
2466 * 1010 - 1 = 1001
2467 * 1010 & 1001 = 1000 (clearing bit 1)
2468 *
2469 * The least significant bit can be cleared this way, and it
2470 * just so happens that it is the same bit corresponding to
2471 * the current context.
2472 */
2473static DEFINE_PER_CPU(unsigned int, current_context);
2436 2474
2437/* Keep this code out of the fast path cache */ 2475static __always_inline int trace_recursive_lock(void)
2438static noinline void trace_recursive_fail(void)
2439{ 2476{
2440 /* Disable all tracing before we do anything else */ 2477 unsigned int val = this_cpu_read(current_context);
2441 tracing_off_permanent(); 2478 int bit;
2442
2443 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2444 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2445 trace_recursion_buffer(),
2446 hardirq_count() >> HARDIRQ_SHIFT,
2447 softirq_count() >> SOFTIRQ_SHIFT,
2448 in_nmi());
2449
2450 WARN_ON_ONCE(1);
2451}
2452 2479
2453static inline int trace_recursive_lock(void) 2480 if (in_interrupt()) {
2454{ 2481 if (in_nmi())
2455 trace_recursion_inc(); 2482 bit = 0;
2483 else if (in_irq())
2484 bit = 1;
2485 else
2486 bit = 2;
2487 } else
2488 bit = 3;
2456 2489
2457 if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) 2490 if (unlikely(val & (1 << bit)))
2458 return 0; 2491 return 1;
2459 2492
2460 trace_recursive_fail(); 2493 val |= (1 << bit);
2494 this_cpu_write(current_context, val);
2461 2495
2462 return -1; 2496 return 0;
2463} 2497}
2464 2498
2465static inline void trace_recursive_unlock(void) 2499static __always_inline void trace_recursive_unlock(void)
2466{ 2500{
2467 WARN_ON_ONCE(!trace_recursion_buffer()); 2501 unsigned int val = this_cpu_read(current_context);
2468 2502
2469 trace_recursion_dec(); 2503 val--;
2504 val &= this_cpu_read(current_context);
2505 this_cpu_write(current_context, val);
2470} 2506}
2471 2507
2472#else 2508#else
@@ -3067,6 +3103,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3067EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 3103EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3068 3104
3069/** 3105/**
3106 * ring_buffer_read_events_cpu - get the number of events successfully read
3107 * @buffer: The ring buffer
3108 * @cpu: The per CPU buffer to get the number of events read
3109 */
3110unsigned long
3111ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3112{
3113 struct ring_buffer_per_cpu *cpu_buffer;
3114
3115 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3116 return 0;
3117
3118 cpu_buffer = buffer->buffers[cpu];
3119 return cpu_buffer->read;
3120}
3121EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3122
3123/**
3070 * ring_buffer_entries - get the number of entries in a buffer 3124 * ring_buffer_entries - get the number of entries in a buffer
3071 * @buffer: The ring buffer 3125 * @buffer: The ring buffer
3072 * 3126 *
@@ -3425,7 +3479,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
3425 /* check for end of page padding */ 3479 /* check for end of page padding */
3426 if ((iter->head >= rb_page_size(iter->head_page)) && 3480 if ((iter->head >= rb_page_size(iter->head_page)) &&
3427 (iter->head_page != cpu_buffer->commit_page)) 3481 (iter->head_page != cpu_buffer->commit_page))
3428 rb_advance_iter(iter); 3482 rb_inc_iter(iter);
3429} 3483}
3430 3484
3431static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 3485static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c13e46d7d24..c2e2c2310374 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
39#include <linux/poll.h> 39#include <linux/poll.h>
40#include <linux/nmi.h> 40#include <linux/nmi.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/sched/rt.h>
42 43
43#include "trace.h" 44#include "trace.h"
44#include "trace_output.h" 45#include "trace_output.h"
@@ -249,7 +250,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
249static struct tracer *trace_types __read_mostly; 250static struct tracer *trace_types __read_mostly;
250 251
251/* current_trace points to the tracer that is currently active */ 252/* current_trace points to the tracer that is currently active */
252static struct tracer *current_trace __read_mostly; 253static struct tracer *current_trace __read_mostly = &nop_trace;
253 254
254/* 255/*
255 * trace_types_lock is used to protect the trace_types list. 256 * trace_types_lock is used to protect the trace_types list.
@@ -709,10 +710,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
709 return; 710 return;
710 711
711 WARN_ON_ONCE(!irqs_disabled()); 712 WARN_ON_ONCE(!irqs_disabled());
712 if (!current_trace->use_max_tr) { 713
713 WARN_ON_ONCE(1); 714 if (!current_trace->allocated_snapshot) {
715 /* Only the nop tracer should hit this when disabling */
716 WARN_ON_ONCE(current_trace != &nop_trace);
714 return; 717 return;
715 } 718 }
719
716 arch_spin_lock(&ftrace_max_lock); 720 arch_spin_lock(&ftrace_max_lock);
717 721
718 tr->buffer = max_tr.buffer; 722 tr->buffer = max_tr.buffer;
@@ -739,10 +743,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
739 return; 743 return;
740 744
741 WARN_ON_ONCE(!irqs_disabled()); 745 WARN_ON_ONCE(!irqs_disabled());
742 if (!current_trace->use_max_tr) { 746 if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
743 WARN_ON_ONCE(1);
744 return; 747 return;
745 }
746 748
747 arch_spin_lock(&ftrace_max_lock); 749 arch_spin_lock(&ftrace_max_lock);
748 750
@@ -862,10 +864,13 @@ int register_tracer(struct tracer *type)
862 864
863 current_trace = type; 865 current_trace = type;
864 866
865 /* If we expanded the buffers, make sure the max is expanded too */ 867 if (type->use_max_tr) {
866 if (ring_buffer_expanded && type->use_max_tr) 868 /* If we expanded the buffers, make sure the max is expanded too */
867 ring_buffer_resize(max_tr.buffer, trace_buf_size, 869 if (ring_buffer_expanded)
868 RING_BUFFER_ALL_CPUS); 870 ring_buffer_resize(max_tr.buffer, trace_buf_size,
871 RING_BUFFER_ALL_CPUS);
872 type->allocated_snapshot = true;
873 }
869 874
870 /* the test is responsible for initializing and enabling */ 875 /* the test is responsible for initializing and enabling */
871 pr_info("Testing tracer %s: ", type->name); 876 pr_info("Testing tracer %s: ", type->name);
@@ -881,10 +886,14 @@ int register_tracer(struct tracer *type)
881 /* Only reset on passing, to avoid touching corrupted buffers */ 886 /* Only reset on passing, to avoid touching corrupted buffers */
882 tracing_reset_online_cpus(tr); 887 tracing_reset_online_cpus(tr);
883 888
884 /* Shrink the max buffer again */ 889 if (type->use_max_tr) {
885 if (ring_buffer_expanded && type->use_max_tr) 890 type->allocated_snapshot = false;
886 ring_buffer_resize(max_tr.buffer, 1, 891
887 RING_BUFFER_ALL_CPUS); 892 /* Shrink the max buffer again */
893 if (ring_buffer_expanded)
894 ring_buffer_resize(max_tr.buffer, 1,
895 RING_BUFFER_ALL_CPUS);
896 }
888 897
889 printk(KERN_CONT "PASSED\n"); 898 printk(KERN_CONT "PASSED\n");
890 } 899 }
@@ -922,6 +931,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
922{ 931{
923 struct ring_buffer *buffer = tr->buffer; 932 struct ring_buffer *buffer = tr->buffer;
924 933
934 if (!buffer)
935 return;
936
925 ring_buffer_record_disable(buffer); 937 ring_buffer_record_disable(buffer);
926 938
927 /* Make sure all commits have finished */ 939 /* Make sure all commits have finished */
@@ -936,6 +948,9 @@ void tracing_reset_online_cpus(struct trace_array *tr)
936 struct ring_buffer *buffer = tr->buffer; 948 struct ring_buffer *buffer = tr->buffer;
937 int cpu; 949 int cpu;
938 950
951 if (!buffer)
952 return;
953
939 ring_buffer_record_disable(buffer); 954 ring_buffer_record_disable(buffer);
940 955
941 /* Make sure all commits have finished */ 956 /* Make sure all commits have finished */
@@ -1167,7 +1182,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1167 1182
1168 entry->preempt_count = pc & 0xff; 1183 entry->preempt_count = pc & 0xff;
1169 entry->pid = (tsk) ? tsk->pid : 0; 1184 entry->pid = (tsk) ? tsk->pid : 0;
1170 entry->padding = 0;
1171 entry->flags = 1185 entry->flags =
1172#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 1186#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1173 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 1187 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1335,7 +1349,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
1335 */ 1349 */
1336 preempt_disable_notrace(); 1350 preempt_disable_notrace();
1337 1351
1338 use_stack = ++__get_cpu_var(ftrace_stack_reserve); 1352 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1339 /* 1353 /*
1340 * We don't need any atomic variables, just a barrier. 1354 * We don't need any atomic variables, just a barrier.
1341 * If an interrupt comes in, we don't care, because it would 1355 * If an interrupt comes in, we don't care, because it would
@@ -1389,7 +1403,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
1389 out: 1403 out:
1390 /* Again, don't let gcc optimize things here */ 1404 /* Again, don't let gcc optimize things here */
1391 barrier(); 1405 barrier();
1392 __get_cpu_var(ftrace_stack_reserve)--; 1406 __this_cpu_dec(ftrace_stack_reserve);
1393 preempt_enable_notrace(); 1407 preempt_enable_notrace();
1394 1408
1395} 1409}
@@ -1517,7 +1531,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1517static char *get_trace_buf(void) 1531static char *get_trace_buf(void)
1518{ 1532{
1519 struct trace_buffer_struct *percpu_buffer; 1533 struct trace_buffer_struct *percpu_buffer;
1520 struct trace_buffer_struct *buffer;
1521 1534
1522 /* 1535 /*
1523 * If we have allocated per cpu buffers, then we do not 1536 * If we have allocated per cpu buffers, then we do not
@@ -1535,9 +1548,7 @@ static char *get_trace_buf(void)
1535 if (!percpu_buffer) 1548 if (!percpu_buffer)
1536 return NULL; 1549 return NULL;
1537 1550
1538 buffer = per_cpu_ptr(percpu_buffer, smp_processor_id()); 1551 return this_cpu_ptr(&percpu_buffer->buffer[0]);
1539
1540 return buffer->buffer;
1541} 1552}
1542 1553
1543static int alloc_percpu_trace_buffer(void) 1554static int alloc_percpu_trace_buffer(void)
@@ -1942,21 +1953,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1942static void *s_start(struct seq_file *m, loff_t *pos) 1953static void *s_start(struct seq_file *m, loff_t *pos)
1943{ 1954{
1944 struct trace_iterator *iter = m->private; 1955 struct trace_iterator *iter = m->private;
1945 static struct tracer *old_tracer;
1946 int cpu_file = iter->cpu_file; 1956 int cpu_file = iter->cpu_file;
1947 void *p = NULL; 1957 void *p = NULL;
1948 loff_t l = 0; 1958 loff_t l = 0;
1949 int cpu; 1959 int cpu;
1950 1960
1951 /* copy the tracer to avoid using a global lock all around */ 1961 /*
1962 * copy the tracer to avoid using a global lock all around.
1963 * iter->trace is a copy of current_trace, the pointer to the
1964 * name may be used instead of a strcmp(), as iter->trace->name
1965 * will point to the same string as current_trace->name.
1966 */
1952 mutex_lock(&trace_types_lock); 1967 mutex_lock(&trace_types_lock);
1953 if (unlikely(old_tracer != current_trace && current_trace)) { 1968 if (unlikely(current_trace && iter->trace->name != current_trace->name))
1954 old_tracer = current_trace;
1955 *iter->trace = *current_trace; 1969 *iter->trace = *current_trace;
1956 }
1957 mutex_unlock(&trace_types_lock); 1970 mutex_unlock(&trace_types_lock);
1958 1971
1959 atomic_inc(&trace_record_cmdline_disabled); 1972 if (iter->snapshot && iter->trace->use_max_tr)
1973 return ERR_PTR(-EBUSY);
1974
1975 if (!iter->snapshot)
1976 atomic_inc(&trace_record_cmdline_disabled);
1960 1977
1961 if (*pos != iter->pos) { 1978 if (*pos != iter->pos) {
1962 iter->ent = NULL; 1979 iter->ent = NULL;
@@ -1995,7 +2012,11 @@ static void s_stop(struct seq_file *m, void *p)
1995{ 2012{
1996 struct trace_iterator *iter = m->private; 2013 struct trace_iterator *iter = m->private;
1997 2014
1998 atomic_dec(&trace_record_cmdline_disabled); 2015 if (iter->snapshot && iter->trace->use_max_tr)
2016 return;
2017
2018 if (!iter->snapshot)
2019 atomic_dec(&trace_record_cmdline_disabled);
1999 trace_access_unlock(iter->cpu_file); 2020 trace_access_unlock(iter->cpu_file);
2000 trace_event_read_unlock(); 2021 trace_event_read_unlock();
2001} 2022}
@@ -2080,8 +2101,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2080 unsigned long total; 2101 unsigned long total;
2081 const char *name = "preemption"; 2102 const char *name = "preemption";
2082 2103
2083 if (type) 2104 name = type->name;
2084 name = type->name;
2085 2105
2086 get_total_entries(tr, &total, &entries); 2106 get_total_entries(tr, &total, &entries);
2087 2107
@@ -2430,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
2430}; 2450};
2431 2451
2432static struct trace_iterator * 2452static struct trace_iterator *
2433__tracing_open(struct inode *inode, struct file *file) 2453__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2434{ 2454{
2435 long cpu_file = (long) inode->i_private; 2455 long cpu_file = (long) inode->i_private;
2436 struct trace_iterator *iter; 2456 struct trace_iterator *iter;
@@ -2457,16 +2477,16 @@ __tracing_open(struct inode *inode, struct file *file)
2457 if (!iter->trace) 2477 if (!iter->trace)
2458 goto fail; 2478 goto fail;
2459 2479
2460 if (current_trace) 2480 *iter->trace = *current_trace;
2461 *iter->trace = *current_trace;
2462 2481
2463 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2482 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2464 goto fail; 2483 goto fail;
2465 2484
2466 if (current_trace && current_trace->print_max) 2485 if (current_trace->print_max || snapshot)
2467 iter->tr = &max_tr; 2486 iter->tr = &max_tr;
2468 else 2487 else
2469 iter->tr = &global_trace; 2488 iter->tr = &global_trace;
2489 iter->snapshot = snapshot;
2470 iter->pos = -1; 2490 iter->pos = -1;
2471 mutex_init(&iter->mutex); 2491 mutex_init(&iter->mutex);
2472 iter->cpu_file = cpu_file; 2492 iter->cpu_file = cpu_file;
@@ -2483,8 +2503,9 @@ __tracing_open(struct inode *inode, struct file *file)
2483 if (trace_clocks[trace_clock_id].in_ns) 2503 if (trace_clocks[trace_clock_id].in_ns)
2484 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2504 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2485 2505
2486 /* stop the trace while dumping */ 2506 /* stop the trace while dumping if we are not opening "snapshot" */
2487 tracing_stop(); 2507 if (!iter->snapshot)
2508 tracing_stop();
2488 2509
2489 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2510 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2490 for_each_tracing_cpu(cpu) { 2511 for_each_tracing_cpu(cpu) {
@@ -2547,8 +2568,9 @@ static int tracing_release(struct inode *inode, struct file *file)
2547 if (iter->trace && iter->trace->close) 2568 if (iter->trace && iter->trace->close)
2548 iter->trace->close(iter); 2569 iter->trace->close(iter);
2549 2570
2550 /* reenable tracing if it was previously enabled */ 2571 if (!iter->snapshot)
2551 tracing_start(); 2572 /* reenable tracing if it was previously enabled */
2573 tracing_start();
2552 mutex_unlock(&trace_types_lock); 2574 mutex_unlock(&trace_types_lock);
2553 2575
2554 mutex_destroy(&iter->mutex); 2576 mutex_destroy(&iter->mutex);
@@ -2576,7 +2598,7 @@ static int tracing_open(struct inode *inode, struct file *file)
2576 } 2598 }
2577 2599
2578 if (file->f_mode & FMODE_READ) { 2600 if (file->f_mode & FMODE_READ) {
2579 iter = __tracing_open(inode, file); 2601 iter = __tracing_open(inode, file, false);
2580 if (IS_ERR(iter)) 2602 if (IS_ERR(iter))
2581 ret = PTR_ERR(iter); 2603 ret = PTR_ERR(iter);
2582 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 2604 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3014,10 +3036,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
3014 int r; 3036 int r;
3015 3037
3016 mutex_lock(&trace_types_lock); 3038 mutex_lock(&trace_types_lock);
3017 if (current_trace) 3039 r = sprintf(buf, "%s\n", current_trace->name);
3018 r = sprintf(buf, "%s\n", current_trace->name);
3019 else
3020 r = sprintf(buf, "\n");
3021 mutex_unlock(&trace_types_lock); 3040 mutex_unlock(&trace_types_lock);
3022 3041
3023 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3042 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3183,6 +3202,7 @@ static int tracing_set_tracer(const char *buf)
3183 static struct trace_option_dentry *topts; 3202 static struct trace_option_dentry *topts;
3184 struct trace_array *tr = &global_trace; 3203 struct trace_array *tr = &global_trace;
3185 struct tracer *t; 3204 struct tracer *t;
3205 bool had_max_tr;
3186 int ret = 0; 3206 int ret = 0;
3187 3207
3188 mutex_lock(&trace_types_lock); 3208 mutex_lock(&trace_types_lock);
@@ -3207,9 +3227,21 @@ static int tracing_set_tracer(const char *buf)
3207 goto out; 3227 goto out;
3208 3228
3209 trace_branch_disable(); 3229 trace_branch_disable();
3210 if (current_trace && current_trace->reset) 3230 if (current_trace->reset)
3211 current_trace->reset(tr); 3231 current_trace->reset(tr);
3212 if (current_trace && current_trace->use_max_tr) { 3232
3233 had_max_tr = current_trace->allocated_snapshot;
3234 current_trace = &nop_trace;
3235
3236 if (had_max_tr && !t->use_max_tr) {
3237 /*
3238 * We need to make sure that the update_max_tr sees that
3239 * current_trace changed to nop_trace to keep it from
3240 * swapping the buffers after we resize it.
3241 * The update_max_tr is called from interrupts disabled
3242 * so a synchronized_sched() is sufficient.
3243 */
3244 synchronize_sched();
3213 /* 3245 /*
3214 * We don't free the ring buffer. instead, resize it because 3246 * We don't free the ring buffer. instead, resize it because
3215 * The max_tr ring buffer has some state (e.g. ring->clock) and 3247 * The max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3217,18 +3249,19 @@ static int tracing_set_tracer(const char *buf)
3217 */ 3249 */
3218 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); 3250 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3219 set_buffer_entries(&max_tr, 1); 3251 set_buffer_entries(&max_tr, 1);
3252 tracing_reset_online_cpus(&max_tr);
3253 current_trace->allocated_snapshot = false;
3220 } 3254 }
3221 destroy_trace_option_files(topts); 3255 destroy_trace_option_files(topts);
3222 3256
3223 current_trace = &nop_trace;
3224
3225 topts = create_trace_option_files(t); 3257 topts = create_trace_option_files(t);
3226 if (t->use_max_tr) { 3258 if (t->use_max_tr && !had_max_tr) {
3227 /* we need to make per cpu buffer sizes equivalent */ 3259 /* we need to make per cpu buffer sizes equivalent */
3228 ret = resize_buffer_duplicate_size(&max_tr, &global_trace, 3260 ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
3229 RING_BUFFER_ALL_CPUS); 3261 RING_BUFFER_ALL_CPUS);
3230 if (ret < 0) 3262 if (ret < 0)
3231 goto out; 3263 goto out;
3264 t->allocated_snapshot = true;
3232 } 3265 }
3233 3266
3234 if (t->init) { 3267 if (t->init) {
@@ -3336,8 +3369,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3336 ret = -ENOMEM; 3369 ret = -ENOMEM;
3337 goto fail; 3370 goto fail;
3338 } 3371 }
3339 if (current_trace) 3372 *iter->trace = *current_trace;
3340 *iter->trace = *current_trace;
3341 3373
3342 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3374 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3343 ret = -ENOMEM; 3375 ret = -ENOMEM;
@@ -3477,7 +3509,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
3477 size_t cnt, loff_t *ppos) 3509 size_t cnt, loff_t *ppos)
3478{ 3510{
3479 struct trace_iterator *iter = filp->private_data; 3511 struct trace_iterator *iter = filp->private_data;
3480 static struct tracer *old_tracer;
3481 ssize_t sret; 3512 ssize_t sret;
3482 3513
3483 /* return any leftover data */ 3514 /* return any leftover data */
@@ -3489,10 +3520,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
3489 3520
3490 /* copy the tracer to avoid using a global lock all around */ 3521 /* copy the tracer to avoid using a global lock all around */
3491 mutex_lock(&trace_types_lock); 3522 mutex_lock(&trace_types_lock);
3492 if (unlikely(old_tracer != current_trace && current_trace)) { 3523 if (unlikely(iter->trace->name != current_trace->name))
3493 old_tracer = current_trace;
3494 *iter->trace = *current_trace; 3524 *iter->trace = *current_trace;
3495 }
3496 mutex_unlock(&trace_types_lock); 3525 mutex_unlock(&trace_types_lock);
3497 3526
3498 /* 3527 /*
@@ -3648,7 +3677,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
3648 .ops = &tracing_pipe_buf_ops, 3677 .ops = &tracing_pipe_buf_ops,
3649 .spd_release = tracing_spd_release_pipe, 3678 .spd_release = tracing_spd_release_pipe,
3650 }; 3679 };
3651 static struct tracer *old_tracer;
3652 ssize_t ret; 3680 ssize_t ret;
3653 size_t rem; 3681 size_t rem;
3654 unsigned int i; 3682 unsigned int i;
@@ -3658,10 +3686,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
3658 3686
3659 /* copy the tracer to avoid using a global lock all around */ 3687 /* copy the tracer to avoid using a global lock all around */
3660 mutex_lock(&trace_types_lock); 3688 mutex_lock(&trace_types_lock);
3661 if (unlikely(old_tracer != current_trace && current_trace)) { 3689 if (unlikely(iter->trace->name != current_trace->name))
3662 old_tracer = current_trace;
3663 *iter->trace = *current_trace; 3690 *iter->trace = *current_trace;
3664 }
3665 mutex_unlock(&trace_types_lock); 3691 mutex_unlock(&trace_types_lock);
3666 3692
3667 mutex_lock(&iter->mutex); 3693 mutex_lock(&iter->mutex);
@@ -4037,8 +4063,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4037 * Reset the buffer so that it doesn't have incomparable timestamps. 4063 * Reset the buffer so that it doesn't have incomparable timestamps.
4038 */ 4064 */
4039 tracing_reset_online_cpus(&global_trace); 4065 tracing_reset_online_cpus(&global_trace);
4040 if (max_tr.buffer) 4066 tracing_reset_online_cpus(&max_tr);
4041 tracing_reset_online_cpus(&max_tr);
4042 4067
4043 mutex_unlock(&trace_types_lock); 4068 mutex_unlock(&trace_types_lock);
4044 4069
@@ -4054,6 +4079,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
4054 return single_open(file, tracing_clock_show, NULL); 4079 return single_open(file, tracing_clock_show, NULL);
4055} 4080}
4056 4081
4082#ifdef CONFIG_TRACER_SNAPSHOT
4083static int tracing_snapshot_open(struct inode *inode, struct file *file)
4084{
4085 struct trace_iterator *iter;
4086 int ret = 0;
4087
4088 if (file->f_mode & FMODE_READ) {
4089 iter = __tracing_open(inode, file, true);
4090 if (IS_ERR(iter))
4091 ret = PTR_ERR(iter);
4092 }
4093 return ret;
4094}
4095
4096static ssize_t
4097tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4098 loff_t *ppos)
4099{
4100 unsigned long val;
4101 int ret;
4102
4103 ret = tracing_update_buffers();
4104 if (ret < 0)
4105 return ret;
4106
4107 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4108 if (ret)
4109 return ret;
4110
4111 mutex_lock(&trace_types_lock);
4112
4113 if (current_trace->use_max_tr) {
4114 ret = -EBUSY;
4115 goto out;
4116 }
4117
4118 switch (val) {
4119 case 0:
4120 if (current_trace->allocated_snapshot) {
4121 /* free spare buffer */
4122 ring_buffer_resize(max_tr.buffer, 1,
4123 RING_BUFFER_ALL_CPUS);
4124 set_buffer_entries(&max_tr, 1);
4125 tracing_reset_online_cpus(&max_tr);
4126 current_trace->allocated_snapshot = false;
4127 }
4128 break;
4129 case 1:
4130 if (!current_trace->allocated_snapshot) {
4131 /* allocate spare buffer */
4132 ret = resize_buffer_duplicate_size(&max_tr,
4133 &global_trace, RING_BUFFER_ALL_CPUS);
4134 if (ret < 0)
4135 break;
4136 current_trace->allocated_snapshot = true;
4137 }
4138
4139 local_irq_disable();
4140 /* Now, we're going to swap */
4141 update_max_tr(&global_trace, current, smp_processor_id());
4142 local_irq_enable();
4143 break;
4144 default:
4145 if (current_trace->allocated_snapshot)
4146 tracing_reset_online_cpus(&max_tr);
4147 else
4148 ret = -EINVAL;
4149 break;
4150 }
4151
4152 if (ret >= 0) {
4153 *ppos += cnt;
4154 ret = cnt;
4155 }
4156out:
4157 mutex_unlock(&trace_types_lock);
4158 return ret;
4159}
4160#endif /* CONFIG_TRACER_SNAPSHOT */
4161
4162
4057static const struct file_operations tracing_max_lat_fops = { 4163static const struct file_operations tracing_max_lat_fops = {
4058 .open = tracing_open_generic, 4164 .open = tracing_open_generic,
4059 .read = tracing_max_lat_read, 4165 .read = tracing_max_lat_read,
@@ -4110,6 +4216,16 @@ static const struct file_operations trace_clock_fops = {
4110 .write = tracing_clock_write, 4216 .write = tracing_clock_write,
4111}; 4217};
4112 4218
4219#ifdef CONFIG_TRACER_SNAPSHOT
4220static const struct file_operations snapshot_fops = {
4221 .open = tracing_snapshot_open,
4222 .read = seq_read,
4223 .write = tracing_snapshot_write,
4224 .llseek = tracing_seek,
4225 .release = tracing_release,
4226};
4227#endif /* CONFIG_TRACER_SNAPSHOT */
4228
4113struct ftrace_buffer_info { 4229struct ftrace_buffer_info {
4114 struct trace_array *tr; 4230 struct trace_array *tr;
4115 void *spare; 4231 void *spare;
@@ -4414,6 +4530,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
4414 cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); 4530 cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
4415 trace_seq_printf(s, "dropped events: %ld\n", cnt); 4531 trace_seq_printf(s, "dropped events: %ld\n", cnt);
4416 4532
4533 cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
4534 trace_seq_printf(s, "read events: %ld\n", cnt);
4535
4417 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 4536 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4418 4537
4419 kfree(s); 4538 kfree(s);
@@ -4490,7 +4609,7 @@ struct dentry *tracing_init_dentry(void)
4490 4609
4491static struct dentry *d_percpu; 4610static struct dentry *d_percpu;
4492 4611
4493struct dentry *tracing_dentry_percpu(void) 4612static struct dentry *tracing_dentry_percpu(void)
4494{ 4613{
4495 static int once; 4614 static int once;
4496 struct dentry *d_tracer; 4615 struct dentry *d_tracer;
@@ -4906,6 +5025,11 @@ static __init int tracer_init_debugfs(void)
4906 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 5025 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4907#endif 5026#endif
4908 5027
5028#ifdef CONFIG_TRACER_SNAPSHOT
5029 trace_create_file("snapshot", 0644, d_tracer,
5030 (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
5031#endif
5032
4909 create_trace_options_dir(); 5033 create_trace_options_dir();
4910 5034
4911 for_each_tracing_cpu(cpu) 5035 for_each_tracing_cpu(cpu)
@@ -5014,6 +5138,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5014 if (disable_tracing) 5138 if (disable_tracing)
5015 ftrace_kill(); 5139 ftrace_kill();
5016 5140
5141 /* Simulate the iterator */
5017 trace_init_global_iter(&iter); 5142 trace_init_global_iter(&iter);
5018 5143
5019 for_each_tracing_cpu(cpu) { 5144 for_each_tracing_cpu(cpu) {
@@ -5025,10 +5150,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5025 /* don't look at user memory in panic mode */ 5150 /* don't look at user memory in panic mode */
5026 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 5151 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
5027 5152
5028 /* Simulate the iterator */
5029 iter.tr = &global_trace;
5030 iter.trace = current_trace;
5031
5032 switch (oops_dump_mode) { 5153 switch (oops_dump_mode) {
5033 case DUMP_ALL: 5154 case DUMP_ALL:
5034 iter.cpu_file = TRACE_PIPE_ALL_CPU; 5155 iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5173,7 +5294,7 @@ __init static int tracer_alloc_buffers(void)
5173 init_irq_work(&trace_work_wakeup, trace_wake_up); 5294 init_irq_work(&trace_work_wakeup, trace_wake_up);
5174 5295
5175 register_tracer(&nop_trace); 5296 register_tracer(&nop_trace);
5176 current_trace = &nop_trace; 5297
5177 /* All seems OK, enable tracing */ 5298 /* All seems OK, enable tracing */
5178 tracing_disabled = 0; 5299 tracing_disabled = 0;
5179 5300
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c75d7988902c..57d7e5397d56 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -287,20 +287,62 @@ struct tracer {
287 struct tracer_flags *flags; 287 struct tracer_flags *flags;
288 bool print_max; 288 bool print_max;
289 bool use_max_tr; 289 bool use_max_tr;
290 bool allocated_snapshot;
290}; 291};
291 292
292 293
293/* Only current can touch trace_recursion */ 294/* Only current can touch trace_recursion */
294#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
295#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
296 295
297/* Ring buffer has the 10 LSB bits to count */ 296/*
298#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff) 297 * For function tracing recursion:
299 298 * The order of these bits are important.
300/* for function tracing recursion */ 299 *
301#define TRACE_INTERNAL_BIT (1<<11) 300 * When function tracing occurs, the following steps are made:
302#define TRACE_GLOBAL_BIT (1<<12) 301 * If arch does not support a ftrace feature:
303#define TRACE_CONTROL_BIT (1<<13) 302 * call internal function (uses INTERNAL bits) which calls...
303 * If callback is registered to the "global" list, the list
304 * function is called and recursion checks the GLOBAL bits.
305 * then this function calls...
306 * The function callback, which can use the FTRACE bits to
307 * check for recursion.
308 *
309 * Now if the arch does not suppport a feature, and it calls
310 * the global list function which calls the ftrace callback
311 * all three of these steps will do a recursion protection.
312 * There's no reason to do one if the previous caller already
313 * did. The recursion that we are protecting against will
314 * go through the same steps again.
315 *
316 * To prevent the multiple recursion checks, if a recursion
317 * bit is set that is higher than the MAX bit of the current
318 * check, then we know that the check was made by the previous
319 * caller, and we can skip the current check.
320 */
321enum {
322 TRACE_BUFFER_BIT,
323 TRACE_BUFFER_NMI_BIT,
324 TRACE_BUFFER_IRQ_BIT,
325 TRACE_BUFFER_SIRQ_BIT,
326
327 /* Start of function recursion bits */
328 TRACE_FTRACE_BIT,
329 TRACE_FTRACE_NMI_BIT,
330 TRACE_FTRACE_IRQ_BIT,
331 TRACE_FTRACE_SIRQ_BIT,
332
333 /* GLOBAL_BITs must be greater than FTRACE_BITs */
334 TRACE_GLOBAL_BIT,
335 TRACE_GLOBAL_NMI_BIT,
336 TRACE_GLOBAL_IRQ_BIT,
337 TRACE_GLOBAL_SIRQ_BIT,
338
339 /* INTERNAL_BITs must be greater than GLOBAL_BITs */
340 TRACE_INTERNAL_BIT,
341 TRACE_INTERNAL_NMI_BIT,
342 TRACE_INTERNAL_IRQ_BIT,
343 TRACE_INTERNAL_SIRQ_BIT,
344
345 TRACE_CONTROL_BIT,
304 346
305/* 347/*
306 * Abuse of the trace_recursion. 348 * Abuse of the trace_recursion.
@@ -309,11 +351,77 @@ struct tracer {
309 * was called in irq context but we have irq tracing off. Since this 351 * was called in irq context but we have irq tracing off. Since this
310 * can only be modified by current, we can reuse trace_recursion. 352 * can only be modified by current, we can reuse trace_recursion.
311 */ 353 */
312#define TRACE_IRQ_BIT (1<<13) 354 TRACE_IRQ_BIT,
355};
356
357#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
358#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
359#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
360
361#define TRACE_CONTEXT_BITS 4
362
363#define TRACE_FTRACE_START TRACE_FTRACE_BIT
364#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
365
366#define TRACE_GLOBAL_START TRACE_GLOBAL_BIT
367#define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
368
369#define TRACE_LIST_START TRACE_INTERNAL_BIT
370#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
371
372#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
373
374static __always_inline int trace_get_context_bit(void)
375{
376 int bit;
313 377
314#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0) 378 if (in_interrupt()) {
315#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0) 379 if (in_nmi())
316#define trace_recursion_test(bit) ((current)->trace_recursion & (bit)) 380 bit = 0;
381
382 else if (in_irq())
383 bit = 1;
384 else
385 bit = 2;
386 } else
387 bit = 3;
388
389 return bit;
390}
391
392static __always_inline int trace_test_and_set_recursion(int start, int max)
393{
394 unsigned int val = current->trace_recursion;
395 int bit;
396
397 /* A previous recursion check was made */
398 if ((val & TRACE_CONTEXT_MASK) > max)
399 return 0;
400
401 bit = trace_get_context_bit() + start;
402 if (unlikely(val & (1 << bit)))
403 return -1;
404
405 val |= 1 << bit;
406 current->trace_recursion = val;
407 barrier();
408
409 return bit;
410}
411
412static __always_inline void trace_clear_recursion(int bit)
413{
414 unsigned int val = current->trace_recursion;
415
416 if (!bit)
417 return;
418
419 bit = 1 << bit;
420 val &= ~bit;
421
422 barrier();
423 current->trace_recursion = val;
424}
317 425
318#define TRACE_PIPE_ALL_CPU -1 426#define TRACE_PIPE_ALL_CPU -1
319 427
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 394783531cbb..aa8f5f48dae6 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -21,8 +21,6 @@
21#include <linux/ktime.h> 21#include <linux/ktime.h>
22#include <linux/trace_clock.h> 22#include <linux/trace_clock.h>
23 23
24#include "trace.h"
25
26/* 24/*
27 * trace_clock_local(): the simplest and least coherent tracing clock. 25 * trace_clock_local(): the simplest and least coherent tracing clock.
28 * 26 *
@@ -44,6 +42,7 @@ u64 notrace trace_clock_local(void)
44 42
45 return clock; 43 return clock;
46} 44}
45EXPORT_SYMBOL_GPL(trace_clock_local);
47 46
48/* 47/*
49 * trace_clock(): 'between' trace clock. Not completely serialized, 48 * trace_clock(): 'between' trace clock. Not completely serialized,
@@ -86,7 +85,7 @@ u64 notrace trace_clock_global(void)
86 local_irq_save(flags); 85 local_irq_save(flags);
87 86
88 this_cpu = raw_smp_processor_id(); 87 this_cpu = raw_smp_processor_id();
89 now = cpu_clock(this_cpu); 88 now = sched_clock_cpu(this_cpu);
90 /* 89 /*
91 * If in an NMI context then dont risk lockups and return the 90 * If in an NMI context then dont risk lockups and return the
92 * cpu_clock() time: 91 * cpu_clock() time:
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 880073d0b946..57e9b284250c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,7 +116,6 @@ static int trace_define_common_fields(void)
116 __common_field(unsigned char, flags); 116 __common_field(unsigned char, flags);
117 __common_field(unsigned char, preempt_count); 117 __common_field(unsigned char, preempt_count);
118 __common_field(int, pid); 118 __common_field(int, pid);
119 __common_field(int, padding);
120 119
121 return ret; 120 return ret;
122} 121}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8e3ad8082ab7..601152523326 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr)
47 tracing_reset_online_cpus(tr); 47 tracing_reset_online_cpus(tr);
48} 48}
49 49
50static void
51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
52 struct ftrace_ops *op, struct pt_regs *pt_regs)
53{
54 struct trace_array *tr = func_trace;
55 struct trace_array_cpu *data;
56 unsigned long flags;
57 long disabled;
58 int cpu;
59 int pc;
60
61 if (unlikely(!ftrace_function_enabled))
62 return;
63
64 pc = preempt_count();
65 preempt_disable_notrace();
66 local_save_flags(flags);
67 cpu = raw_smp_processor_id();
68 data = tr->data[cpu];
69 disabled = atomic_inc_return(&data->disabled);
70
71 if (likely(disabled == 1))
72 trace_function(tr, ip, parent_ip, flags, pc);
73
74 atomic_dec(&data->disabled);
75 preempt_enable_notrace();
76}
77
78/* Our option */ 50/* Our option */
79enum { 51enum {
80 TRACE_FUNC_OPT_STACK = 0x1, 52 TRACE_FUNC_OPT_STACK = 0x1,
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags;
85static void 57static void
86function_trace_call(unsigned long ip, unsigned long parent_ip, 58function_trace_call(unsigned long ip, unsigned long parent_ip,
87 struct ftrace_ops *op, struct pt_regs *pt_regs) 59 struct ftrace_ops *op, struct pt_regs *pt_regs)
88
89{ 60{
90 struct trace_array *tr = func_trace; 61 struct trace_array *tr = func_trace;
91 struct trace_array_cpu *data; 62 struct trace_array_cpu *data;
92 unsigned long flags; 63 unsigned long flags;
93 long disabled; 64 int bit;
94 int cpu; 65 int cpu;
95 int pc; 66 int pc;
96 67
97 if (unlikely(!ftrace_function_enabled)) 68 if (unlikely(!ftrace_function_enabled))
98 return; 69 return;
99 70
100 /* 71 pc = preempt_count();
101 * Need to use raw, since this must be called before the 72 preempt_disable_notrace();
102 * recursive protection is performed.
103 */
104 local_irq_save(flags);
105 cpu = raw_smp_processor_id();
106 data = tr->data[cpu];
107 disabled = atomic_inc_return(&data->disabled);
108 73
109 if (likely(disabled == 1)) { 74 bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
110 pc = preempt_count(); 75 if (bit < 0)
76 goto out;
77
78 cpu = smp_processor_id();
79 data = tr->data[cpu];
80 if (!atomic_read(&data->disabled)) {
81 local_save_flags(flags);
111 trace_function(tr, ip, parent_ip, flags, pc); 82 trace_function(tr, ip, parent_ip, flags, pc);
112 } 83 }
84 trace_clear_recursion(bit);
113 85
114 atomic_dec(&data->disabled); 86 out:
115 local_irq_restore(flags); 87 preempt_enable_notrace();
116} 88}
117 89
118static void 90static void
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void)
185{ 157{
186 ftrace_function_enabled = 0; 158 ftrace_function_enabled = 0;
187 159
188 if (trace_flags & TRACE_ITER_PREEMPTONLY)
189 trace_ops.func = function_trace_call_preempt_only;
190 else
191 trace_ops.func = function_trace_call;
192
193 if (func_flags.val & TRACE_FUNC_OPT_STACK) 160 if (func_flags.val & TRACE_FUNC_OPT_STACK)
194 register_ftrace_function(&trace_stack_ops); 161 register_ftrace_function(&trace_stack_ops);
195 else 162 else
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4edb4b74eb7e..39ada66389cc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -47,6 +47,8 @@ struct fgraph_data {
47#define TRACE_GRAPH_PRINT_ABS_TIME 0x20 47#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
48#define TRACE_GRAPH_PRINT_IRQS 0x40 48#define TRACE_GRAPH_PRINT_IRQS 0x40
49 49
50static unsigned int max_depth;
51
50static struct tracer_opt trace_opts[] = { 52static struct tracer_opt trace_opts[] = {
51 /* Display overruns? (for self-debug purpose) */ 53 /* Display overruns? (for self-debug purpose) */
52 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 54 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
@@ -189,10 +191,16 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
189 191
190 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 192 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
191 trace.rettime = trace_clock_local(); 193 trace.rettime = trace_clock_local();
192 ftrace_graph_return(&trace);
193 barrier(); 194 barrier();
194 current->curr_ret_stack--; 195 current->curr_ret_stack--;
195 196
197 /*
198 * The trace should run after decrementing the ret counter
199 * in case an interrupt were to come in. We don't want to
200 * lose the interrupt if max_depth is set.
201 */
202 ftrace_graph_return(&trace);
203
196 if (unlikely(!ret)) { 204 if (unlikely(!ret)) {
197 ftrace_graph_stop(); 205 ftrace_graph_stop();
198 WARN_ON(1); 206 WARN_ON(1);
@@ -250,8 +258,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
250 return 0; 258 return 0;
251 259
252 /* trace it when it is-nested-in or is a function enabled. */ 260 /* trace it when it is-nested-in or is a function enabled. */
253 if (!(trace->depth || ftrace_graph_addr(trace->func)) || 261 if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
254 ftrace_graph_ignore_irqs()) 262 ftrace_graph_ignore_irqs()) ||
263 (max_depth && trace->depth >= max_depth))
255 return 0; 264 return 0;
256 265
257 local_irq_save(flags); 266 local_irq_save(flags);
@@ -1457,6 +1466,59 @@ static struct tracer graph_trace __read_mostly = {
1457#endif 1466#endif
1458}; 1467};
1459 1468
1469
1470static ssize_t
1471graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1472 loff_t *ppos)
1473{
1474 unsigned long val;
1475 int ret;
1476
1477 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1478 if (ret)
1479 return ret;
1480
1481 max_depth = val;
1482
1483 *ppos += cnt;
1484
1485 return cnt;
1486}
1487
1488static ssize_t
1489graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1490 loff_t *ppos)
1491{
1492 char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1493 int n;
1494
1495 n = sprintf(buf, "%d\n", max_depth);
1496
1497 return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1498}
1499
1500static const struct file_operations graph_depth_fops = {
1501 .open = tracing_open_generic,
1502 .write = graph_depth_write,
1503 .read = graph_depth_read,
1504 .llseek = generic_file_llseek,
1505};
1506
1507static __init int init_graph_debugfs(void)
1508{
1509 struct dentry *d_tracer;
1510
1511 d_tracer = tracing_init_dentry();
1512 if (!d_tracer)
1513 return 0;
1514
1515 trace_create_file("max_graph_depth", 0644, d_tracer,
1516 NULL, &graph_depth_fops);
1517
1518 return 0;
1519}
1520fs_initcall(init_graph_debugfs);
1521
1460static __init int init_graph_trace(void) 1522static __init int init_graph_trace(void)
1461{ 1523{
1462 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); 1524 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 933708677814..5c7e09d10d74 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -66,7 +66,6 @@
66#define TP_FLAG_TRACE 1 66#define TP_FLAG_TRACE 1
67#define TP_FLAG_PROFILE 2 67#define TP_FLAG_PROFILE 2
68#define TP_FLAG_REGISTERED 4 68#define TP_FLAG_REGISTERED 4
69#define TP_FLAG_UPROBE 8
70 69
71 70
72/* data_rloc: data relative location, compatible with u32 */ 71/* data_rloc: data relative location, compatible with u32 */
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9fe45fcefca0..75aa97fbe1a1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,8 +15,8 @@
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/sched/rt.h>
18#include <trace/events/sched.h> 19#include <trace/events/sched.h>
19
20#include "trace.h" 20#include "trace.h"
21 21
22static struct trace_array *wakeup_trace; 22static struct trace_array *wakeup_trace;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 47623169a815..51c819c12c29 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -415,7 +415,8 @@ static void trace_selftest_test_recursion_func(unsigned long ip,
415 * The ftrace infrastructure should provide the recursion 415 * The ftrace infrastructure should provide the recursion
416 * protection. If not, this will crash the kernel! 416 * protection. If not, this will crash the kernel!
417 */ 417 */
418 trace_selftest_recursion_cnt++; 418 if (trace_selftest_recursion_cnt++ > 10)
419 return;
419 DYN_FTRACE_TEST_NAME(); 420 DYN_FTRACE_TEST_NAME();
420} 421}
421 422
@@ -452,7 +453,6 @@ trace_selftest_function_recursion(void)
452 char *func_name; 453 char *func_name;
453 int len; 454 int len;
454 int ret; 455 int ret;
455 int cnt;
456 456
457 /* The previous test PASSED */ 457 /* The previous test PASSED */
458 pr_cont("PASSED\n"); 458 pr_cont("PASSED\n");
@@ -510,19 +510,10 @@ trace_selftest_function_recursion(void)
510 510
511 unregister_ftrace_function(&test_recsafe_probe); 511 unregister_ftrace_function(&test_recsafe_probe);
512 512
513 /*
514 * If arch supports all ftrace features, and no other task
515 * was on the list, we should be fine.
516 */
517 if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
518 cnt = 2; /* Should have recursed */
519 else
520 cnt = 1;
521
522 ret = -1; 513 ret = -1;
523 if (trace_selftest_recursion_cnt != cnt) { 514 if (trace_selftest_recursion_cnt != 2) {
524 pr_cont("*callback not called expected %d times (%d)* ", 515 pr_cont("*callback not called expected 2 times (%d)* ",
525 cnt, trace_selftest_recursion_cnt); 516 trace_selftest_recursion_cnt);
526 goto out; 517 goto out;
527 } 518 }
528 519
@@ -568,7 +559,7 @@ trace_selftest_function_regs(void)
568 int ret; 559 int ret;
569 int supported = 0; 560 int supported = 0;
570 561
571#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS 562#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
572 supported = 1; 563 supported = 1;
573#endif 564#endif
574 565
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7609dd6714c2..5329e13e74a1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -77,7 +77,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
77 return syscalls_metadata[nr]; 77 return syscalls_metadata[nr];
78} 78}
79 79
80enum print_line_t 80static enum print_line_t
81print_syscall_enter(struct trace_iterator *iter, int flags, 81print_syscall_enter(struct trace_iterator *iter, int flags,
82 struct trace_event *event) 82 struct trace_event *event)
83{ 83{
@@ -130,7 +130,7 @@ end:
130 return TRACE_TYPE_HANDLED; 130 return TRACE_TYPE_HANDLED;
131} 131}
132 132
133enum print_line_t 133static enum print_line_t
134print_syscall_exit(struct trace_iterator *iter, int flags, 134print_syscall_exit(struct trace_iterator *iter, int flags,
135 struct trace_event *event) 135 struct trace_event *event)
136{ 136{
@@ -270,7 +270,7 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call)
270 return ret; 270 return ret;
271} 271}
272 272
273void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) 273static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
274{ 274{
275 struct syscall_trace_enter *entry; 275 struct syscall_trace_enter *entry;
276 struct syscall_metadata *sys_data; 276 struct syscall_metadata *sys_data;
@@ -305,7 +305,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
305 trace_current_buffer_unlock_commit(buffer, event, 0, 0); 305 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
306} 306}
307 307
308void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) 308static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
309{ 309{
310 struct syscall_trace_exit *entry; 310 struct syscall_trace_exit *entry;
311 struct syscall_metadata *sys_data; 311 struct syscall_metadata *sys_data;
@@ -337,7 +337,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
337 trace_current_buffer_unlock_commit(buffer, event, 0, 0); 337 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
338} 338}
339 339
340int reg_event_syscall_enter(struct ftrace_event_call *call) 340static int reg_event_syscall_enter(struct ftrace_event_call *call)
341{ 341{
342 int ret = 0; 342 int ret = 0;
343 int num; 343 int num;
@@ -356,7 +356,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
356 return ret; 356 return ret;
357} 357}
358 358
359void unreg_event_syscall_enter(struct ftrace_event_call *call) 359static void unreg_event_syscall_enter(struct ftrace_event_call *call)
360{ 360{
361 int num; 361 int num;
362 362
@@ -371,7 +371,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
371 mutex_unlock(&syscall_trace_lock); 371 mutex_unlock(&syscall_trace_lock);
372} 372}
373 373
374int reg_event_syscall_exit(struct ftrace_event_call *call) 374static int reg_event_syscall_exit(struct ftrace_event_call *call)
375{ 375{
376 int ret = 0; 376 int ret = 0;
377 int num; 377 int num;
@@ -390,7 +390,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
390 return ret; 390 return ret;
391} 391}
392 392
393void unreg_event_syscall_exit(struct ftrace_event_call *call) 393static void unreg_event_syscall_exit(struct ftrace_event_call *call)
394{ 394{
395 int num; 395 int num;
396 396
@@ -459,7 +459,7 @@ unsigned long __init __weak arch_syscall_addr(int nr)
459 return (unsigned long)sys_call_table[nr]; 459 return (unsigned long)sys_call_table[nr];
460} 460}
461 461
462int __init init_ftrace_syscalls(void) 462static int __init init_ftrace_syscalls(void)
463{ 463{
464 struct syscall_metadata *meta; 464 struct syscall_metadata *meta;
465 unsigned long addr; 465 unsigned long addr;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c86e6d4f67fb..8dad2a92dee9 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -28,20 +28,21 @@
28 28
29#define UPROBE_EVENT_SYSTEM "uprobes" 29#define UPROBE_EVENT_SYSTEM "uprobes"
30 30
31struct trace_uprobe_filter {
32 rwlock_t rwlock;
33 int nr_systemwide;
34 struct list_head perf_events;
35};
36
31/* 37/*
32 * uprobe event core functions 38 * uprobe event core functions
33 */ 39 */
34struct trace_uprobe;
35struct uprobe_trace_consumer {
36 struct uprobe_consumer cons;
37 struct trace_uprobe *tu;
38};
39
40struct trace_uprobe { 40struct trace_uprobe {
41 struct list_head list; 41 struct list_head list;
42 struct ftrace_event_class class; 42 struct ftrace_event_class class;
43 struct ftrace_event_call call; 43 struct ftrace_event_call call;
44 struct uprobe_trace_consumer *consumer; 44 struct trace_uprobe_filter filter;
45 struct uprobe_consumer consumer;
45 struct inode *inode; 46 struct inode *inode;
46 char *filename; 47 char *filename;
47 unsigned long offset; 48 unsigned long offset;
@@ -64,6 +65,18 @@ static LIST_HEAD(uprobe_list);
64 65
65static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); 66static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
66 67
68static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
69{
70 rwlock_init(&filter->rwlock);
71 filter->nr_systemwide = 0;
72 INIT_LIST_HEAD(&filter->perf_events);
73}
74
75static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
76{
77 return !filter->nr_systemwide && list_empty(&filter->perf_events);
78}
79
67/* 80/*
68 * Allocate new trace_uprobe and initialize it (including uprobes). 81 * Allocate new trace_uprobe and initialize it (including uprobes).
69 */ 82 */
@@ -92,6 +105,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs)
92 goto error; 105 goto error;
93 106
94 INIT_LIST_HEAD(&tu->list); 107 INIT_LIST_HEAD(&tu->list);
108 tu->consumer.handler = uprobe_dispatcher;
109 init_trace_uprobe_filter(&tu->filter);
95 return tu; 110 return tu;
96 111
97error: 112error:
@@ -253,12 +268,18 @@ static int create_trace_uprobe(int argc, char **argv)
253 if (ret) 268 if (ret)
254 goto fail_address_parse; 269 goto fail_address_parse;
255 270
271 inode = igrab(path.dentry->d_inode);
272 path_put(&path);
273
274 if (!inode || !S_ISREG(inode->i_mode)) {
275 ret = -EINVAL;
276 goto fail_address_parse;
277 }
278
256 ret = kstrtoul(arg, 0, &offset); 279 ret = kstrtoul(arg, 0, &offset);
257 if (ret) 280 if (ret)
258 goto fail_address_parse; 281 goto fail_address_parse;
259 282
260 inode = igrab(path.dentry->d_inode);
261
262 argc -= 2; 283 argc -= 2;
263 argv += 2; 284 argv += 2;
264 285
@@ -356,7 +377,7 @@ fail_address_parse:
356 if (inode) 377 if (inode)
357 iput(inode); 378 iput(inode);
358 379
359 pr_info("Failed to parse address.\n"); 380 pr_info("Failed to parse address or file.\n");
360 381
361 return ret; 382 return ret;
362} 383}
@@ -465,7 +486,7 @@ static const struct file_operations uprobe_profile_ops = {
465}; 486};
466 487
467/* uprobe handler */ 488/* uprobe handler */
468static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) 489static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
469{ 490{
470 struct uprobe_trace_entry_head *entry; 491 struct uprobe_trace_entry_head *entry;
471 struct ring_buffer_event *event; 492 struct ring_buffer_event *event;
@@ -475,8 +496,6 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
475 unsigned long irq_flags; 496 unsigned long irq_flags;
476 struct ftrace_event_call *call = &tu->call; 497 struct ftrace_event_call *call = &tu->call;
477 498
478 tu->nhit++;
479
480 local_save_flags(irq_flags); 499 local_save_flags(irq_flags);
481 pc = preempt_count(); 500 pc = preempt_count();
482 501
@@ -485,16 +504,18 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
485 event = trace_current_buffer_lock_reserve(&buffer, call->event.type, 504 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
486 size, irq_flags, pc); 505 size, irq_flags, pc);
487 if (!event) 506 if (!event)
488 return; 507 return 0;
489 508
490 entry = ring_buffer_event_data(event); 509 entry = ring_buffer_event_data(event);
491 entry->ip = uprobe_get_swbp_addr(task_pt_regs(current)); 510 entry->ip = instruction_pointer(task_pt_regs(current));
492 data = (u8 *)&entry[1]; 511 data = (u8 *)&entry[1];
493 for (i = 0; i < tu->nr_args; i++) 512 for (i = 0; i < tu->nr_args; i++)
494 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 513 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
495 514
496 if (!filter_current_check_discard(buffer, call, entry, event)) 515 if (!filter_current_check_discard(buffer, call, entry, event))
497 trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 516 trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
517
518 return 0;
498} 519}
499 520
500/* Event entry printers */ 521/* Event entry printers */
@@ -533,42 +554,43 @@ partial:
533 return TRACE_TYPE_PARTIAL_LINE; 554 return TRACE_TYPE_PARTIAL_LINE;
534} 555}
535 556
536static int probe_event_enable(struct trace_uprobe *tu, int flag) 557static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu)
537{ 558{
538 struct uprobe_trace_consumer *utc; 559 return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE);
539 int ret = 0; 560}
540 561
541 if (!tu->inode || tu->consumer) 562typedef bool (*filter_func_t)(struct uprobe_consumer *self,
542 return -EINTR; 563 enum uprobe_filter_ctx ctx,
564 struct mm_struct *mm);
543 565
544 utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL); 566static int
545 if (!utc) 567probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
568{
569 int ret = 0;
570
571 if (is_trace_uprobe_enabled(tu))
546 return -EINTR; 572 return -EINTR;
547 573
548 utc->cons.handler = uprobe_dispatcher; 574 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
549 utc->cons.filter = NULL;
550 ret = uprobe_register(tu->inode, tu->offset, &utc->cons);
551 if (ret) {
552 kfree(utc);
553 return ret;
554 }
555 575
556 tu->flags |= flag; 576 tu->flags |= flag;
557 utc->tu = tu; 577 tu->consumer.filter = filter;
558 tu->consumer = utc; 578 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
579 if (ret)
580 tu->flags &= ~flag;
559 581
560 return 0; 582 return ret;
561} 583}
562 584
563static void probe_event_disable(struct trace_uprobe *tu, int flag) 585static void probe_event_disable(struct trace_uprobe *tu, int flag)
564{ 586{
565 if (!tu->inode || !tu->consumer) 587 if (!is_trace_uprobe_enabled(tu))
566 return; 588 return;
567 589
568 uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons); 590 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
591
592 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
569 tu->flags &= ~flag; 593 tu->flags &= ~flag;
570 kfree(tu->consumer);
571 tu->consumer = NULL;
572} 594}
573 595
574static int uprobe_event_define_fields(struct ftrace_event_call *event_call) 596static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
@@ -642,8 +664,96 @@ static int set_print_fmt(struct trace_uprobe *tu)
642} 664}
643 665
644#ifdef CONFIG_PERF_EVENTS 666#ifdef CONFIG_PERF_EVENTS
667static bool
668__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
669{
670 struct perf_event *event;
671
672 if (filter->nr_systemwide)
673 return true;
674
675 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
676 if (event->hw.tp_target->mm == mm)
677 return true;
678 }
679
680 return false;
681}
682
683static inline bool
684uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
685{
686 return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
687}
688
689static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
690{
691 bool done;
692
693 write_lock(&tu->filter.rwlock);
694 if (event->hw.tp_target) {
695 /*
696 * event->parent != NULL means copy_process(), we can avoid
697 * uprobe_apply(). current->mm must be probed and we can rely
698 * on dup_mmap() which preserves the already installed bp's.
699 *
700 * attr.enable_on_exec means that exec/mmap will install the
701 * breakpoints we need.
702 */
703 done = tu->filter.nr_systemwide ||
704 event->parent || event->attr.enable_on_exec ||
705 uprobe_filter_event(tu, event);
706 list_add(&event->hw.tp_list, &tu->filter.perf_events);
707 } else {
708 done = tu->filter.nr_systemwide;
709 tu->filter.nr_systemwide++;
710 }
711 write_unlock(&tu->filter.rwlock);
712
713 if (!done)
714 uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
715
716 return 0;
717}
718
719static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
720{
721 bool done;
722
723 write_lock(&tu->filter.rwlock);
724 if (event->hw.tp_target) {
725 list_del(&event->hw.tp_list);
726 done = tu->filter.nr_systemwide ||
727 (event->hw.tp_target->flags & PF_EXITING) ||
728 uprobe_filter_event(tu, event);
729 } else {
730 tu->filter.nr_systemwide--;
731 done = tu->filter.nr_systemwide;
732 }
733 write_unlock(&tu->filter.rwlock);
734
735 if (!done)
736 uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
737
738 return 0;
739}
740
741static bool uprobe_perf_filter(struct uprobe_consumer *uc,
742 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
743{
744 struct trace_uprobe *tu;
745 int ret;
746
747 tu = container_of(uc, struct trace_uprobe, consumer);
748 read_lock(&tu->filter.rwlock);
749 ret = __uprobe_perf_filter(&tu->filter, mm);
750 read_unlock(&tu->filter.rwlock);
751
752 return ret;
753}
754
645/* uprobe profile handler */ 755/* uprobe profile handler */
646static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) 756static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
647{ 757{
648 struct ftrace_event_call *call = &tu->call; 758 struct ftrace_event_call *call = &tu->call;
649 struct uprobe_trace_entry_head *entry; 759 struct uprobe_trace_entry_head *entry;
@@ -652,11 +762,14 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
652 int size, __size, i; 762 int size, __size, i;
653 int rctx; 763 int rctx;
654 764
765 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
766 return UPROBE_HANDLER_REMOVE;
767
655 __size = sizeof(*entry) + tu->size; 768 __size = sizeof(*entry) + tu->size;
656 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 769 size = ALIGN(__size + sizeof(u32), sizeof(u64));
657 size -= sizeof(u32); 770 size -= sizeof(u32);
658 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) 771 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
659 return; 772 return 0;
660 773
661 preempt_disable(); 774 preempt_disable();
662 775
@@ -664,7 +777,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
664 if (!entry) 777 if (!entry)
665 goto out; 778 goto out;
666 779
667 entry->ip = uprobe_get_swbp_addr(task_pt_regs(current)); 780 entry->ip = instruction_pointer(task_pt_regs(current));
668 data = (u8 *)&entry[1]; 781 data = (u8 *)&entry[1];
669 for (i = 0; i < tu->nr_args; i++) 782 for (i = 0; i < tu->nr_args; i++)
670 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 783 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
@@ -674,6 +787,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
674 787
675 out: 788 out:
676 preempt_enable(); 789 preempt_enable();
790 return 0;
677} 791}
678#endif /* CONFIG_PERF_EVENTS */ 792#endif /* CONFIG_PERF_EVENTS */
679 793
@@ -684,7 +798,7 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
684 798
685 switch (type) { 799 switch (type) {
686 case TRACE_REG_REGISTER: 800 case TRACE_REG_REGISTER:
687 return probe_event_enable(tu, TP_FLAG_TRACE); 801 return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
688 802
689 case TRACE_REG_UNREGISTER: 803 case TRACE_REG_UNREGISTER:
690 probe_event_disable(tu, TP_FLAG_TRACE); 804 probe_event_disable(tu, TP_FLAG_TRACE);
@@ -692,11 +806,18 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
692 806
693#ifdef CONFIG_PERF_EVENTS 807#ifdef CONFIG_PERF_EVENTS
694 case TRACE_REG_PERF_REGISTER: 808 case TRACE_REG_PERF_REGISTER:
695 return probe_event_enable(tu, TP_FLAG_PROFILE); 809 return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
696 810
697 case TRACE_REG_PERF_UNREGISTER: 811 case TRACE_REG_PERF_UNREGISTER:
698 probe_event_disable(tu, TP_FLAG_PROFILE); 812 probe_event_disable(tu, TP_FLAG_PROFILE);
699 return 0; 813 return 0;
814
815 case TRACE_REG_PERF_OPEN:
816 return uprobe_perf_open(tu, data);
817
818 case TRACE_REG_PERF_CLOSE:
819 return uprobe_perf_close(tu, data);
820
700#endif 821#endif
701 default: 822 default:
702 return 0; 823 return 0;
@@ -706,22 +827,20 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
706 827
707static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) 828static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
708{ 829{
709 struct uprobe_trace_consumer *utc;
710 struct trace_uprobe *tu; 830 struct trace_uprobe *tu;
831 int ret = 0;
711 832
712 utc = container_of(con, struct uprobe_trace_consumer, cons); 833 tu = container_of(con, struct trace_uprobe, consumer);
713 tu = utc->tu; 834 tu->nhit++;
714 if (!tu || tu->consumer != utc)
715 return 0;
716 835
717 if (tu->flags & TP_FLAG_TRACE) 836 if (tu->flags & TP_FLAG_TRACE)
718 uprobe_trace_func(tu, regs); 837 ret |= uprobe_trace_func(tu, regs);
719 838
720#ifdef CONFIG_PERF_EVENTS 839#ifdef CONFIG_PERF_EVENTS
721 if (tu->flags & TP_FLAG_PROFILE) 840 if (tu->flags & TP_FLAG_PROFILE)
722 uprobe_perf_func(tu, regs); 841 ret |= uprobe_perf_func(tu, regs);
723#endif 842#endif
724 return 0; 843 return ret;
725} 844}
726 845
727static struct trace_event_functions uprobe_funcs = { 846static struct trace_event_functions uprobe_funcs = {
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 625df0b44690..a1dd9a1b1327 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -32,6 +32,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
32{ 32{
33 const struct cred *tcred; 33 const struct cred *tcred;
34 struct timespec uptime, ts; 34 struct timespec uptime, ts;
35 cputime_t utime, stime, utimescaled, stimescaled;
35 u64 ac_etime; 36 u64 ac_etime;
36 37
37 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); 38 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
@@ -65,10 +66,15 @@ void bacct_add_tsk(struct user_namespace *user_ns,
65 stats->ac_ppid = pid_alive(tsk) ? 66 stats->ac_ppid = pid_alive(tsk) ?
66 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; 67 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
67 rcu_read_unlock(); 68 rcu_read_unlock();
68 stats->ac_utime = cputime_to_usecs(tsk->utime); 69
69 stats->ac_stime = cputime_to_usecs(tsk->stime); 70 task_cputime(tsk, &utime, &stime);
70 stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled); 71 stats->ac_utime = cputime_to_usecs(utime);
71 stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled); 72 stats->ac_stime = cputime_to_usecs(stime);
73
74 task_cputime_scaled(tsk, &utimescaled, &stimescaled);
75 stats->ac_utimescaled = cputime_to_usecs(utimescaled);
76 stats->ac_stimescaled = cputime_to_usecs(stimescaled);
77
72 stats->ac_minflt = tsk->min_flt; 78 stats->ac_minflt = tsk->min_flt;
73 stats->ac_majflt = tsk->maj_flt; 79 stats->ac_majflt = tsk->maj_flt;
74 80
@@ -115,11 +121,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
115#undef KB 121#undef KB
116#undef MB 122#undef MB
117 123
118/** 124static void __acct_update_integrals(struct task_struct *tsk,
119 * acct_update_integrals - update mm integral fields in task_struct 125 cputime_t utime, cputime_t stime)
120 * @tsk: task_struct for accounting
121 */
122void acct_update_integrals(struct task_struct *tsk)
123{ 126{
124 if (likely(tsk->mm)) { 127 if (likely(tsk->mm)) {
125 cputime_t time, dtime; 128 cputime_t time, dtime;
@@ -128,7 +131,7 @@ void acct_update_integrals(struct task_struct *tsk)
128 u64 delta; 131 u64 delta;
129 132
130 local_irq_save(flags); 133 local_irq_save(flags);
131 time = tsk->stime + tsk->utime; 134 time = stime + utime;
132 dtime = time - tsk->acct_timexpd; 135 dtime = time - tsk->acct_timexpd;
133 jiffies_to_timeval(cputime_to_jiffies(dtime), &value); 136 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
134 delta = value.tv_sec; 137 delta = value.tv_sec;
@@ -145,6 +148,27 @@ void acct_update_integrals(struct task_struct *tsk)
145} 148}
146 149
147/** 150/**
151 * acct_update_integrals - update mm integral fields in task_struct
152 * @tsk: task_struct for accounting
153 */
154void acct_update_integrals(struct task_struct *tsk)
155{
156 cputime_t utime, stime;
157
158 task_cputime(tsk, &utime, &stime);
159 __acct_update_integrals(tsk, utime, stime);
160}
161
162/**
163 * acct_account_cputime - update mm integral after cputime update
164 * @tsk: task_struct for accounting
165 */
166void acct_account_cputime(struct task_struct *tsk)
167{
168 __acct_update_integrals(tsk, tsk->utime, tsk->stime);
169}
170
171/**
148 * acct_clear_integrals - clear the mm integral fields in task_struct 172 * acct_clear_integrals - clear the mm integral fields in task_struct
149 * @tsk: task_struct whose accounting fields are cleared 173 * @tsk: task_struct whose accounting fields are cleared
150 */ 174 */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 75a2ab3d0b02..27689422aa92 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/sysctl.h> 24#include <linux/sysctl.h>
25#include <linux/smpboot.h> 25#include <linux/smpboot.h>
26#include <linux/sched/rt.h>
26 27
27#include <asm/irq_regs.h> 28#include <asm/irq_regs.h>
28#include <linux/kvm_para.h> 29#include <linux/kvm_para.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fbc6576a83c3..f4feacad3812 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,32 +41,31 @@
41#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
42#include <linux/lockdep.h> 42#include <linux/lockdep.h>
43#include <linux/idr.h> 43#include <linux/idr.h>
44#include <linux/hashtable.h>
44 45
45#include "workqueue_sched.h" 46#include "workqueue_internal.h"
46 47
47enum { 48enum {
48 /* 49 /*
49 * global_cwq flags 50 * worker_pool flags
50 * 51 *
51 * A bound gcwq is either associated or disassociated with its CPU. 52 * A bound pool is either associated or disassociated with its CPU.
52 * While associated (!DISASSOCIATED), all workers are bound to the 53 * While associated (!DISASSOCIATED), all workers are bound to the
53 * CPU and none has %WORKER_UNBOUND set and concurrency management 54 * CPU and none has %WORKER_UNBOUND set and concurrency management
54 * is in effect. 55 * is in effect.
55 * 56 *
56 * While DISASSOCIATED, the cpu may be offline and all workers have 57 * While DISASSOCIATED, the cpu may be offline and all workers have
57 * %WORKER_UNBOUND set and concurrency management disabled, and may 58 * %WORKER_UNBOUND set and concurrency management disabled, and may
58 * be executing on any CPU. The gcwq behaves as an unbound one. 59 * be executing on any CPU. The pool behaves as an unbound one.
59 * 60 *
60 * Note that DISASSOCIATED can be flipped only while holding 61 * Note that DISASSOCIATED can be flipped only while holding
61 * assoc_mutex of all pools on the gcwq to avoid changing binding 62 * assoc_mutex to avoid changing binding state while
62 * state while create_worker() is in progress. 63 * create_worker() is in progress.
63 */ 64 */
64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
65 GCWQ_FREEZING = 1 << 1, /* freeze in progress */
66
67 /* pool flags */
68 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ 65 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
69 POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */ 66 POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
67 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
68 POOL_FREEZING = 1 << 3, /* freeze in progress */
70 69
71 /* worker flags */ 70 /* worker flags */
72 WORKER_STARTED = 1 << 0, /* started */ 71 WORKER_STARTED = 1 << 0, /* started */
@@ -79,11 +78,9 @@ enum {
79 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND | 78 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND |
80 WORKER_CPU_INTENSIVE, 79 WORKER_CPU_INTENSIVE,
81 80
82 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */ 81 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
83 82
84 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 83 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
85 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
86 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
87 84
88 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 85 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
89 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 86 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
@@ -111,48 +108,24 @@ enum {
111 * P: Preemption protected. Disabling preemption is enough and should 108 * P: Preemption protected. Disabling preemption is enough and should
112 * only be modified and accessed from the local cpu. 109 * only be modified and accessed from the local cpu.
113 * 110 *
114 * L: gcwq->lock protected. Access with gcwq->lock held. 111 * L: pool->lock protected. Access with pool->lock held.
115 * 112 *
116 * X: During normal operation, modification requires gcwq->lock and 113 * X: During normal operation, modification requires pool->lock and should
117 * should be done only from local cpu. Either disabling preemption 114 * be done only from local cpu. Either disabling preemption on local
118 * on local cpu or grabbing gcwq->lock is enough for read access. 115 * cpu or grabbing pool->lock is enough for read access. If
119 * If GCWQ_DISASSOCIATED is set, it's identical to L. 116 * POOL_DISASSOCIATED is set, it's identical to L.
120 * 117 *
121 * F: wq->flush_mutex protected. 118 * F: wq->flush_mutex protected.
122 * 119 *
123 * W: workqueue_lock protected. 120 * W: workqueue_lock protected.
124 */ 121 */
125 122
126struct global_cwq; 123/* struct worker is defined in workqueue_internal.h */
127struct worker_pool;
128
129/*
130 * The poor guys doing the actual heavy lifting. All on-duty workers
131 * are either serving the manager role, on idle list or on busy hash.
132 */
133struct worker {
134 /* on idle list while idle, on busy hash table while busy */
135 union {
136 struct list_head entry; /* L: while idle */
137 struct hlist_node hentry; /* L: while busy */
138 };
139
140 struct work_struct *current_work; /* L: work being processed */
141 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
142 struct list_head scheduled; /* L: scheduled works */
143 struct task_struct *task; /* I: worker task */
144 struct worker_pool *pool; /* I: the associated pool */
145 /* 64 bytes boundary on 64bit, 32 on 32bit */
146 unsigned long last_active; /* L: last active timestamp */
147 unsigned int flags; /* X: flags */
148 int id; /* I: worker id */
149
150 /* for rebinding worker to CPU */
151 struct work_struct rebind_work; /* L: for busy worker */
152};
153 124
154struct worker_pool { 125struct worker_pool {
155 struct global_cwq *gcwq; /* I: the owning gcwq */ 126 spinlock_t lock; /* the pool lock */
127 unsigned int cpu; /* I: the associated cpu */
128 int id; /* I: pool ID */
156 unsigned int flags; /* X: flags */ 129 unsigned int flags; /* X: flags */
157 130
158 struct list_head worklist; /* L: list of pending works */ 131 struct list_head worklist; /* L: list of pending works */
@@ -165,34 +138,28 @@ struct worker_pool {
165 struct timer_list idle_timer; /* L: worker idle timeout */ 138 struct timer_list idle_timer; /* L: worker idle timeout */
166 struct timer_list mayday_timer; /* L: SOS timer for workers */ 139 struct timer_list mayday_timer; /* L: SOS timer for workers */
167 140
168 struct mutex assoc_mutex; /* protect GCWQ_DISASSOCIATED */ 141 /* workers are chained either in busy_hash or idle_list */
169 struct ida worker_ida; /* L: for worker IDs */ 142 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
170};
171
172/*
173 * Global per-cpu workqueue. There's one and only one for each cpu
174 * and all works are queued and processed here regardless of their
175 * target workqueues.
176 */
177struct global_cwq {
178 spinlock_t lock; /* the gcwq lock */
179 unsigned int cpu; /* I: the associated cpu */
180 unsigned int flags; /* L: GCWQ_* flags */
181
182 /* workers are chained either in busy_hash or pool idle_list */
183 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
184 /* L: hash of busy workers */ 143 /* L: hash of busy workers */
185 144
186 struct worker_pool pools[NR_WORKER_POOLS]; 145 struct mutex assoc_mutex; /* protect POOL_DISASSOCIATED */
187 /* normal and highpri pools */ 146 struct ida worker_ida; /* L: for worker IDs */
147
148 /*
149 * The current concurrency level. As it's likely to be accessed
150 * from other CPUs during try_to_wake_up(), put it in a separate
151 * cacheline.
152 */
153 atomic_t nr_running ____cacheline_aligned_in_smp;
188} ____cacheline_aligned_in_smp; 154} ____cacheline_aligned_in_smp;
189 155
190/* 156/*
191 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of 157 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
192 * work_struct->data are used for flags and thus cwqs need to be 158 * of work_struct->data are used for flags and the remaining high bits
193 * aligned at two's power of the number of flag bits. 159 * point to the pwq; thus, pwqs need to be aligned at two's power of the
160 * number of flag bits.
194 */ 161 */
195struct cpu_workqueue_struct { 162struct pool_workqueue {
196 struct worker_pool *pool; /* I: the associated pool */ 163 struct worker_pool *pool; /* I: the associated pool */
197 struct workqueue_struct *wq; /* I: the owning workqueue */ 164 struct workqueue_struct *wq; /* I: the owning workqueue */
198 int work_color; /* L: current color */ 165 int work_color; /* L: current color */
@@ -241,16 +208,16 @@ typedef unsigned long mayday_mask_t;
241struct workqueue_struct { 208struct workqueue_struct {
242 unsigned int flags; /* W: WQ_* flags */ 209 unsigned int flags; /* W: WQ_* flags */
243 union { 210 union {
244 struct cpu_workqueue_struct __percpu *pcpu; 211 struct pool_workqueue __percpu *pcpu;
245 struct cpu_workqueue_struct *single; 212 struct pool_workqueue *single;
246 unsigned long v; 213 unsigned long v;
247 } cpu_wq; /* I: cwq's */ 214 } pool_wq; /* I: pwq's */
248 struct list_head list; /* W: list of all workqueues */ 215 struct list_head list; /* W: list of all workqueues */
249 216
250 struct mutex flush_mutex; /* protects wq flushing */ 217 struct mutex flush_mutex; /* protects wq flushing */
251 int work_color; /* F: current work color */ 218 int work_color; /* F: current work color */
252 int flush_color; /* F: current flush color */ 219 int flush_color; /* F: current flush color */
253 atomic_t nr_cwqs_to_flush; /* flush in progress */ 220 atomic_t nr_pwqs_to_flush; /* flush in progress */
254 struct wq_flusher *first_flusher; /* F: first flusher */ 221 struct wq_flusher *first_flusher; /* F: first flusher */
255 struct list_head flusher_queue; /* F: flush waiters */ 222 struct list_head flusher_queue; /* F: flush waiters */
256 struct list_head flusher_overflow; /* F: flush overflow list */ 223 struct list_head flusher_overflow; /* F: flush overflow list */
@@ -259,7 +226,7 @@ struct workqueue_struct {
259 struct worker *rescuer; /* I: rescue worker */ 226 struct worker *rescuer; /* I: rescue worker */
260 227
261 int nr_drainers; /* W: drain in progress */ 228 int nr_drainers; /* W: drain in progress */
262 int saved_max_active; /* W: saved cwq max_active */ 229 int saved_max_active; /* W: saved pwq max_active */
263#ifdef CONFIG_LOCKDEP 230#ifdef CONFIG_LOCKDEP
264 struct lockdep_map lockdep_map; 231 struct lockdep_map lockdep_map;
265#endif 232#endif
@@ -280,16 +247,15 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
280#define CREATE_TRACE_POINTS 247#define CREATE_TRACE_POINTS
281#include <trace/events/workqueue.h> 248#include <trace/events/workqueue.h>
282 249
283#define for_each_worker_pool(pool, gcwq) \ 250#define for_each_std_worker_pool(pool, cpu) \
284 for ((pool) = &(gcwq)->pools[0]; \ 251 for ((pool) = &std_worker_pools(cpu)[0]; \
285 (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++) 252 (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
286 253
287#define for_each_busy_worker(worker, i, pos, gcwq) \ 254#define for_each_busy_worker(worker, i, pos, pool) \
288 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ 255 hash_for_each(pool->busy_hash, i, pos, worker, hentry)
289 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
290 256
291static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, 257static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
292 unsigned int sw) 258 unsigned int sw)
293{ 259{
294 if (cpu < nr_cpu_ids) { 260 if (cpu < nr_cpu_ids) {
295 if (sw & 1) { 261 if (sw & 1) {
@@ -300,42 +266,42 @@ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
300 if (sw & 2) 266 if (sw & 2)
301 return WORK_CPU_UNBOUND; 267 return WORK_CPU_UNBOUND;
302 } 268 }
303 return WORK_CPU_NONE; 269 return WORK_CPU_END;
304} 270}
305 271
306static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, 272static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
307 struct workqueue_struct *wq) 273 struct workqueue_struct *wq)
308{ 274{
309 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); 275 return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
310} 276}
311 277
312/* 278/*
313 * CPU iterators 279 * CPU iterators
314 * 280 *
315 * An extra gcwq is defined for an invalid cpu number 281 * An extra cpu number is defined using an invalid cpu number
316 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any 282 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
317 * specific CPU. The following iterators are similar to 283 * specific CPU. The following iterators are similar to for_each_*_cpu()
318 * for_each_*_cpu() iterators but also considers the unbound gcwq. 284 * iterators but also considers the unbound CPU.
319 * 285 *
320 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND 286 * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
321 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND 287 * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
322 * for_each_cwq_cpu() : possible CPUs for bound workqueues, 288 * for_each_pwq_cpu() : possible CPUs for bound workqueues,
323 * WORK_CPU_UNBOUND for unbound workqueues 289 * WORK_CPU_UNBOUND for unbound workqueues
324 */ 290 */
325#define for_each_gcwq_cpu(cpu) \ 291#define for_each_wq_cpu(cpu) \
326 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ 292 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3); \
327 (cpu) < WORK_CPU_NONE; \ 293 (cpu) < WORK_CPU_END; \
328 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) 294 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
329 295
330#define for_each_online_gcwq_cpu(cpu) \ 296#define for_each_online_wq_cpu(cpu) \
331 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \ 297 for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3); \
332 (cpu) < WORK_CPU_NONE; \ 298 (cpu) < WORK_CPU_END; \
333 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) 299 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
334 300
335#define for_each_cwq_cpu(cpu, wq) \ 301#define for_each_pwq_cpu(cpu, wq) \
336 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \ 302 for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
337 (cpu) < WORK_CPU_NONE; \ 303 (cpu) < WORK_CPU_END; \
338 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) 304 (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
339 305
340#ifdef CONFIG_DEBUG_OBJECTS_WORK 306#ifdef CONFIG_DEBUG_OBJECTS_WORK
341 307
@@ -459,57 +425,69 @@ static LIST_HEAD(workqueues);
459static bool workqueue_freezing; /* W: have wqs started freezing? */ 425static bool workqueue_freezing; /* W: have wqs started freezing? */
460 426
461/* 427/*
462 * The almighty global cpu workqueues. nr_running is the only field 428 * The CPU and unbound standard worker pools. The unbound ones have
463 * which is expected to be used frequently by other cpus via 429 * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
464 * try_to_wake_up(). Put it in a separate cacheline.
465 */ 430 */
466static DEFINE_PER_CPU(struct global_cwq, global_cwq); 431static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
467static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]); 432 cpu_std_worker_pools);
433static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
468 434
469/* 435/* idr of all pools */
470 * Global cpu workqueue and nr_running counter for unbound gcwq. The 436static DEFINE_MUTEX(worker_pool_idr_mutex);
471 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its 437static DEFINE_IDR(worker_pool_idr);
472 * workers have WORKER_UNBOUND set.
473 */
474static struct global_cwq unbound_global_cwq;
475static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
476 [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
477};
478 438
479static int worker_thread(void *__worker); 439static int worker_thread(void *__worker);
480 440
481static int worker_pool_pri(struct worker_pool *pool) 441static struct worker_pool *std_worker_pools(int cpu)
482{ 442{
483 return pool - pool->gcwq->pools; 443 if (cpu != WORK_CPU_UNBOUND)
444 return per_cpu(cpu_std_worker_pools, cpu);
445 else
446 return unbound_std_worker_pools;
484} 447}
485 448
486static struct global_cwq *get_gcwq(unsigned int cpu) 449static int std_worker_pool_pri(struct worker_pool *pool)
487{ 450{
488 if (cpu != WORK_CPU_UNBOUND) 451 return pool - std_worker_pools(pool->cpu);
489 return &per_cpu(global_cwq, cpu);
490 else
491 return &unbound_global_cwq;
492} 452}
493 453
494static atomic_t *get_pool_nr_running(struct worker_pool *pool) 454/* allocate ID and assign it to @pool */
455static int worker_pool_assign_id(struct worker_pool *pool)
495{ 456{
496 int cpu = pool->gcwq->cpu; 457 int ret;
497 int idx = worker_pool_pri(pool);
498 458
499 if (cpu != WORK_CPU_UNBOUND) 459 mutex_lock(&worker_pool_idr_mutex);
500 return &per_cpu(pool_nr_running, cpu)[idx]; 460 idr_pre_get(&worker_pool_idr, GFP_KERNEL);
501 else 461 ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
502 return &unbound_pool_nr_running[idx]; 462 mutex_unlock(&worker_pool_idr_mutex);
463
464 return ret;
503} 465}
504 466
505static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 467/*
506 struct workqueue_struct *wq) 468 * Lookup worker_pool by id. The idr currently is built during boot and
469 * never modified. Don't worry about locking for now.
470 */
471static struct worker_pool *worker_pool_by_id(int pool_id)
472{
473 return idr_find(&worker_pool_idr, pool_id);
474}
475
476static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
477{
478 struct worker_pool *pools = std_worker_pools(cpu);
479
480 return &pools[highpri];
481}
482
483static struct pool_workqueue *get_pwq(unsigned int cpu,
484 struct workqueue_struct *wq)
507{ 485{
508 if (!(wq->flags & WQ_UNBOUND)) { 486 if (!(wq->flags & WQ_UNBOUND)) {
509 if (likely(cpu < nr_cpu_ids)) 487 if (likely(cpu < nr_cpu_ids))
510 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); 488 return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
511 } else if (likely(cpu == WORK_CPU_UNBOUND)) 489 } else if (likely(cpu == WORK_CPU_UNBOUND))
512 return wq->cpu_wq.single; 490 return wq->pool_wq.single;
513 return NULL; 491 return NULL;
514} 492}
515 493
@@ -530,19 +508,19 @@ static int work_next_color(int color)
530} 508}
531 509
532/* 510/*
533 * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data 511 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
534 * contain the pointer to the queued cwq. Once execution starts, the flag 512 * contain the pointer to the queued pwq. Once execution starts, the flag
535 * is cleared and the high bits contain OFFQ flags and CPU number. 513 * is cleared and the high bits contain OFFQ flags and pool ID.
536 * 514 *
537 * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling() 515 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
538 * and clear_work_data() can be used to set the cwq, cpu or clear 516 * and clear_work_data() can be used to set the pwq, pool or clear
539 * work->data. These functions should only be called while the work is 517 * work->data. These functions should only be called while the work is
540 * owned - ie. while the PENDING bit is set. 518 * owned - ie. while the PENDING bit is set.
541 * 519 *
542 * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to 520 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
543 * a work. gcwq is available once the work has been queued anywhere after 521 * corresponding to a work. Pool is available once the work has been
544 * initialization until it is sync canceled. cwq is available only while 522 * queued anywhere after initialization until it is sync canceled. pwq is
545 * the work item is queued. 523 * available only while the work item is queued.
546 * 524 *
547 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 525 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
548 * canceled. While being canceled, a work item may have its PENDING set 526 * canceled. While being canceled, a work item may have its PENDING set
@@ -556,16 +534,22 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
556 atomic_long_set(&work->data, data | flags | work_static(work)); 534 atomic_long_set(&work->data, data | flags | work_static(work));
557} 535}
558 536
559static void set_work_cwq(struct work_struct *work, 537static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
560 struct cpu_workqueue_struct *cwq,
561 unsigned long extra_flags) 538 unsigned long extra_flags)
562{ 539{
563 set_work_data(work, (unsigned long)cwq, 540 set_work_data(work, (unsigned long)pwq,
564 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 541 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
565} 542}
566 543
567static void set_work_cpu_and_clear_pending(struct work_struct *work, 544static void set_work_pool_and_keep_pending(struct work_struct *work,
568 unsigned int cpu) 545 int pool_id)
546{
547 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
548 WORK_STRUCT_PENDING);
549}
550
551static void set_work_pool_and_clear_pending(struct work_struct *work,
552 int pool_id)
569{ 553{
570 /* 554 /*
571 * The following wmb is paired with the implied mb in 555 * The following wmb is paired with the implied mb in
@@ -574,67 +558,92 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
574 * owner. 558 * owner.
575 */ 559 */
576 smp_wmb(); 560 smp_wmb();
577 set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0); 561 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
578} 562}
579 563
580static void clear_work_data(struct work_struct *work) 564static void clear_work_data(struct work_struct *work)
581{ 565{
582 smp_wmb(); /* see set_work_cpu_and_clear_pending() */ 566 smp_wmb(); /* see set_work_pool_and_clear_pending() */
583 set_work_data(work, WORK_STRUCT_NO_CPU, 0); 567 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
584} 568}
585 569
586static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) 570static struct pool_workqueue *get_work_pwq(struct work_struct *work)
587{ 571{
588 unsigned long data = atomic_long_read(&work->data); 572 unsigned long data = atomic_long_read(&work->data);
589 573
590 if (data & WORK_STRUCT_CWQ) 574 if (data & WORK_STRUCT_PWQ)
591 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 575 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
592 else 576 else
593 return NULL; 577 return NULL;
594} 578}
595 579
596static struct global_cwq *get_work_gcwq(struct work_struct *work) 580/**
581 * get_work_pool - return the worker_pool a given work was associated with
582 * @work: the work item of interest
583 *
584 * Return the worker_pool @work was last associated with. %NULL if none.
585 */
586static struct worker_pool *get_work_pool(struct work_struct *work)
597{ 587{
598 unsigned long data = atomic_long_read(&work->data); 588 unsigned long data = atomic_long_read(&work->data);
599 unsigned int cpu; 589 struct worker_pool *pool;
590 int pool_id;
600 591
601 if (data & WORK_STRUCT_CWQ) 592 if (data & WORK_STRUCT_PWQ)
602 return ((struct cpu_workqueue_struct *) 593 return ((struct pool_workqueue *)
603 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq; 594 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
604 595
605 cpu = data >> WORK_OFFQ_CPU_SHIFT; 596 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
606 if (cpu == WORK_CPU_NONE) 597 if (pool_id == WORK_OFFQ_POOL_NONE)
607 return NULL; 598 return NULL;
608 599
609 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); 600 pool = worker_pool_by_id(pool_id);
610 return get_gcwq(cpu); 601 WARN_ON_ONCE(!pool);
602 return pool;
603}
604
605/**
606 * get_work_pool_id - return the worker pool ID a given work is associated with
607 * @work: the work item of interest
608 *
609 * Return the worker_pool ID @work was last associated with.
610 * %WORK_OFFQ_POOL_NONE if none.
611 */
612static int get_work_pool_id(struct work_struct *work)
613{
614 unsigned long data = atomic_long_read(&work->data);
615
616 if (data & WORK_STRUCT_PWQ)
617 return ((struct pool_workqueue *)
618 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
619
620 return data >> WORK_OFFQ_POOL_SHIFT;
611} 621}
612 622
613static void mark_work_canceling(struct work_struct *work) 623static void mark_work_canceling(struct work_struct *work)
614{ 624{
615 struct global_cwq *gcwq = get_work_gcwq(work); 625 unsigned long pool_id = get_work_pool_id(work);
616 unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
617 626
618 set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING, 627 pool_id <<= WORK_OFFQ_POOL_SHIFT;
619 WORK_STRUCT_PENDING); 628 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
620} 629}
621 630
622static bool work_is_canceling(struct work_struct *work) 631static bool work_is_canceling(struct work_struct *work)
623{ 632{
624 unsigned long data = atomic_long_read(&work->data); 633 unsigned long data = atomic_long_read(&work->data);
625 634
626 return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING); 635 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
627} 636}
628 637
629/* 638/*
630 * Policy functions. These define the policies on how the global worker 639 * Policy functions. These define the policies on how the global worker
631 * pools are managed. Unless noted otherwise, these functions assume that 640 * pools are managed. Unless noted otherwise, these functions assume that
632 * they're being called with gcwq->lock held. 641 * they're being called with pool->lock held.
633 */ 642 */
634 643
635static bool __need_more_worker(struct worker_pool *pool) 644static bool __need_more_worker(struct worker_pool *pool)
636{ 645{
637 return !atomic_read(get_pool_nr_running(pool)); 646 return !atomic_read(&pool->nr_running);
638} 647}
639 648
640/* 649/*
@@ -642,7 +651,7 @@ static bool __need_more_worker(struct worker_pool *pool)
642 * running workers. 651 * running workers.
643 * 652 *
644 * Note that, because unbound workers never contribute to nr_running, this 653 * Note that, because unbound workers never contribute to nr_running, this
645 * function will always return %true for unbound gcwq as long as the 654 * function will always return %true for unbound pools as long as the
646 * worklist isn't empty. 655 * worklist isn't empty.
647 */ 656 */
648static bool need_more_worker(struct worker_pool *pool) 657static bool need_more_worker(struct worker_pool *pool)
@@ -659,9 +668,8 @@ static bool may_start_working(struct worker_pool *pool)
659/* Do I need to keep working? Called from currently running workers. */ 668/* Do I need to keep working? Called from currently running workers. */
660static bool keep_working(struct worker_pool *pool) 669static bool keep_working(struct worker_pool *pool)
661{ 670{
662 atomic_t *nr_running = get_pool_nr_running(pool); 671 return !list_empty(&pool->worklist) &&
663 672 atomic_read(&pool->nr_running) <= 1;
664 return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
665} 673}
666 674
667/* Do we need a new worker? Called from manager. */ 675/* Do we need a new worker? Called from manager. */
@@ -714,7 +722,7 @@ static struct worker *first_worker(struct worker_pool *pool)
714 * Wake up the first idle worker of @pool. 722 * Wake up the first idle worker of @pool.
715 * 723 *
716 * CONTEXT: 724 * CONTEXT:
717 * spin_lock_irq(gcwq->lock). 725 * spin_lock_irq(pool->lock).
718 */ 726 */
719static void wake_up_worker(struct worker_pool *pool) 727static void wake_up_worker(struct worker_pool *pool)
720{ 728{
@@ -740,8 +748,8 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
740 struct worker *worker = kthread_data(task); 748 struct worker *worker = kthread_data(task);
741 749
742 if (!(worker->flags & WORKER_NOT_RUNNING)) { 750 if (!(worker->flags & WORKER_NOT_RUNNING)) {
743 WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu); 751 WARN_ON_ONCE(worker->pool->cpu != cpu);
744 atomic_inc(get_pool_nr_running(worker->pool)); 752 atomic_inc(&worker->pool->nr_running);
745 } 753 }
746} 754}
747 755
@@ -764,12 +772,18 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
764 unsigned int cpu) 772 unsigned int cpu)
765{ 773{
766 struct worker *worker = kthread_data(task), *to_wakeup = NULL; 774 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
767 struct worker_pool *pool = worker->pool; 775 struct worker_pool *pool;
768 atomic_t *nr_running = get_pool_nr_running(pool);
769 776
777 /*
778 * Rescuers, which may not have all the fields set up like normal
779 * workers, also reach here, let's not access anything before
780 * checking NOT_RUNNING.
781 */
770 if (worker->flags & WORKER_NOT_RUNNING) 782 if (worker->flags & WORKER_NOT_RUNNING)
771 return NULL; 783 return NULL;
772 784
785 pool = worker->pool;
786
773 /* this can only happen on the local cpu */ 787 /* this can only happen on the local cpu */
774 BUG_ON(cpu != raw_smp_processor_id()); 788 BUG_ON(cpu != raw_smp_processor_id());
775 789
@@ -781,10 +795,11 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
781 * NOT_RUNNING is clear. This means that we're bound to and 795 * NOT_RUNNING is clear. This means that we're bound to and
782 * running on the local cpu w/ rq lock held and preemption 796 * running on the local cpu w/ rq lock held and preemption
783 * disabled, which in turn means that none else could be 797 * disabled, which in turn means that none else could be
784 * manipulating idle_list, so dereferencing idle_list without gcwq 798 * manipulating idle_list, so dereferencing idle_list without pool
785 * lock is safe. 799 * lock is safe.
786 */ 800 */
787 if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) 801 if (atomic_dec_and_test(&pool->nr_running) &&
802 !list_empty(&pool->worklist))
788 to_wakeup = first_worker(pool); 803 to_wakeup = first_worker(pool);
789 return to_wakeup ? to_wakeup->task : NULL; 804 return to_wakeup ? to_wakeup->task : NULL;
790} 805}
@@ -800,7 +815,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
800 * woken up. 815 * woken up.
801 * 816 *
802 * CONTEXT: 817 * CONTEXT:
803 * spin_lock_irq(gcwq->lock) 818 * spin_lock_irq(pool->lock)
804 */ 819 */
805static inline void worker_set_flags(struct worker *worker, unsigned int flags, 820static inline void worker_set_flags(struct worker *worker, unsigned int flags,
806 bool wakeup) 821 bool wakeup)
@@ -816,14 +831,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
816 */ 831 */
817 if ((flags & WORKER_NOT_RUNNING) && 832 if ((flags & WORKER_NOT_RUNNING) &&
818 !(worker->flags & WORKER_NOT_RUNNING)) { 833 !(worker->flags & WORKER_NOT_RUNNING)) {
819 atomic_t *nr_running = get_pool_nr_running(pool);
820
821 if (wakeup) { 834 if (wakeup) {
822 if (atomic_dec_and_test(nr_running) && 835 if (atomic_dec_and_test(&pool->nr_running) &&
823 !list_empty(&pool->worklist)) 836 !list_empty(&pool->worklist))
824 wake_up_worker(pool); 837 wake_up_worker(pool);
825 } else 838 } else
826 atomic_dec(nr_running); 839 atomic_dec(&pool->nr_running);
827 } 840 }
828 841
829 worker->flags |= flags; 842 worker->flags |= flags;
@@ -837,7 +850,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
837 * Clear @flags in @worker->flags and adjust nr_running accordingly. 850 * Clear @flags in @worker->flags and adjust nr_running accordingly.
838 * 851 *
839 * CONTEXT: 852 * CONTEXT:
840 * spin_lock_irq(gcwq->lock) 853 * spin_lock_irq(pool->lock)
841 */ 854 */
842static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 855static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
843{ 856{
@@ -855,87 +868,56 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
855 */ 868 */
856 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 869 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
857 if (!(worker->flags & WORKER_NOT_RUNNING)) 870 if (!(worker->flags & WORKER_NOT_RUNNING))
858 atomic_inc(get_pool_nr_running(pool)); 871 atomic_inc(&pool->nr_running);
859} 872}
860 873
861/** 874/**
862 * busy_worker_head - return the busy hash head for a work 875 * find_worker_executing_work - find worker which is executing a work
863 * @gcwq: gcwq of interest 876 * @pool: pool of interest
864 * @work: work to be hashed
865 *
866 * Return hash head of @gcwq for @work.
867 *
868 * CONTEXT:
869 * spin_lock_irq(gcwq->lock).
870 *
871 * RETURNS:
872 * Pointer to the hash head.
873 */
874static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
875 struct work_struct *work)
876{
877 const int base_shift = ilog2(sizeof(struct work_struct));
878 unsigned long v = (unsigned long)work;
879
880 /* simple shift and fold hash, do we need something better? */
881 v >>= base_shift;
882 v += v >> BUSY_WORKER_HASH_ORDER;
883 v &= BUSY_WORKER_HASH_MASK;
884
885 return &gcwq->busy_hash[v];
886}
887
888/**
889 * __find_worker_executing_work - find worker which is executing a work
890 * @gcwq: gcwq of interest
891 * @bwh: hash head as returned by busy_worker_head()
892 * @work: work to find worker for 877 * @work: work to find worker for
893 * 878 *
894 * Find a worker which is executing @work on @gcwq. @bwh should be 879 * Find a worker which is executing @work on @pool by searching
895 * the hash head obtained by calling busy_worker_head() with the same 880 * @pool->busy_hash which is keyed by the address of @work. For a worker
896 * work. 881 * to match, its current execution should match the address of @work and
882 * its work function. This is to avoid unwanted dependency between
883 * unrelated work executions through a work item being recycled while still
884 * being executed.
885 *
886 * This is a bit tricky. A work item may be freed once its execution
887 * starts and nothing prevents the freed area from being recycled for
888 * another work item. If the same work item address ends up being reused
889 * before the original execution finishes, workqueue will identify the
890 * recycled work item as currently executing and make it wait until the
891 * current execution finishes, introducing an unwanted dependency.
892 *
893 * This function checks the work item address, work function and workqueue
894 * to avoid false positives. Note that this isn't complete as one may
895 * construct a work function which can introduce dependency onto itself
896 * through a recycled work item. Well, if somebody wants to shoot oneself
897 * in the foot that badly, there's only so much we can do, and if such
898 * deadlock actually occurs, it should be easy to locate the culprit work
899 * function.
897 * 900 *
898 * CONTEXT: 901 * CONTEXT:
899 * spin_lock_irq(gcwq->lock). 902 * spin_lock_irq(pool->lock).
900 * 903 *
901 * RETURNS: 904 * RETURNS:
902 * Pointer to worker which is executing @work if found, NULL 905 * Pointer to worker which is executing @work if found, NULL
903 * otherwise. 906 * otherwise.
904 */ 907 */
905static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, 908static struct worker *find_worker_executing_work(struct worker_pool *pool,
906 struct hlist_head *bwh, 909 struct work_struct *work)
907 struct work_struct *work)
908{ 910{
909 struct worker *worker; 911 struct worker *worker;
910 struct hlist_node *tmp; 912 struct hlist_node *tmp;
911 913
912 hlist_for_each_entry(worker, tmp, bwh, hentry) 914 hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
913 if (worker->current_work == work) 915 (unsigned long)work)
916 if (worker->current_work == work &&
917 worker->current_func == work->func)
914 return worker; 918 return worker;
915 return NULL;
916}
917 919
918/** 920 return NULL;
919 * find_worker_executing_work - find worker which is executing a work
920 * @gcwq: gcwq of interest
921 * @work: work to find worker for
922 *
923 * Find a worker which is executing @work on @gcwq. This function is
924 * identical to __find_worker_executing_work() except that this
925 * function calculates @bwh itself.
926 *
927 * CONTEXT:
928 * spin_lock_irq(gcwq->lock).
929 *
930 * RETURNS:
931 * Pointer to worker which is executing @work if found, NULL
932 * otherwise.
933 */
934static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
935 struct work_struct *work)
936{
937 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
938 work);
939} 921}
940 922
941/** 923/**
@@ -953,7 +935,7 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
953 * nested inside outer list_for_each_entry_safe(). 935 * nested inside outer list_for_each_entry_safe().
954 * 936 *
955 * CONTEXT: 937 * CONTEXT:
956 * spin_lock_irq(gcwq->lock). 938 * spin_lock_irq(pool->lock).
957 */ 939 */
958static void move_linked_works(struct work_struct *work, struct list_head *head, 940static void move_linked_works(struct work_struct *work, struct list_head *head,
959 struct work_struct **nextp) 941 struct work_struct **nextp)
@@ -979,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
979 *nextp = n; 961 *nextp = n;
980} 962}
981 963
982static void cwq_activate_delayed_work(struct work_struct *work) 964static void pwq_activate_delayed_work(struct work_struct *work)
983{ 965{
984 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 966 struct pool_workqueue *pwq = get_work_pwq(work);
985 967
986 trace_workqueue_activate_work(work); 968 trace_workqueue_activate_work(work);
987 move_linked_works(work, &cwq->pool->worklist, NULL); 969 move_linked_works(work, &pwq->pool->worklist, NULL);
988 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 970 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
989 cwq->nr_active++; 971 pwq->nr_active++;
990} 972}
991 973
992static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) 974static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
993{ 975{
994 struct work_struct *work = list_first_entry(&cwq->delayed_works, 976 struct work_struct *work = list_first_entry(&pwq->delayed_works,
995 struct work_struct, entry); 977 struct work_struct, entry);
996 978
997 cwq_activate_delayed_work(work); 979 pwq_activate_delayed_work(work);
998} 980}
999 981
1000/** 982/**
1001 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 983 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1002 * @cwq: cwq of interest 984 * @pwq: pwq of interest
1003 * @color: color of work which left the queue 985 * @color: color of work which left the queue
1004 * 986 *
1005 * A work either has completed or is removed from pending queue, 987 * A work either has completed or is removed from pending queue,
1006 * decrement nr_in_flight of its cwq and handle workqueue flushing. 988 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1007 * 989 *
1008 * CONTEXT: 990 * CONTEXT:
1009 * spin_lock_irq(gcwq->lock). 991 * spin_lock_irq(pool->lock).
1010 */ 992 */
1011static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 993static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1012{ 994{
1013 /* ignore uncolored works */ 995 /* ignore uncolored works */
1014 if (color == WORK_NO_COLOR) 996 if (color == WORK_NO_COLOR)
1015 return; 997 return;
1016 998
1017 cwq->nr_in_flight[color]--; 999 pwq->nr_in_flight[color]--;
1018 1000
1019 cwq->nr_active--; 1001 pwq->nr_active--;
1020 if (!list_empty(&cwq->delayed_works)) { 1002 if (!list_empty(&pwq->delayed_works)) {
1021 /* one down, submit a delayed one */ 1003 /* one down, submit a delayed one */
1022 if (cwq->nr_active < cwq->max_active) 1004 if (pwq->nr_active < pwq->max_active)
1023 cwq_activate_first_delayed(cwq); 1005 pwq_activate_first_delayed(pwq);
1024 } 1006 }
1025 1007
1026 /* is flush in progress and are we at the flushing tip? */ 1008 /* is flush in progress and are we at the flushing tip? */
1027 if (likely(cwq->flush_color != color)) 1009 if (likely(pwq->flush_color != color))
1028 return; 1010 return;
1029 1011
1030 /* are there still in-flight works? */ 1012 /* are there still in-flight works? */
1031 if (cwq->nr_in_flight[color]) 1013 if (pwq->nr_in_flight[color])
1032 return; 1014 return;
1033 1015
1034 /* this cwq is done, clear flush_color */ 1016 /* this pwq is done, clear flush_color */
1035 cwq->flush_color = -1; 1017 pwq->flush_color = -1;
1036 1018
1037 /* 1019 /*
1038 * If this was the last cwq, wake up the first flusher. It 1020 * If this was the last pwq, wake up the first flusher. It
1039 * will handle the rest. 1021 * will handle the rest.
1040 */ 1022 */
1041 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 1023 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1042 complete(&cwq->wq->first_flusher->done); 1024 complete(&pwq->wq->first_flusher->done);
1043} 1025}
1044 1026
1045/** 1027/**
@@ -1070,7 +1052,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1070static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1052static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1071 unsigned long *flags) 1053 unsigned long *flags)
1072{ 1054{
1073 struct global_cwq *gcwq; 1055 struct worker_pool *pool;
1056 struct pool_workqueue *pwq;
1074 1057
1075 local_irq_save(*flags); 1058 local_irq_save(*flags);
1076 1059
@@ -1095,41 +1078,43 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1095 * The queueing is in progress, or it is already queued. Try to 1078 * The queueing is in progress, or it is already queued. Try to
1096 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1079 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1097 */ 1080 */
1098 gcwq = get_work_gcwq(work); 1081 pool = get_work_pool(work);
1099 if (!gcwq) 1082 if (!pool)
1100 goto fail; 1083 goto fail;
1101 1084
1102 spin_lock(&gcwq->lock); 1085 spin_lock(&pool->lock);
1103 if (!list_empty(&work->entry)) { 1086 /*
1087 * work->data is guaranteed to point to pwq only while the work
1088 * item is queued on pwq->wq, and both updating work->data to point
1089 * to pwq on queueing and to pool on dequeueing are done under
1090 * pwq->pool->lock. This in turn guarantees that, if work->data
1091 * points to pwq which is associated with a locked pool, the work
1092 * item is currently queued on that pool.
1093 */
1094 pwq = get_work_pwq(work);
1095 if (pwq && pwq->pool == pool) {
1096 debug_work_deactivate(work);
1097
1104 /* 1098 /*
1105 * This work is queued, but perhaps we locked the wrong gcwq. 1099 * A delayed work item cannot be grabbed directly because
1106 * In that case we must see the new value after rmb(), see 1100 * it might have linked NO_COLOR work items which, if left
1107 * insert_work()->wmb(). 1101 * on the delayed_list, will confuse pwq->nr_active
1102 * management later on and cause stall. Make sure the work
1103 * item is activated before grabbing.
1108 */ 1104 */
1109 smp_rmb(); 1105 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1110 if (gcwq == get_work_gcwq(work)) { 1106 pwq_activate_delayed_work(work);
1111 debug_work_deactivate(work);
1112 1107
1113 /* 1108 list_del_init(&work->entry);
1114 * A delayed work item cannot be grabbed directly 1109 pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
1115 * because it might have linked NO_COLOR work items
1116 * which, if left on the delayed_list, will confuse
1117 * cwq->nr_active management later on and cause
1118 * stall. Make sure the work item is activated
1119 * before grabbing.
1120 */
1121 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1122 cwq_activate_delayed_work(work);
1123 1110
1124 list_del_init(&work->entry); 1111 /* work->data points to pwq iff queued, point to pool */
1125 cwq_dec_nr_in_flight(get_work_cwq(work), 1112 set_work_pool_and_keep_pending(work, pool->id);
1126 get_work_color(work));
1127 1113
1128 spin_unlock(&gcwq->lock); 1114 spin_unlock(&pool->lock);
1129 return 1; 1115 return 1;
1130 }
1131 } 1116 }
1132 spin_unlock(&gcwq->lock); 1117 spin_unlock(&pool->lock);
1133fail: 1118fail:
1134 local_irq_restore(*flags); 1119 local_irq_restore(*flags);
1135 if (work_is_canceling(work)) 1120 if (work_is_canceling(work))
@@ -1139,33 +1124,25 @@ fail:
1139} 1124}
1140 1125
1141/** 1126/**
1142 * insert_work - insert a work into gcwq 1127 * insert_work - insert a work into a pool
1143 * @cwq: cwq @work belongs to 1128 * @pwq: pwq @work belongs to
1144 * @work: work to insert 1129 * @work: work to insert
1145 * @head: insertion point 1130 * @head: insertion point
1146 * @extra_flags: extra WORK_STRUCT_* flags to set 1131 * @extra_flags: extra WORK_STRUCT_* flags to set
1147 * 1132 *
1148 * Insert @work which belongs to @cwq into @gcwq after @head. 1133 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1149 * @extra_flags is or'd to work_struct flags. 1134 * work_struct flags.
1150 * 1135 *
1151 * CONTEXT: 1136 * CONTEXT:
1152 * spin_lock_irq(gcwq->lock). 1137 * spin_lock_irq(pool->lock).
1153 */ 1138 */
1154static void insert_work(struct cpu_workqueue_struct *cwq, 1139static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1155 struct work_struct *work, struct list_head *head, 1140 struct list_head *head, unsigned int extra_flags)
1156 unsigned int extra_flags)
1157{ 1141{
1158 struct worker_pool *pool = cwq->pool; 1142 struct worker_pool *pool = pwq->pool;
1159 1143
1160 /* we own @work, set data and link */ 1144 /* we own @work, set data and link */
1161 set_work_cwq(work, cwq, extra_flags); 1145 set_work_pwq(work, pwq, extra_flags);
1162
1163 /*
1164 * Ensure that we get the right work->data if we see the
1165 * result of list_add() below, see try_to_grab_pending().
1166 */
1167 smp_wmb();
1168
1169 list_add_tail(&work->entry, head); 1146 list_add_tail(&work->entry, head);
1170 1147
1171 /* 1148 /*
@@ -1181,41 +1158,24 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
1181 1158
1182/* 1159/*
1183 * Test whether @work is being queued from another work executing on the 1160 * Test whether @work is being queued from another work executing on the
1184 * same workqueue. This is rather expensive and should only be used from 1161 * same workqueue.
1185 * cold paths.
1186 */ 1162 */
1187static bool is_chained_work(struct workqueue_struct *wq) 1163static bool is_chained_work(struct workqueue_struct *wq)
1188{ 1164{
1189 unsigned long flags; 1165 struct worker *worker;
1190 unsigned int cpu;
1191
1192 for_each_gcwq_cpu(cpu) {
1193 struct global_cwq *gcwq = get_gcwq(cpu);
1194 struct worker *worker;
1195 struct hlist_node *pos;
1196 int i;
1197 1166
1198 spin_lock_irqsave(&gcwq->lock, flags); 1167 worker = current_wq_worker();
1199 for_each_busy_worker(worker, i, pos, gcwq) { 1168 /*
1200 if (worker->task != current) 1169 * Return %true iff I'm a worker execuing a work item on @wq. If
1201 continue; 1170 * I'm @worker, it's safe to dereference it without locking.
1202 spin_unlock_irqrestore(&gcwq->lock, flags); 1171 */
1203 /* 1172 return worker && worker->current_pwq->wq == wq;
1204 * I'm @worker, no locking necessary. See if @work
1205 * is headed to the same workqueue.
1206 */
1207 return worker->current_cwq->wq == wq;
1208 }
1209 spin_unlock_irqrestore(&gcwq->lock, flags);
1210 }
1211 return false;
1212} 1173}
1213 1174
1214static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, 1175static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1215 struct work_struct *work) 1176 struct work_struct *work)
1216{ 1177{
1217 struct global_cwq *gcwq; 1178 struct pool_workqueue *pwq;
1218 struct cpu_workqueue_struct *cwq;
1219 struct list_head *worklist; 1179 struct list_head *worklist;
1220 unsigned int work_flags; 1180 unsigned int work_flags;
1221 unsigned int req_cpu = cpu; 1181 unsigned int req_cpu = cpu;
@@ -1235,9 +1195,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1235 WARN_ON_ONCE(!is_chained_work(wq))) 1195 WARN_ON_ONCE(!is_chained_work(wq)))
1236 return; 1196 return;
1237 1197
1238 /* determine gcwq to use */ 1198 /* determine the pwq to use */
1239 if (!(wq->flags & WQ_UNBOUND)) { 1199 if (!(wq->flags & WQ_UNBOUND)) {
1240 struct global_cwq *last_gcwq; 1200 struct worker_pool *last_pool;
1241 1201
1242 if (cpu == WORK_CPU_UNBOUND) 1202 if (cpu == WORK_CPU_UNBOUND)
1243 cpu = raw_smp_processor_id(); 1203 cpu = raw_smp_processor_id();
@@ -1248,55 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1248 * work needs to be queued on that cpu to guarantee 1208 * work needs to be queued on that cpu to guarantee
1249 * non-reentrancy. 1209 * non-reentrancy.
1250 */ 1210 */
1251 gcwq = get_gcwq(cpu); 1211 pwq = get_pwq(cpu, wq);
1252 last_gcwq = get_work_gcwq(work); 1212 last_pool = get_work_pool(work);
1253 1213
1254 if (last_gcwq && last_gcwq != gcwq) { 1214 if (last_pool && last_pool != pwq->pool) {
1255 struct worker *worker; 1215 struct worker *worker;
1256 1216
1257 spin_lock(&last_gcwq->lock); 1217 spin_lock(&last_pool->lock);
1258 1218
1259 worker = find_worker_executing_work(last_gcwq, work); 1219 worker = find_worker_executing_work(last_pool, work);
1260 1220
1261 if (worker && worker->current_cwq->wq == wq) 1221 if (worker && worker->current_pwq->wq == wq) {
1262 gcwq = last_gcwq; 1222 pwq = get_pwq(last_pool->cpu, wq);
1263 else { 1223 } else {
1264 /* meh... not running there, queue here */ 1224 /* meh... not running there, queue here */
1265 spin_unlock(&last_gcwq->lock); 1225 spin_unlock(&last_pool->lock);
1266 spin_lock(&gcwq->lock); 1226 spin_lock(&pwq->pool->lock);
1267 } 1227 }
1268 } else { 1228 } else {
1269 spin_lock(&gcwq->lock); 1229 spin_lock(&pwq->pool->lock);
1270 } 1230 }
1271 } else { 1231 } else {
1272 gcwq = get_gcwq(WORK_CPU_UNBOUND); 1232 pwq = get_pwq(WORK_CPU_UNBOUND, wq);
1273 spin_lock(&gcwq->lock); 1233 spin_lock(&pwq->pool->lock);
1274 } 1234 }
1275 1235
1276 /* gcwq determined, get cwq and queue */ 1236 /* pwq determined, queue */
1277 cwq = get_cwq(gcwq->cpu, wq); 1237 trace_workqueue_queue_work(req_cpu, pwq, work);
1278 trace_workqueue_queue_work(req_cpu, cwq, work);
1279 1238
1280 if (WARN_ON(!list_empty(&work->entry))) { 1239 if (WARN_ON(!list_empty(&work->entry))) {
1281 spin_unlock(&gcwq->lock); 1240 spin_unlock(&pwq->pool->lock);
1282 return; 1241 return;
1283 } 1242 }
1284 1243
1285 cwq->nr_in_flight[cwq->work_color]++; 1244 pwq->nr_in_flight[pwq->work_color]++;
1286 work_flags = work_color_to_flags(cwq->work_color); 1245 work_flags = work_color_to_flags(pwq->work_color);
1287 1246
1288 if (likely(cwq->nr_active < cwq->max_active)) { 1247 if (likely(pwq->nr_active < pwq->max_active)) {
1289 trace_workqueue_activate_work(work); 1248 trace_workqueue_activate_work(work);
1290 cwq->nr_active++; 1249 pwq->nr_active++;
1291 worklist = &cwq->pool->worklist; 1250 worklist = &pwq->pool->worklist;
1292 } else { 1251 } else {
1293 work_flags |= WORK_STRUCT_DELAYED; 1252 work_flags |= WORK_STRUCT_DELAYED;
1294 worklist = &cwq->delayed_works; 1253 worklist = &pwq->delayed_works;
1295 } 1254 }
1296 1255
1297 insert_work(cwq, work, worklist, work_flags); 1256 insert_work(pwq, work, worklist, work_flags);
1298 1257
1299 spin_unlock(&gcwq->lock); 1258 spin_unlock(&pwq->pool->lock);
1300} 1259}
1301 1260
1302/** 1261/**
@@ -1347,19 +1306,17 @@ EXPORT_SYMBOL_GPL(queue_work);
1347void delayed_work_timer_fn(unsigned long __data) 1306void delayed_work_timer_fn(unsigned long __data)
1348{ 1307{
1349 struct delayed_work *dwork = (struct delayed_work *)__data; 1308 struct delayed_work *dwork = (struct delayed_work *)__data;
1350 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1351 1309
1352 /* should have been called from irqsafe timer with irq already off */ 1310 /* should have been called from irqsafe timer with irq already off */
1353 __queue_work(dwork->cpu, cwq->wq, &dwork->work); 1311 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1354} 1312}
1355EXPORT_SYMBOL_GPL(delayed_work_timer_fn); 1313EXPORT_SYMBOL(delayed_work_timer_fn);
1356 1314
1357static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1315static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1358 struct delayed_work *dwork, unsigned long delay) 1316 struct delayed_work *dwork, unsigned long delay)
1359{ 1317{
1360 struct timer_list *timer = &dwork->timer; 1318 struct timer_list *timer = &dwork->timer;
1361 struct work_struct *work = &dwork->work; 1319 struct work_struct *work = &dwork->work;
1362 unsigned int lcpu;
1363 1320
1364 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1321 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1365 timer->data != (unsigned long)dwork); 1322 timer->data != (unsigned long)dwork);
@@ -1379,30 +1336,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1379 1336
1380 timer_stats_timer_set_start_info(&dwork->timer); 1337 timer_stats_timer_set_start_info(&dwork->timer);
1381 1338
1382 /* 1339 dwork->wq = wq;
1383 * This stores cwq for the moment, for the timer_fn. Note that the
1384 * work's gcwq is preserved to allow reentrance detection for
1385 * delayed works.
1386 */
1387 if (!(wq->flags & WQ_UNBOUND)) {
1388 struct global_cwq *gcwq = get_work_gcwq(work);
1389
1390 /*
1391 * If we cannot get the last gcwq from @work directly,
1392 * select the last CPU such that it avoids unnecessarily
1393 * triggering non-reentrancy check in __queue_work().
1394 */
1395 lcpu = cpu;
1396 if (gcwq)
1397 lcpu = gcwq->cpu;
1398 if (lcpu == WORK_CPU_UNBOUND)
1399 lcpu = raw_smp_processor_id();
1400 } else {
1401 lcpu = WORK_CPU_UNBOUND;
1402 }
1403
1404 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1405
1406 dwork->cpu = cpu; 1340 dwork->cpu = cpu;
1407 timer->expires = jiffies + delay; 1341 timer->expires = jiffies + delay;
1408 1342
@@ -1519,12 +1453,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work);
1519 * necessary. 1453 * necessary.
1520 * 1454 *
1521 * LOCKING: 1455 * LOCKING:
1522 * spin_lock_irq(gcwq->lock). 1456 * spin_lock_irq(pool->lock).
1523 */ 1457 */
1524static void worker_enter_idle(struct worker *worker) 1458static void worker_enter_idle(struct worker *worker)
1525{ 1459{
1526 struct worker_pool *pool = worker->pool; 1460 struct worker_pool *pool = worker->pool;
1527 struct global_cwq *gcwq = pool->gcwq;
1528 1461
1529 BUG_ON(worker->flags & WORKER_IDLE); 1462 BUG_ON(worker->flags & WORKER_IDLE);
1530 BUG_ON(!list_empty(&worker->entry) && 1463 BUG_ON(!list_empty(&worker->entry) &&
@@ -1542,14 +1475,14 @@ static void worker_enter_idle(struct worker *worker)
1542 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1475 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1543 1476
1544 /* 1477 /*
1545 * Sanity check nr_running. Because gcwq_unbind_fn() releases 1478 * Sanity check nr_running. Because wq_unbind_fn() releases
1546 * gcwq->lock between setting %WORKER_UNBOUND and zapping 1479 * pool->lock between setting %WORKER_UNBOUND and zapping
1547 * nr_running, the warning may trigger spuriously. Check iff 1480 * nr_running, the warning may trigger spuriously. Check iff
1548 * unbind is not in progress. 1481 * unbind is not in progress.
1549 */ 1482 */
1550 WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) && 1483 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1551 pool->nr_workers == pool->nr_idle && 1484 pool->nr_workers == pool->nr_idle &&
1552 atomic_read(get_pool_nr_running(pool))); 1485 atomic_read(&pool->nr_running));
1553} 1486}
1554 1487
1555/** 1488/**
@@ -1559,7 +1492,7 @@ static void worker_enter_idle(struct worker *worker)
1559 * @worker is leaving idle state. Update stats. 1492 * @worker is leaving idle state. Update stats.
1560 * 1493 *
1561 * LOCKING: 1494 * LOCKING:
1562 * spin_lock_irq(gcwq->lock). 1495 * spin_lock_irq(pool->lock).
1563 */ 1496 */
1564static void worker_leave_idle(struct worker *worker) 1497static void worker_leave_idle(struct worker *worker)
1565{ 1498{
@@ -1572,7 +1505,7 @@ static void worker_leave_idle(struct worker *worker)
1572} 1505}
1573 1506
1574/** 1507/**
1575 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq 1508 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
1576 * @worker: self 1509 * @worker: self
1577 * 1510 *
1578 * Works which are scheduled while the cpu is online must at least be 1511 * Works which are scheduled while the cpu is online must at least be
@@ -1584,27 +1517,27 @@ static void worker_leave_idle(struct worker *worker)
1584 * themselves to the target cpu and may race with cpu going down or 1517 * themselves to the target cpu and may race with cpu going down or
1585 * coming online. kthread_bind() can't be used because it may put the 1518 * coming online. kthread_bind() can't be used because it may put the
1586 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used 1519 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1587 * verbatim as it's best effort and blocking and gcwq may be 1520 * verbatim as it's best effort and blocking and pool may be
1588 * [dis]associated in the meantime. 1521 * [dis]associated in the meantime.
1589 * 1522 *
1590 * This function tries set_cpus_allowed() and locks gcwq and verifies the 1523 * This function tries set_cpus_allowed() and locks pool and verifies the
1591 * binding against %GCWQ_DISASSOCIATED which is set during 1524 * binding against %POOL_DISASSOCIATED which is set during
1592 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker 1525 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1593 * enters idle state or fetches works without dropping lock, it can 1526 * enters idle state or fetches works without dropping lock, it can
1594 * guarantee the scheduling requirement described in the first paragraph. 1527 * guarantee the scheduling requirement described in the first paragraph.
1595 * 1528 *
1596 * CONTEXT: 1529 * CONTEXT:
1597 * Might sleep. Called without any lock but returns with gcwq->lock 1530 * Might sleep. Called without any lock but returns with pool->lock
1598 * held. 1531 * held.
1599 * 1532 *
1600 * RETURNS: 1533 * RETURNS:
1601 * %true if the associated gcwq is online (@worker is successfully 1534 * %true if the associated pool is online (@worker is successfully
1602 * bound), %false if offline. 1535 * bound), %false if offline.
1603 */ 1536 */
1604static bool worker_maybe_bind_and_lock(struct worker *worker) 1537static bool worker_maybe_bind_and_lock(struct worker *worker)
1605__acquires(&gcwq->lock) 1538__acquires(&pool->lock)
1606{ 1539{
1607 struct global_cwq *gcwq = worker->pool->gcwq; 1540 struct worker_pool *pool = worker->pool;
1608 struct task_struct *task = worker->task; 1541 struct task_struct *task = worker->task;
1609 1542
1610 while (true) { 1543 while (true) {
@@ -1612,19 +1545,19 @@ __acquires(&gcwq->lock)
1612 * The following call may fail, succeed or succeed 1545 * The following call may fail, succeed or succeed
1613 * without actually migrating the task to the cpu if 1546 * without actually migrating the task to the cpu if
1614 * it races with cpu hotunplug operation. Verify 1547 * it races with cpu hotunplug operation. Verify
1615 * against GCWQ_DISASSOCIATED. 1548 * against POOL_DISASSOCIATED.
1616 */ 1549 */
1617 if (!(gcwq->flags & GCWQ_DISASSOCIATED)) 1550 if (!(pool->flags & POOL_DISASSOCIATED))
1618 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); 1551 set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
1619 1552
1620 spin_lock_irq(&gcwq->lock); 1553 spin_lock_irq(&pool->lock);
1621 if (gcwq->flags & GCWQ_DISASSOCIATED) 1554 if (pool->flags & POOL_DISASSOCIATED)
1622 return false; 1555 return false;
1623 if (task_cpu(task) == gcwq->cpu && 1556 if (task_cpu(task) == pool->cpu &&
1624 cpumask_equal(&current->cpus_allowed, 1557 cpumask_equal(&current->cpus_allowed,
1625 get_cpu_mask(gcwq->cpu))) 1558 get_cpu_mask(pool->cpu)))
1626 return true; 1559 return true;
1627 spin_unlock_irq(&gcwq->lock); 1560 spin_unlock_irq(&pool->lock);
1628 1561
1629 /* 1562 /*
1630 * We've raced with CPU hot[un]plug. Give it a breather 1563 * We've raced with CPU hot[un]plug. Give it a breather
@@ -1643,15 +1576,13 @@ __acquires(&gcwq->lock)
1643 */ 1576 */
1644static void idle_worker_rebind(struct worker *worker) 1577static void idle_worker_rebind(struct worker *worker)
1645{ 1578{
1646 struct global_cwq *gcwq = worker->pool->gcwq;
1647
1648 /* CPU may go down again inbetween, clear UNBOUND only on success */ 1579 /* CPU may go down again inbetween, clear UNBOUND only on success */
1649 if (worker_maybe_bind_and_lock(worker)) 1580 if (worker_maybe_bind_and_lock(worker))
1650 worker_clr_flags(worker, WORKER_UNBOUND); 1581 worker_clr_flags(worker, WORKER_UNBOUND);
1651 1582
1652 /* rebind complete, become available again */ 1583 /* rebind complete, become available again */
1653 list_add(&worker->entry, &worker->pool->idle_list); 1584 list_add(&worker->entry, &worker->pool->idle_list);
1654 spin_unlock_irq(&gcwq->lock); 1585 spin_unlock_irq(&worker->pool->lock);
1655} 1586}
1656 1587
1657/* 1588/*
@@ -1663,19 +1594,18 @@ static void idle_worker_rebind(struct worker *worker)
1663static void busy_worker_rebind_fn(struct work_struct *work) 1594static void busy_worker_rebind_fn(struct work_struct *work)
1664{ 1595{
1665 struct worker *worker = container_of(work, struct worker, rebind_work); 1596 struct worker *worker = container_of(work, struct worker, rebind_work);
1666 struct global_cwq *gcwq = worker->pool->gcwq;
1667 1597
1668 if (worker_maybe_bind_and_lock(worker)) 1598 if (worker_maybe_bind_and_lock(worker))
1669 worker_clr_flags(worker, WORKER_UNBOUND); 1599 worker_clr_flags(worker, WORKER_UNBOUND);
1670 1600
1671 spin_unlock_irq(&gcwq->lock); 1601 spin_unlock_irq(&worker->pool->lock);
1672} 1602}
1673 1603
1674/** 1604/**
1675 * rebind_workers - rebind all workers of a gcwq to the associated CPU 1605 * rebind_workers - rebind all workers of a pool to the associated CPU
1676 * @gcwq: gcwq of interest 1606 * @pool: pool of interest
1677 * 1607 *
1678 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding 1608 * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
1679 * is different for idle and busy ones. 1609 * is different for idle and busy ones.
1680 * 1610 *
1681 * Idle ones will be removed from the idle_list and woken up. They will 1611 * Idle ones will be removed from the idle_list and woken up. They will
@@ -1693,38 +1623,32 @@ static void busy_worker_rebind_fn(struct work_struct *work)
1693 * including the manager will not appear on @idle_list until rebind is 1623 * including the manager will not appear on @idle_list until rebind is
1694 * complete, making local wake-ups safe. 1624 * complete, making local wake-ups safe.
1695 */ 1625 */
1696static void rebind_workers(struct global_cwq *gcwq) 1626static void rebind_workers(struct worker_pool *pool)
1697{ 1627{
1698 struct worker_pool *pool;
1699 struct worker *worker, *n; 1628 struct worker *worker, *n;
1700 struct hlist_node *pos; 1629 struct hlist_node *pos;
1701 int i; 1630 int i;
1702 1631
1703 lockdep_assert_held(&gcwq->lock); 1632 lockdep_assert_held(&pool->assoc_mutex);
1704 1633 lockdep_assert_held(&pool->lock);
1705 for_each_worker_pool(pool, gcwq)
1706 lockdep_assert_held(&pool->assoc_mutex);
1707 1634
1708 /* dequeue and kick idle ones */ 1635 /* dequeue and kick idle ones */
1709 for_each_worker_pool(pool, gcwq) { 1636 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
1710 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) { 1637 /*
1711 /* 1638 * idle workers should be off @pool->idle_list until rebind
1712 * idle workers should be off @pool->idle_list 1639 * is complete to avoid receiving premature local wake-ups.
1713 * until rebind is complete to avoid receiving 1640 */
1714 * premature local wake-ups. 1641 list_del_init(&worker->entry);
1715 */
1716 list_del_init(&worker->entry);
1717 1642
1718 /* 1643 /*
1719 * worker_thread() will see the above dequeuing 1644 * worker_thread() will see the above dequeuing and call
1720 * and call idle_worker_rebind(). 1645 * idle_worker_rebind().
1721 */ 1646 */
1722 wake_up_process(worker->task); 1647 wake_up_process(worker->task);
1723 }
1724 } 1648 }
1725 1649
1726 /* rebind busy workers */ 1650 /* rebind busy workers */
1727 for_each_busy_worker(worker, i, pos, gcwq) { 1651 for_each_busy_worker(worker, i, pos, pool) {
1728 struct work_struct *rebind_work = &worker->rebind_work; 1652 struct work_struct *rebind_work = &worker->rebind_work;
1729 struct workqueue_struct *wq; 1653 struct workqueue_struct *wq;
1730 1654
@@ -1736,16 +1660,16 @@ static void rebind_workers(struct global_cwq *gcwq)
1736 1660
1737 /* 1661 /*
1738 * wq doesn't really matter but let's keep @worker->pool 1662 * wq doesn't really matter but let's keep @worker->pool
1739 * and @cwq->pool consistent for sanity. 1663 * and @pwq->pool consistent for sanity.
1740 */ 1664 */
1741 if (worker_pool_pri(worker->pool)) 1665 if (std_worker_pool_pri(worker->pool))
1742 wq = system_highpri_wq; 1666 wq = system_highpri_wq;
1743 else 1667 else
1744 wq = system_wq; 1668 wq = system_wq;
1745 1669
1746 insert_work(get_cwq(gcwq->cpu, wq), rebind_work, 1670 insert_work(get_pwq(pool->cpu, wq), rebind_work,
1747 worker->scheduled.next, 1671 worker->scheduled.next,
1748 work_color_to_flags(WORK_NO_COLOR)); 1672 work_color_to_flags(WORK_NO_COLOR));
1749 } 1673 }
1750} 1674}
1751 1675
@@ -1780,19 +1704,18 @@ static struct worker *alloc_worker(void)
1780 */ 1704 */
1781static struct worker *create_worker(struct worker_pool *pool) 1705static struct worker *create_worker(struct worker_pool *pool)
1782{ 1706{
1783 struct global_cwq *gcwq = pool->gcwq; 1707 const char *pri = std_worker_pool_pri(pool) ? "H" : "";
1784 const char *pri = worker_pool_pri(pool) ? "H" : "";
1785 struct worker *worker = NULL; 1708 struct worker *worker = NULL;
1786 int id = -1; 1709 int id = -1;
1787 1710
1788 spin_lock_irq(&gcwq->lock); 1711 spin_lock_irq(&pool->lock);
1789 while (ida_get_new(&pool->worker_ida, &id)) { 1712 while (ida_get_new(&pool->worker_ida, &id)) {
1790 spin_unlock_irq(&gcwq->lock); 1713 spin_unlock_irq(&pool->lock);
1791 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) 1714 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
1792 goto fail; 1715 goto fail;
1793 spin_lock_irq(&gcwq->lock); 1716 spin_lock_irq(&pool->lock);
1794 } 1717 }
1795 spin_unlock_irq(&gcwq->lock); 1718 spin_unlock_irq(&pool->lock);
1796 1719
1797 worker = alloc_worker(); 1720 worker = alloc_worker();
1798 if (!worker) 1721 if (!worker)
@@ -1801,30 +1724,30 @@ static struct worker *create_worker(struct worker_pool *pool)
1801 worker->pool = pool; 1724 worker->pool = pool;
1802 worker->id = id; 1725 worker->id = id;
1803 1726
1804 if (gcwq->cpu != WORK_CPU_UNBOUND) 1727 if (pool->cpu != WORK_CPU_UNBOUND)
1805 worker->task = kthread_create_on_node(worker_thread, 1728 worker->task = kthread_create_on_node(worker_thread,
1806 worker, cpu_to_node(gcwq->cpu), 1729 worker, cpu_to_node(pool->cpu),
1807 "kworker/%u:%d%s", gcwq->cpu, id, pri); 1730 "kworker/%u:%d%s", pool->cpu, id, pri);
1808 else 1731 else
1809 worker->task = kthread_create(worker_thread, worker, 1732 worker->task = kthread_create(worker_thread, worker,
1810 "kworker/u:%d%s", id, pri); 1733 "kworker/u:%d%s", id, pri);
1811 if (IS_ERR(worker->task)) 1734 if (IS_ERR(worker->task))
1812 goto fail; 1735 goto fail;
1813 1736
1814 if (worker_pool_pri(pool)) 1737 if (std_worker_pool_pri(pool))
1815 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL); 1738 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
1816 1739
1817 /* 1740 /*
1818 * Determine CPU binding of the new worker depending on 1741 * Determine CPU binding of the new worker depending on
1819 * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the 1742 * %POOL_DISASSOCIATED. The caller is responsible for ensuring the
1820 * flag remains stable across this function. See the comments 1743 * flag remains stable across this function. See the comments
1821 * above the flag definition for details. 1744 * above the flag definition for details.
1822 * 1745 *
1823 * As an unbound worker may later become a regular one if CPU comes 1746 * As an unbound worker may later become a regular one if CPU comes
1824 * online, make sure every worker has %PF_THREAD_BOUND set. 1747 * online, make sure every worker has %PF_THREAD_BOUND set.
1825 */ 1748 */
1826 if (!(gcwq->flags & GCWQ_DISASSOCIATED)) { 1749 if (!(pool->flags & POOL_DISASSOCIATED)) {
1827 kthread_bind(worker->task, gcwq->cpu); 1750 kthread_bind(worker->task, pool->cpu);
1828 } else { 1751 } else {
1829 worker->task->flags |= PF_THREAD_BOUND; 1752 worker->task->flags |= PF_THREAD_BOUND;
1830 worker->flags |= WORKER_UNBOUND; 1753 worker->flags |= WORKER_UNBOUND;
@@ -1833,9 +1756,9 @@ static struct worker *create_worker(struct worker_pool *pool)
1833 return worker; 1756 return worker;
1834fail: 1757fail:
1835 if (id >= 0) { 1758 if (id >= 0) {
1836 spin_lock_irq(&gcwq->lock); 1759 spin_lock_irq(&pool->lock);
1837 ida_remove(&pool->worker_ida, id); 1760 ida_remove(&pool->worker_ida, id);
1838 spin_unlock_irq(&gcwq->lock); 1761 spin_unlock_irq(&pool->lock);
1839 } 1762 }
1840 kfree(worker); 1763 kfree(worker);
1841 return NULL; 1764 return NULL;
@@ -1845,10 +1768,10 @@ fail:
1845 * start_worker - start a newly created worker 1768 * start_worker - start a newly created worker
1846 * @worker: worker to start 1769 * @worker: worker to start
1847 * 1770 *
1848 * Make the gcwq aware of @worker and start it. 1771 * Make the pool aware of @worker and start it.
1849 * 1772 *
1850 * CONTEXT: 1773 * CONTEXT:
1851 * spin_lock_irq(gcwq->lock). 1774 * spin_lock_irq(pool->lock).
1852 */ 1775 */
1853static void start_worker(struct worker *worker) 1776static void start_worker(struct worker *worker)
1854{ 1777{
@@ -1862,15 +1785,14 @@ static void start_worker(struct worker *worker)
1862 * destroy_worker - destroy a workqueue worker 1785 * destroy_worker - destroy a workqueue worker
1863 * @worker: worker to be destroyed 1786 * @worker: worker to be destroyed
1864 * 1787 *
1865 * Destroy @worker and adjust @gcwq stats accordingly. 1788 * Destroy @worker and adjust @pool stats accordingly.
1866 * 1789 *
1867 * CONTEXT: 1790 * CONTEXT:
1868 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1791 * spin_lock_irq(pool->lock) which is released and regrabbed.
1869 */ 1792 */
1870static void destroy_worker(struct worker *worker) 1793static void destroy_worker(struct worker *worker)
1871{ 1794{
1872 struct worker_pool *pool = worker->pool; 1795 struct worker_pool *pool = worker->pool;
1873 struct global_cwq *gcwq = pool->gcwq;
1874 int id = worker->id; 1796 int id = worker->id;
1875 1797
1876 /* sanity check frenzy */ 1798 /* sanity check frenzy */
@@ -1885,21 +1807,20 @@ static void destroy_worker(struct worker *worker)
1885 list_del_init(&worker->entry); 1807 list_del_init(&worker->entry);
1886 worker->flags |= WORKER_DIE; 1808 worker->flags |= WORKER_DIE;
1887 1809
1888 spin_unlock_irq(&gcwq->lock); 1810 spin_unlock_irq(&pool->lock);
1889 1811
1890 kthread_stop(worker->task); 1812 kthread_stop(worker->task);
1891 kfree(worker); 1813 kfree(worker);
1892 1814
1893 spin_lock_irq(&gcwq->lock); 1815 spin_lock_irq(&pool->lock);
1894 ida_remove(&pool->worker_ida, id); 1816 ida_remove(&pool->worker_ida, id);
1895} 1817}
1896 1818
1897static void idle_worker_timeout(unsigned long __pool) 1819static void idle_worker_timeout(unsigned long __pool)
1898{ 1820{
1899 struct worker_pool *pool = (void *)__pool; 1821 struct worker_pool *pool = (void *)__pool;
1900 struct global_cwq *gcwq = pool->gcwq;
1901 1822
1902 spin_lock_irq(&gcwq->lock); 1823 spin_lock_irq(&pool->lock);
1903 1824
1904 if (too_many_workers(pool)) { 1825 if (too_many_workers(pool)) {
1905 struct worker *worker; 1826 struct worker *worker;
@@ -1918,20 +1839,20 @@ static void idle_worker_timeout(unsigned long __pool)
1918 } 1839 }
1919 } 1840 }
1920 1841
1921 spin_unlock_irq(&gcwq->lock); 1842 spin_unlock_irq(&pool->lock);
1922} 1843}
1923 1844
1924static bool send_mayday(struct work_struct *work) 1845static bool send_mayday(struct work_struct *work)
1925{ 1846{
1926 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1847 struct pool_workqueue *pwq = get_work_pwq(work);
1927 struct workqueue_struct *wq = cwq->wq; 1848 struct workqueue_struct *wq = pwq->wq;
1928 unsigned int cpu; 1849 unsigned int cpu;
1929 1850
1930 if (!(wq->flags & WQ_RESCUER)) 1851 if (!(wq->flags & WQ_RESCUER))
1931 return false; 1852 return false;
1932 1853
1933 /* mayday mayday mayday */ 1854 /* mayday mayday mayday */
1934 cpu = cwq->pool->gcwq->cpu; 1855 cpu = pwq->pool->cpu;
1935 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1856 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1936 if (cpu == WORK_CPU_UNBOUND) 1857 if (cpu == WORK_CPU_UNBOUND)
1937 cpu = 0; 1858 cpu = 0;
@@ -1940,13 +1861,12 @@ static bool send_mayday(struct work_struct *work)
1940 return true; 1861 return true;
1941} 1862}
1942 1863
1943static void gcwq_mayday_timeout(unsigned long __pool) 1864static void pool_mayday_timeout(unsigned long __pool)
1944{ 1865{
1945 struct worker_pool *pool = (void *)__pool; 1866 struct worker_pool *pool = (void *)__pool;
1946 struct global_cwq *gcwq = pool->gcwq;
1947 struct work_struct *work; 1867 struct work_struct *work;
1948 1868
1949 spin_lock_irq(&gcwq->lock); 1869 spin_lock_irq(&pool->lock);
1950 1870
1951 if (need_to_create_worker(pool)) { 1871 if (need_to_create_worker(pool)) {
1952 /* 1872 /*
@@ -1959,7 +1879,7 @@ static void gcwq_mayday_timeout(unsigned long __pool)
1959 send_mayday(work); 1879 send_mayday(work);
1960 } 1880 }
1961 1881
1962 spin_unlock_irq(&gcwq->lock); 1882 spin_unlock_irq(&pool->lock);
1963 1883
1964 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1884 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1965} 1885}
@@ -1978,24 +1898,22 @@ static void gcwq_mayday_timeout(unsigned long __pool)
1978 * may_start_working() true. 1898 * may_start_working() true.
1979 * 1899 *
1980 * LOCKING: 1900 * LOCKING:
1981 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1901 * spin_lock_irq(pool->lock) which may be released and regrabbed
1982 * multiple times. Does GFP_KERNEL allocations. Called only from 1902 * multiple times. Does GFP_KERNEL allocations. Called only from
1983 * manager. 1903 * manager.
1984 * 1904 *
1985 * RETURNS: 1905 * RETURNS:
1986 * false if no action was taken and gcwq->lock stayed locked, true 1906 * false if no action was taken and pool->lock stayed locked, true
1987 * otherwise. 1907 * otherwise.
1988 */ 1908 */
1989static bool maybe_create_worker(struct worker_pool *pool) 1909static bool maybe_create_worker(struct worker_pool *pool)
1990__releases(&gcwq->lock) 1910__releases(&pool->lock)
1991__acquires(&gcwq->lock) 1911__acquires(&pool->lock)
1992{ 1912{
1993 struct global_cwq *gcwq = pool->gcwq;
1994
1995 if (!need_to_create_worker(pool)) 1913 if (!need_to_create_worker(pool))
1996 return false; 1914 return false;
1997restart: 1915restart:
1998 spin_unlock_irq(&gcwq->lock); 1916 spin_unlock_irq(&pool->lock);
1999 1917
2000 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 1918 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2001 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 1919 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2006,7 +1924,7 @@ restart:
2006 worker = create_worker(pool); 1924 worker = create_worker(pool);
2007 if (worker) { 1925 if (worker) {
2008 del_timer_sync(&pool->mayday_timer); 1926 del_timer_sync(&pool->mayday_timer);
2009 spin_lock_irq(&gcwq->lock); 1927 spin_lock_irq(&pool->lock);
2010 start_worker(worker); 1928 start_worker(worker);
2011 BUG_ON(need_to_create_worker(pool)); 1929 BUG_ON(need_to_create_worker(pool));
2012 return true; 1930 return true;
@@ -2023,7 +1941,7 @@ restart:
2023 } 1941 }
2024 1942
2025 del_timer_sync(&pool->mayday_timer); 1943 del_timer_sync(&pool->mayday_timer);
2026 spin_lock_irq(&gcwq->lock); 1944 spin_lock_irq(&pool->lock);
2027 if (need_to_create_worker(pool)) 1945 if (need_to_create_worker(pool))
2028 goto restart; 1946 goto restart;
2029 return true; 1947 return true;
@@ -2037,11 +1955,11 @@ restart:
2037 * IDLE_WORKER_TIMEOUT. 1955 * IDLE_WORKER_TIMEOUT.
2038 * 1956 *
2039 * LOCKING: 1957 * LOCKING:
2040 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 1958 * spin_lock_irq(pool->lock) which may be released and regrabbed
2041 * multiple times. Called only from manager. 1959 * multiple times. Called only from manager.
2042 * 1960 *
2043 * RETURNS: 1961 * RETURNS:
2044 * false if no action was taken and gcwq->lock stayed locked, true 1962 * false if no action was taken and pool->lock stayed locked, true
2045 * otherwise. 1963 * otherwise.
2046 */ 1964 */
2047static bool maybe_destroy_workers(struct worker_pool *pool) 1965static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2071,21 +1989,21 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
2071 * manage_workers - manage worker pool 1989 * manage_workers - manage worker pool
2072 * @worker: self 1990 * @worker: self
2073 * 1991 *
2074 * Assume the manager role and manage gcwq worker pool @worker belongs 1992 * Assume the manager role and manage the worker pool @worker belongs
2075 * to. At any given time, there can be only zero or one manager per 1993 * to. At any given time, there can be only zero or one manager per
2076 * gcwq. The exclusion is handled automatically by this function. 1994 * pool. The exclusion is handled automatically by this function.
2077 * 1995 *
2078 * The caller can safely start processing works on false return. On 1996 * The caller can safely start processing works on false return. On
2079 * true return, it's guaranteed that need_to_create_worker() is false 1997 * true return, it's guaranteed that need_to_create_worker() is false
2080 * and may_start_working() is true. 1998 * and may_start_working() is true.
2081 * 1999 *
2082 * CONTEXT: 2000 * CONTEXT:
2083 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 2001 * spin_lock_irq(pool->lock) which may be released and regrabbed
2084 * multiple times. Does GFP_KERNEL allocations. 2002 * multiple times. Does GFP_KERNEL allocations.
2085 * 2003 *
2086 * RETURNS: 2004 * RETURNS:
2087 * false if no action was taken and gcwq->lock stayed locked, true if 2005 * spin_lock_irq(pool->lock) which may be released and regrabbed
2088 * some action was taken. 2006 * multiple times. Does GFP_KERNEL allocations.
2089 */ 2007 */
2090static bool manage_workers(struct worker *worker) 2008static bool manage_workers(struct worker *worker)
2091{ 2009{
@@ -2107,20 +2025,20 @@ static bool manage_workers(struct worker *worker)
2107 * manager against CPU hotplug. 2025 * manager against CPU hotplug.
2108 * 2026 *
2109 * assoc_mutex would always be free unless CPU hotplug is in 2027 * assoc_mutex would always be free unless CPU hotplug is in
2110 * progress. trylock first without dropping @gcwq->lock. 2028 * progress. trylock first without dropping @pool->lock.
2111 */ 2029 */
2112 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { 2030 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
2113 spin_unlock_irq(&pool->gcwq->lock); 2031 spin_unlock_irq(&pool->lock);
2114 mutex_lock(&pool->assoc_mutex); 2032 mutex_lock(&pool->assoc_mutex);
2115 /* 2033 /*
2116 * CPU hotplug could have happened while we were waiting 2034 * CPU hotplug could have happened while we were waiting
2117 * for assoc_mutex. Hotplug itself can't handle us 2035 * for assoc_mutex. Hotplug itself can't handle us
2118 * because manager isn't either on idle or busy list, and 2036 * because manager isn't either on idle or busy list, and
2119 * @gcwq's state and ours could have deviated. 2037 * @pool's state and ours could have deviated.
2120 * 2038 *
2121 * As hotplug is now excluded via assoc_mutex, we can 2039 * As hotplug is now excluded via assoc_mutex, we can
2122 * simply try to bind. It will succeed or fail depending 2040 * simply try to bind. It will succeed or fail depending
2123 * on @gcwq's current state. Try it and adjust 2041 * on @pool's current state. Try it and adjust
2124 * %WORKER_UNBOUND accordingly. 2042 * %WORKER_UNBOUND accordingly.
2125 */ 2043 */
2126 if (worker_maybe_bind_and_lock(worker)) 2044 if (worker_maybe_bind_and_lock(worker))
@@ -2157,18 +2075,15 @@ static bool manage_workers(struct worker *worker)
2157 * call this function to process a work. 2075 * call this function to process a work.
2158 * 2076 *
2159 * CONTEXT: 2077 * CONTEXT:
2160 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 2078 * spin_lock_irq(pool->lock) which is released and regrabbed.
2161 */ 2079 */
2162static void process_one_work(struct worker *worker, struct work_struct *work) 2080static void process_one_work(struct worker *worker, struct work_struct *work)
2163__releases(&gcwq->lock) 2081__releases(&pool->lock)
2164__acquires(&gcwq->lock) 2082__acquires(&pool->lock)
2165{ 2083{
2166 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 2084 struct pool_workqueue *pwq = get_work_pwq(work);
2167 struct worker_pool *pool = worker->pool; 2085 struct worker_pool *pool = worker->pool;
2168 struct global_cwq *gcwq = pool->gcwq; 2086 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2169 struct hlist_head *bwh = busy_worker_head(gcwq, work);
2170 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
2171 work_func_t f = work->func;
2172 int work_color; 2087 int work_color;
2173 struct worker *collision; 2088 struct worker *collision;
2174#ifdef CONFIG_LOCKDEP 2089#ifdef CONFIG_LOCKDEP
@@ -2186,11 +2101,11 @@ __acquires(&gcwq->lock)
2186 /* 2101 /*
2187 * Ensure we're on the correct CPU. DISASSOCIATED test is 2102 * Ensure we're on the correct CPU. DISASSOCIATED test is
2188 * necessary to avoid spurious warnings from rescuers servicing the 2103 * necessary to avoid spurious warnings from rescuers servicing the
2189 * unbound or a disassociated gcwq. 2104 * unbound or a disassociated pool.
2190 */ 2105 */
2191 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && 2106 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
2192 !(gcwq->flags & GCWQ_DISASSOCIATED) && 2107 !(pool->flags & POOL_DISASSOCIATED) &&
2193 raw_smp_processor_id() != gcwq->cpu); 2108 raw_smp_processor_id() != pool->cpu);
2194 2109
2195 /* 2110 /*
2196 * A single work shouldn't be executed concurrently by 2111 * A single work shouldn't be executed concurrently by
@@ -2198,7 +2113,7 @@ __acquires(&gcwq->lock)
2198 * already processing the work. If so, defer the work to the 2113 * already processing the work. If so, defer the work to the
2199 * currently executing one. 2114 * currently executing one.
2200 */ 2115 */
2201 collision = __find_worker_executing_work(gcwq, bwh, work); 2116 collision = find_worker_executing_work(pool, work);
2202 if (unlikely(collision)) { 2117 if (unlikely(collision)) {
2203 move_linked_works(work, &collision->scheduled, NULL); 2118 move_linked_works(work, &collision->scheduled, NULL);
2204 return; 2119 return;
@@ -2206,9 +2121,10 @@ __acquires(&gcwq->lock)
2206 2121
2207 /* claim and dequeue */ 2122 /* claim and dequeue */
2208 debug_work_deactivate(work); 2123 debug_work_deactivate(work);
2209 hlist_add_head(&worker->hentry, bwh); 2124 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2210 worker->current_work = work; 2125 worker->current_work = work;
2211 worker->current_cwq = cwq; 2126 worker->current_func = work->func;
2127 worker->current_pwq = pwq;
2212 work_color = get_work_color(work); 2128 work_color = get_work_color(work);
2213 2129
2214 list_del_init(&work->entry); 2130 list_del_init(&work->entry);
@@ -2221,53 +2137,55 @@ __acquires(&gcwq->lock)
2221 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 2137 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
2222 2138
2223 /* 2139 /*
2224 * Unbound gcwq isn't concurrency managed and work items should be 2140 * Unbound pool isn't concurrency managed and work items should be
2225 * executed ASAP. Wake up another worker if necessary. 2141 * executed ASAP. Wake up another worker if necessary.
2226 */ 2142 */
2227 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 2143 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2228 wake_up_worker(pool); 2144 wake_up_worker(pool);
2229 2145
2230 /* 2146 /*
2231 * Record the last CPU and clear PENDING which should be the last 2147 * Record the last pool and clear PENDING which should be the last
2232 * update to @work. Also, do this inside @gcwq->lock so that 2148 * update to @work. Also, do this inside @pool->lock so that
2233 * PENDING and queued state changes happen together while IRQ is 2149 * PENDING and queued state changes happen together while IRQ is
2234 * disabled. 2150 * disabled.
2235 */ 2151 */
2236 set_work_cpu_and_clear_pending(work, gcwq->cpu); 2152 set_work_pool_and_clear_pending(work, pool->id);
2237 2153
2238 spin_unlock_irq(&gcwq->lock); 2154 spin_unlock_irq(&pool->lock);
2239 2155
2240 lock_map_acquire_read(&cwq->wq->lockdep_map); 2156 lock_map_acquire_read(&pwq->wq->lockdep_map);
2241 lock_map_acquire(&lockdep_map); 2157 lock_map_acquire(&lockdep_map);
2242 trace_workqueue_execute_start(work); 2158 trace_workqueue_execute_start(work);
2243 f(work); 2159 worker->current_func(work);
2244 /* 2160 /*
2245 * While we must be careful to not use "work" after this, the trace 2161 * While we must be careful to not use "work" after this, the trace
2246 * point will only record its address. 2162 * point will only record its address.
2247 */ 2163 */
2248 trace_workqueue_execute_end(work); 2164 trace_workqueue_execute_end(work);
2249 lock_map_release(&lockdep_map); 2165 lock_map_release(&lockdep_map);
2250 lock_map_release(&cwq->wq->lockdep_map); 2166 lock_map_release(&pwq->wq->lockdep_map);
2251 2167
2252 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2168 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2253 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2169 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2254 " last function: %pf\n", 2170 " last function: %pf\n",
2255 current->comm, preempt_count(), task_pid_nr(current), f); 2171 current->comm, preempt_count(), task_pid_nr(current),
2172 worker->current_func);
2256 debug_show_held_locks(current); 2173 debug_show_held_locks(current);
2257 dump_stack(); 2174 dump_stack();
2258 } 2175 }
2259 2176
2260 spin_lock_irq(&gcwq->lock); 2177 spin_lock_irq(&pool->lock);
2261 2178
2262 /* clear cpu intensive status */ 2179 /* clear cpu intensive status */
2263 if (unlikely(cpu_intensive)) 2180 if (unlikely(cpu_intensive))
2264 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2181 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2265 2182
2266 /* we're done with it, release */ 2183 /* we're done with it, release */
2267 hlist_del_init(&worker->hentry); 2184 hash_del(&worker->hentry);
2268 worker->current_work = NULL; 2185 worker->current_work = NULL;
2269 worker->current_cwq = NULL; 2186 worker->current_func = NULL;
2270 cwq_dec_nr_in_flight(cwq, work_color); 2187 worker->current_pwq = NULL;
2188 pwq_dec_nr_in_flight(pwq, work_color);
2271} 2189}
2272 2190
2273/** 2191/**
@@ -2279,7 +2197,7 @@ __acquires(&gcwq->lock)
2279 * fetches a work from the top and executes it. 2197 * fetches a work from the top and executes it.
2280 * 2198 *
2281 * CONTEXT: 2199 * CONTEXT:
2282 * spin_lock_irq(gcwq->lock) which may be released and regrabbed 2200 * spin_lock_irq(pool->lock) which may be released and regrabbed
2283 * multiple times. 2201 * multiple times.
2284 */ 2202 */
2285static void process_scheduled_works(struct worker *worker) 2203static void process_scheduled_works(struct worker *worker)
@@ -2295,8 +2213,8 @@ static void process_scheduled_works(struct worker *worker)
2295 * worker_thread - the worker thread function 2213 * worker_thread - the worker thread function
2296 * @__worker: self 2214 * @__worker: self
2297 * 2215 *
2298 * The gcwq worker thread function. There's a single dynamic pool of 2216 * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools
2299 * these per each cpu. These workers process all works regardless of 2217 * of these per each cpu. These workers process all works regardless of
2300 * their specific target workqueue. The only exception is works which 2218 * their specific target workqueue. The only exception is works which
2301 * belong to workqueues with a rescuer which will be explained in 2219 * belong to workqueues with a rescuer which will be explained in
2302 * rescuer_thread(). 2220 * rescuer_thread().
@@ -2305,16 +2223,15 @@ static int worker_thread(void *__worker)
2305{ 2223{
2306 struct worker *worker = __worker; 2224 struct worker *worker = __worker;
2307 struct worker_pool *pool = worker->pool; 2225 struct worker_pool *pool = worker->pool;
2308 struct global_cwq *gcwq = pool->gcwq;
2309 2226
2310 /* tell the scheduler that this is a workqueue worker */ 2227 /* tell the scheduler that this is a workqueue worker */
2311 worker->task->flags |= PF_WQ_WORKER; 2228 worker->task->flags |= PF_WQ_WORKER;
2312woke_up: 2229woke_up:
2313 spin_lock_irq(&gcwq->lock); 2230 spin_lock_irq(&pool->lock);
2314 2231
2315 /* we are off idle list if destruction or rebind is requested */ 2232 /* we are off idle list if destruction or rebind is requested */
2316 if (unlikely(list_empty(&worker->entry))) { 2233 if (unlikely(list_empty(&worker->entry))) {
2317 spin_unlock_irq(&gcwq->lock); 2234 spin_unlock_irq(&pool->lock);
2318 2235
2319 /* if DIE is set, destruction is requested */ 2236 /* if DIE is set, destruction is requested */
2320 if (worker->flags & WORKER_DIE) { 2237 if (worker->flags & WORKER_DIE) {
@@ -2373,52 +2290,59 @@ sleep:
2373 goto recheck; 2290 goto recheck;
2374 2291
2375 /* 2292 /*
2376 * gcwq->lock is held and there's no work to process and no 2293 * pool->lock is held and there's no work to process and no need to
2377 * need to manage, sleep. Workers are woken up only while 2294 * manage, sleep. Workers are woken up only while holding
2378 * holding gcwq->lock or from local cpu, so setting the 2295 * pool->lock or from local cpu, so setting the current state
2379 * current state before releasing gcwq->lock is enough to 2296 * before releasing pool->lock is enough to prevent losing any
2380 * prevent losing any event. 2297 * event.
2381 */ 2298 */
2382 worker_enter_idle(worker); 2299 worker_enter_idle(worker);
2383 __set_current_state(TASK_INTERRUPTIBLE); 2300 __set_current_state(TASK_INTERRUPTIBLE);
2384 spin_unlock_irq(&gcwq->lock); 2301 spin_unlock_irq(&pool->lock);
2385 schedule(); 2302 schedule();
2386 goto woke_up; 2303 goto woke_up;
2387} 2304}
2388 2305
2389/** 2306/**
2390 * rescuer_thread - the rescuer thread function 2307 * rescuer_thread - the rescuer thread function
2391 * @__wq: the associated workqueue 2308 * @__rescuer: self
2392 * 2309 *
2393 * Workqueue rescuer thread function. There's one rescuer for each 2310 * Workqueue rescuer thread function. There's one rescuer for each
2394 * workqueue which has WQ_RESCUER set. 2311 * workqueue which has WQ_RESCUER set.
2395 * 2312 *
2396 * Regular work processing on a gcwq may block trying to create a new 2313 * Regular work processing on a pool may block trying to create a new
2397 * worker which uses GFP_KERNEL allocation which has slight chance of 2314 * worker which uses GFP_KERNEL allocation which has slight chance of
2398 * developing into deadlock if some works currently on the same queue 2315 * developing into deadlock if some works currently on the same queue
2399 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2316 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2400 * the problem rescuer solves. 2317 * the problem rescuer solves.
2401 * 2318 *
2402 * When such condition is possible, the gcwq summons rescuers of all 2319 * When such condition is possible, the pool summons rescuers of all
2403 * workqueues which have works queued on the gcwq and let them process 2320 * workqueues which have works queued on the pool and let them process
2404 * those works so that forward progress can be guaranteed. 2321 * those works so that forward progress can be guaranteed.
2405 * 2322 *
2406 * This should happen rarely. 2323 * This should happen rarely.
2407 */ 2324 */
2408static int rescuer_thread(void *__wq) 2325static int rescuer_thread(void *__rescuer)
2409{ 2326{
2410 struct workqueue_struct *wq = __wq; 2327 struct worker *rescuer = __rescuer;
2411 struct worker *rescuer = wq->rescuer; 2328 struct workqueue_struct *wq = rescuer->rescue_wq;
2412 struct list_head *scheduled = &rescuer->scheduled; 2329 struct list_head *scheduled = &rescuer->scheduled;
2413 bool is_unbound = wq->flags & WQ_UNBOUND; 2330 bool is_unbound = wq->flags & WQ_UNBOUND;
2414 unsigned int cpu; 2331 unsigned int cpu;
2415 2332
2416 set_user_nice(current, RESCUER_NICE_LEVEL); 2333 set_user_nice(current, RESCUER_NICE_LEVEL);
2334
2335 /*
2336 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2337 * doesn't participate in concurrency management.
2338 */
2339 rescuer->task->flags |= PF_WQ_WORKER;
2417repeat: 2340repeat:
2418 set_current_state(TASK_INTERRUPTIBLE); 2341 set_current_state(TASK_INTERRUPTIBLE);
2419 2342
2420 if (kthread_should_stop()) { 2343 if (kthread_should_stop()) {
2421 __set_current_state(TASK_RUNNING); 2344 __set_current_state(TASK_RUNNING);
2345 rescuer->task->flags &= ~PF_WQ_WORKER;
2422 return 0; 2346 return 0;
2423 } 2347 }
2424 2348
@@ -2428,9 +2352,8 @@ repeat:
2428 */ 2352 */
2429 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2353 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2430 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 2354 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2431 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 2355 struct pool_workqueue *pwq = get_pwq(tcpu, wq);
2432 struct worker_pool *pool = cwq->pool; 2356 struct worker_pool *pool = pwq->pool;
2433 struct global_cwq *gcwq = pool->gcwq;
2434 struct work_struct *work, *n; 2357 struct work_struct *work, *n;
2435 2358
2436 __set_current_state(TASK_RUNNING); 2359 __set_current_state(TASK_RUNNING);
@@ -2446,22 +2369,24 @@ repeat:
2446 */ 2369 */
2447 BUG_ON(!list_empty(&rescuer->scheduled)); 2370 BUG_ON(!list_empty(&rescuer->scheduled));
2448 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2371 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2449 if (get_work_cwq(work) == cwq) 2372 if (get_work_pwq(work) == pwq)
2450 move_linked_works(work, scheduled, &n); 2373 move_linked_works(work, scheduled, &n);
2451 2374
2452 process_scheduled_works(rescuer); 2375 process_scheduled_works(rescuer);
2453 2376
2454 /* 2377 /*
2455 * Leave this gcwq. If keep_working() is %true, notify a 2378 * Leave this pool. If keep_working() is %true, notify a
2456 * regular worker; otherwise, we end up with 0 concurrency 2379 * regular worker; otherwise, we end up with 0 concurrency
2457 * and stalling the execution. 2380 * and stalling the execution.
2458 */ 2381 */
2459 if (keep_working(pool)) 2382 if (keep_working(pool))
2460 wake_up_worker(pool); 2383 wake_up_worker(pool);
2461 2384
2462 spin_unlock_irq(&gcwq->lock); 2385 spin_unlock_irq(&pool->lock);
2463 } 2386 }
2464 2387
2388 /* rescuers should never participate in concurrency management */
2389 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2465 schedule(); 2390 schedule();
2466 goto repeat; 2391 goto repeat;
2467} 2392}
@@ -2479,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
2479 2404
2480/** 2405/**
2481 * insert_wq_barrier - insert a barrier work 2406 * insert_wq_barrier - insert a barrier work
2482 * @cwq: cwq to insert barrier into 2407 * @pwq: pwq to insert barrier into
2483 * @barr: wq_barrier to insert 2408 * @barr: wq_barrier to insert
2484 * @target: target work to attach @barr to 2409 * @target: target work to attach @barr to
2485 * @worker: worker currently executing @target, NULL if @target is not executing 2410 * @worker: worker currently executing @target, NULL if @target is not executing
@@ -2496,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
2496 * after a work with LINKED flag set. 2421 * after a work with LINKED flag set.
2497 * 2422 *
2498 * Note that when @worker is non-NULL, @target may be modified 2423 * Note that when @worker is non-NULL, @target may be modified
2499 * underneath us, so we can't reliably determine cwq from @target. 2424 * underneath us, so we can't reliably determine pwq from @target.
2500 * 2425 *
2501 * CONTEXT: 2426 * CONTEXT:
2502 * spin_lock_irq(gcwq->lock). 2427 * spin_lock_irq(pool->lock).
2503 */ 2428 */
2504static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2429static void insert_wq_barrier(struct pool_workqueue *pwq,
2505 struct wq_barrier *barr, 2430 struct wq_barrier *barr,
2506 struct work_struct *target, struct worker *worker) 2431 struct work_struct *target, struct worker *worker)
2507{ 2432{
@@ -2509,7 +2434,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2509 unsigned int linked = 0; 2434 unsigned int linked = 0;
2510 2435
2511 /* 2436 /*
2512 * debugobject calls are safe here even with gcwq->lock locked 2437 * debugobject calls are safe here even with pool->lock locked
2513 * as we know for sure that this will not trigger any of the 2438 * as we know for sure that this will not trigger any of the
2514 * checks and call back into the fixup functions where we 2439 * checks and call back into the fixup functions where we
2515 * might deadlock. 2440 * might deadlock.
@@ -2534,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2534 } 2459 }
2535 2460
2536 debug_work_activate(&barr->work); 2461 debug_work_activate(&barr->work);
2537 insert_work(cwq, &barr->work, head, 2462 insert_work(pwq, &barr->work, head,
2538 work_color_to_flags(WORK_NO_COLOR) | linked); 2463 work_color_to_flags(WORK_NO_COLOR) | linked);
2539} 2464}
2540 2465
2541/** 2466/**
2542 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing 2467 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2543 * @wq: workqueue being flushed 2468 * @wq: workqueue being flushed
2544 * @flush_color: new flush color, < 0 for no-op 2469 * @flush_color: new flush color, < 0 for no-op
2545 * @work_color: new work color, < 0 for no-op 2470 * @work_color: new work color, < 0 for no-op
2546 * 2471 *
2547 * Prepare cwqs for workqueue flushing. 2472 * Prepare pwqs for workqueue flushing.
2548 * 2473 *
2549 * If @flush_color is non-negative, flush_color on all cwqs should be 2474 * If @flush_color is non-negative, flush_color on all pwqs should be
2550 * -1. If no cwq has in-flight commands at the specified color, all 2475 * -1. If no pwq has in-flight commands at the specified color, all
2551 * cwq->flush_color's stay at -1 and %false is returned. If any cwq 2476 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2552 * has in flight commands, its cwq->flush_color is set to 2477 * has in flight commands, its pwq->flush_color is set to
2553 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq 2478 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2554 * wakeup logic is armed and %true is returned. 2479 * wakeup logic is armed and %true is returned.
2555 * 2480 *
2556 * The caller should have initialized @wq->first_flusher prior to 2481 * The caller should have initialized @wq->first_flusher prior to
@@ -2558,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2558 * @flush_color is negative, no flush color update is done and %false 2483 * @flush_color is negative, no flush color update is done and %false
2559 * is returned. 2484 * is returned.
2560 * 2485 *
2561 * If @work_color is non-negative, all cwqs should have the same 2486 * If @work_color is non-negative, all pwqs should have the same
2562 * work_color which is previous to @work_color and all will be 2487 * work_color which is previous to @work_color and all will be
2563 * advanced to @work_color. 2488 * advanced to @work_color.
2564 * 2489 *
@@ -2569,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2569 * %true if @flush_color >= 0 and there's something to flush. %false 2494 * %true if @flush_color >= 0 and there's something to flush. %false
2570 * otherwise. 2495 * otherwise.
2571 */ 2496 */
2572static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, 2497static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2573 int flush_color, int work_color) 2498 int flush_color, int work_color)
2574{ 2499{
2575 bool wait = false; 2500 bool wait = false;
2576 unsigned int cpu; 2501 unsigned int cpu;
2577 2502
2578 if (flush_color >= 0) { 2503 if (flush_color >= 0) {
2579 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); 2504 BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
2580 atomic_set(&wq->nr_cwqs_to_flush, 1); 2505 atomic_set(&wq->nr_pwqs_to_flush, 1);
2581 } 2506 }
2582 2507
2583 for_each_cwq_cpu(cpu, wq) { 2508 for_each_pwq_cpu(cpu, wq) {
2584 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2509 struct pool_workqueue *pwq = get_pwq(cpu, wq);
2585 struct global_cwq *gcwq = cwq->pool->gcwq; 2510 struct worker_pool *pool = pwq->pool;
2586 2511
2587 spin_lock_irq(&gcwq->lock); 2512 spin_lock_irq(&pool->lock);
2588 2513
2589 if (flush_color >= 0) { 2514 if (flush_color >= 0) {
2590 BUG_ON(cwq->flush_color != -1); 2515 BUG_ON(pwq->flush_color != -1);
2591 2516
2592 if (cwq->nr_in_flight[flush_color]) { 2517 if (pwq->nr_in_flight[flush_color]) {
2593 cwq->flush_color = flush_color; 2518 pwq->flush_color = flush_color;
2594 atomic_inc(&wq->nr_cwqs_to_flush); 2519 atomic_inc(&wq->nr_pwqs_to_flush);
2595 wait = true; 2520 wait = true;
2596 } 2521 }
2597 } 2522 }
2598 2523
2599 if (work_color >= 0) { 2524 if (work_color >= 0) {
2600 BUG_ON(work_color != work_next_color(cwq->work_color)); 2525 BUG_ON(work_color != work_next_color(pwq->work_color));
2601 cwq->work_color = work_color; 2526 pwq->work_color = work_color;
2602 } 2527 }
2603 2528
2604 spin_unlock_irq(&gcwq->lock); 2529 spin_unlock_irq(&pool->lock);
2605 } 2530 }
2606 2531
2607 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 2532 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2608 complete(&wq->first_flusher->done); 2533 complete(&wq->first_flusher->done);
2609 2534
2610 return wait; 2535 return wait;
@@ -2655,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2655 2580
2656 wq->first_flusher = &this_flusher; 2581 wq->first_flusher = &this_flusher;
2657 2582
2658 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, 2583 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2659 wq->work_color)) { 2584 wq->work_color)) {
2660 /* nothing to flush, done */ 2585 /* nothing to flush, done */
2661 wq->flush_color = next_color; 2586 wq->flush_color = next_color;
@@ -2666,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2666 /* wait in queue */ 2591 /* wait in queue */
2667 BUG_ON(wq->flush_color == this_flusher.flush_color); 2592 BUG_ON(wq->flush_color == this_flusher.flush_color);
2668 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2593 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2669 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2594 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2670 } 2595 }
2671 } else { 2596 } else {
2672 /* 2597 /*
@@ -2733,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2733 2658
2734 list_splice_tail_init(&wq->flusher_overflow, 2659 list_splice_tail_init(&wq->flusher_overflow,
2735 &wq->flusher_queue); 2660 &wq->flusher_queue);
2736 flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2661 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2737 } 2662 }
2738 2663
2739 if (list_empty(&wq->flusher_queue)) { 2664 if (list_empty(&wq->flusher_queue)) {
@@ -2743,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2743 2668
2744 /* 2669 /*
2745 * Need to flush more colors. Make the next flusher 2670 * Need to flush more colors. Make the next flusher
2746 * the new first flusher and arm cwqs. 2671 * the new first flusher and arm pwqs.
2747 */ 2672 */
2748 BUG_ON(wq->flush_color == wq->work_color); 2673 BUG_ON(wq->flush_color == wq->work_color);
2749 BUG_ON(wq->flush_color != next->flush_color); 2674 BUG_ON(wq->flush_color != next->flush_color);
@@ -2751,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq)
2751 list_del_init(&next->list); 2676 list_del_init(&next->list);
2752 wq->first_flusher = next; 2677 wq->first_flusher = next;
2753 2678
2754 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) 2679 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2755 break; 2680 break;
2756 2681
2757 /* 2682 /*
@@ -2794,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq)
2794reflush: 2719reflush:
2795 flush_workqueue(wq); 2720 flush_workqueue(wq);
2796 2721
2797 for_each_cwq_cpu(cpu, wq) { 2722 for_each_pwq_cpu(cpu, wq) {
2798 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2723 struct pool_workqueue *pwq = get_pwq(cpu, wq);
2799 bool drained; 2724 bool drained;
2800 2725
2801 spin_lock_irq(&cwq->pool->gcwq->lock); 2726 spin_lock_irq(&pwq->pool->lock);
2802 drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2727 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2803 spin_unlock_irq(&cwq->pool->gcwq->lock); 2728 spin_unlock_irq(&pwq->pool->lock);
2804 2729
2805 if (drained) 2730 if (drained)
2806 continue; 2731 continue;
@@ -2822,34 +2747,29 @@ EXPORT_SYMBOL_GPL(drain_workqueue);
2822static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2747static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2823{ 2748{
2824 struct worker *worker = NULL; 2749 struct worker *worker = NULL;
2825 struct global_cwq *gcwq; 2750 struct worker_pool *pool;
2826 struct cpu_workqueue_struct *cwq; 2751 struct pool_workqueue *pwq;
2827 2752
2828 might_sleep(); 2753 might_sleep();
2829 gcwq = get_work_gcwq(work); 2754 pool = get_work_pool(work);
2830 if (!gcwq) 2755 if (!pool)
2831 return false; 2756 return false;
2832 2757
2833 spin_lock_irq(&gcwq->lock); 2758 spin_lock_irq(&pool->lock);
2834 if (!list_empty(&work->entry)) { 2759 /* see the comment in try_to_grab_pending() with the same code */
2835 /* 2760 pwq = get_work_pwq(work);
2836 * See the comment near try_to_grab_pending()->smp_rmb(). 2761 if (pwq) {
2837 * If it was re-queued to a different gcwq under us, we 2762 if (unlikely(pwq->pool != pool))
2838 * are not going to wait.
2839 */
2840 smp_rmb();
2841 cwq = get_work_cwq(work);
2842 if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
2843 goto already_gone; 2763 goto already_gone;
2844 } else { 2764 } else {
2845 worker = find_worker_executing_work(gcwq, work); 2765 worker = find_worker_executing_work(pool, work);
2846 if (!worker) 2766 if (!worker)
2847 goto already_gone; 2767 goto already_gone;
2848 cwq = worker->current_cwq; 2768 pwq = worker->current_pwq;
2849 } 2769 }
2850 2770
2851 insert_wq_barrier(cwq, barr, work, worker); 2771 insert_wq_barrier(pwq, barr, work, worker);
2852 spin_unlock_irq(&gcwq->lock); 2772 spin_unlock_irq(&pool->lock);
2853 2773
2854 /* 2774 /*
2855 * If @max_active is 1 or rescuer is in use, flushing another work 2775 * If @max_active is 1 or rescuer is in use, flushing another work
@@ -2857,15 +2777,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2857 * flusher is not running on the same workqueue by verifying write 2777 * flusher is not running on the same workqueue by verifying write
2858 * access. 2778 * access.
2859 */ 2779 */
2860 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) 2780 if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
2861 lock_map_acquire(&cwq->wq->lockdep_map); 2781 lock_map_acquire(&pwq->wq->lockdep_map);
2862 else 2782 else
2863 lock_map_acquire_read(&cwq->wq->lockdep_map); 2783 lock_map_acquire_read(&pwq->wq->lockdep_map);
2864 lock_map_release(&cwq->wq->lockdep_map); 2784 lock_map_release(&pwq->wq->lockdep_map);
2865 2785
2866 return true; 2786 return true;
2867already_gone: 2787already_gone:
2868 spin_unlock_irq(&gcwq->lock); 2788 spin_unlock_irq(&pool->lock);
2869 return false; 2789 return false;
2870} 2790}
2871 2791
@@ -2961,8 +2881,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
2961{ 2881{
2962 local_irq_disable(); 2882 local_irq_disable();
2963 if (del_timer_sync(&dwork->timer)) 2883 if (del_timer_sync(&dwork->timer))
2964 __queue_work(dwork->cpu, 2884 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2965 get_work_cwq(&dwork->work)->wq, &dwork->work);
2966 local_irq_enable(); 2885 local_irq_enable();
2967 return flush_work(&dwork->work); 2886 return flush_work(&dwork->work);
2968} 2887}
@@ -2992,7 +2911,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
2992 if (unlikely(ret < 0)) 2911 if (unlikely(ret < 0))
2993 return false; 2912 return false;
2994 2913
2995 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); 2914 set_work_pool_and_clear_pending(&dwork->work,
2915 get_work_pool_id(&dwork->work));
2996 local_irq_restore(flags); 2916 local_irq_restore(flags);
2997 return ret; 2917 return ret;
2998} 2918}
@@ -3171,46 +3091,46 @@ int keventd_up(void)
3171 return system_wq != NULL; 3091 return system_wq != NULL;
3172} 3092}
3173 3093
3174static int alloc_cwqs(struct workqueue_struct *wq) 3094static int alloc_pwqs(struct workqueue_struct *wq)
3175{ 3095{
3176 /* 3096 /*
3177 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 3097 * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
3178 * Make sure that the alignment isn't lower than that of 3098 * Make sure that the alignment isn't lower than that of
3179 * unsigned long long. 3099 * unsigned long long.
3180 */ 3100 */
3181 const size_t size = sizeof(struct cpu_workqueue_struct); 3101 const size_t size = sizeof(struct pool_workqueue);
3182 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 3102 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
3183 __alignof__(unsigned long long)); 3103 __alignof__(unsigned long long));
3184 3104
3185 if (!(wq->flags & WQ_UNBOUND)) 3105 if (!(wq->flags & WQ_UNBOUND))
3186 wq->cpu_wq.pcpu = __alloc_percpu(size, align); 3106 wq->pool_wq.pcpu = __alloc_percpu(size, align);
3187 else { 3107 else {
3188 void *ptr; 3108 void *ptr;
3189 3109
3190 /* 3110 /*
3191 * Allocate enough room to align cwq and put an extra 3111 * Allocate enough room to align pwq and put an extra
3192 * pointer at the end pointing back to the originally 3112 * pointer at the end pointing back to the originally
3193 * allocated pointer which will be used for free. 3113 * allocated pointer which will be used for free.
3194 */ 3114 */
3195 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 3115 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
3196 if (ptr) { 3116 if (ptr) {
3197 wq->cpu_wq.single = PTR_ALIGN(ptr, align); 3117 wq->pool_wq.single = PTR_ALIGN(ptr, align);
3198 *(void **)(wq->cpu_wq.single + 1) = ptr; 3118 *(void **)(wq->pool_wq.single + 1) = ptr;
3199 } 3119 }
3200 } 3120 }
3201 3121
3202 /* just in case, make sure it's actually aligned */ 3122 /* just in case, make sure it's actually aligned */
3203 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); 3123 BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
3204 return wq->cpu_wq.v ? 0 : -ENOMEM; 3124 return wq->pool_wq.v ? 0 : -ENOMEM;
3205} 3125}
3206 3126
3207static void free_cwqs(struct workqueue_struct *wq) 3127static void free_pwqs(struct workqueue_struct *wq)
3208{ 3128{
3209 if (!(wq->flags & WQ_UNBOUND)) 3129 if (!(wq->flags & WQ_UNBOUND))
3210 free_percpu(wq->cpu_wq.pcpu); 3130 free_percpu(wq->pool_wq.pcpu);
3211 else if (wq->cpu_wq.single) { 3131 else if (wq->pool_wq.single) {
3212 /* the pointer to free is stored right after the cwq */ 3132 /* the pointer to free is stored right after the pwq */
3213 kfree(*(void **)(wq->cpu_wq.single + 1)); 3133 kfree(*(void **)(wq->pool_wq.single + 1));
3214 } 3134 }
3215} 3135}
3216 3136
@@ -3264,27 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3264 wq->flags = flags; 3184 wq->flags = flags;
3265 wq->saved_max_active = max_active; 3185 wq->saved_max_active = max_active;
3266 mutex_init(&wq->flush_mutex); 3186 mutex_init(&wq->flush_mutex);
3267 atomic_set(&wq->nr_cwqs_to_flush, 0); 3187 atomic_set(&wq->nr_pwqs_to_flush, 0);
3268 INIT_LIST_HEAD(&wq->flusher_queue); 3188 INIT_LIST_HEAD(&wq->flusher_queue);
3269 INIT_LIST_HEAD(&wq->flusher_overflow); 3189 INIT_LIST_HEAD(&wq->flusher_overflow);
3270 3190
3271 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3191 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3272 INIT_LIST_HEAD(&wq->list); 3192 INIT_LIST_HEAD(&wq->list);
3273 3193
3274 if (alloc_cwqs(wq) < 0) 3194 if (alloc_pwqs(wq) < 0)
3275 goto err; 3195 goto err;
3276 3196
3277 for_each_cwq_cpu(cpu, wq) { 3197 for_each_pwq_cpu(cpu, wq) {
3278 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3198 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3279 struct global_cwq *gcwq = get_gcwq(cpu); 3199
3280 int pool_idx = (bool)(flags & WQ_HIGHPRI); 3200 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3281 3201 pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
3282 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 3202 pwq->wq = wq;
3283 cwq->pool = &gcwq->pools[pool_idx]; 3203 pwq->flush_color = -1;
3284 cwq->wq = wq; 3204 pwq->max_active = max_active;
3285 cwq->flush_color = -1; 3205 INIT_LIST_HEAD(&pwq->delayed_works);
3286 cwq->max_active = max_active;
3287 INIT_LIST_HEAD(&cwq->delayed_works);
3288 } 3206 }
3289 3207
3290 if (flags & WQ_RESCUER) { 3208 if (flags & WQ_RESCUER) {
@@ -3297,7 +3215,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3297 if (!rescuer) 3215 if (!rescuer)
3298 goto err; 3216 goto err;
3299 3217
3300 rescuer->task = kthread_create(rescuer_thread, wq, "%s", 3218 rescuer->rescue_wq = wq;
3219 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
3301 wq->name); 3220 wq->name);
3302 if (IS_ERR(rescuer->task)) 3221 if (IS_ERR(rescuer->task))
3303 goto err; 3222 goto err;
@@ -3314,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3314 spin_lock(&workqueue_lock); 3233 spin_lock(&workqueue_lock);
3315 3234
3316 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 3235 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3317 for_each_cwq_cpu(cpu, wq) 3236 for_each_pwq_cpu(cpu, wq)
3318 get_cwq(cpu, wq)->max_active = 0; 3237 get_pwq(cpu, wq)->max_active = 0;
3319 3238
3320 list_add(&wq->list, &workqueues); 3239 list_add(&wq->list, &workqueues);
3321 3240
@@ -3324,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3324 return wq; 3243 return wq;
3325err: 3244err:
3326 if (wq) { 3245 if (wq) {
3327 free_cwqs(wq); 3246 free_pwqs(wq);
3328 free_mayday_mask(wq->mayday_mask); 3247 free_mayday_mask(wq->mayday_mask);
3329 kfree(wq->rescuer); 3248 kfree(wq->rescuer);
3330 kfree(wq); 3249 kfree(wq);
@@ -3355,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
3355 spin_unlock(&workqueue_lock); 3274 spin_unlock(&workqueue_lock);
3356 3275
3357 /* sanity check */ 3276 /* sanity check */
3358 for_each_cwq_cpu(cpu, wq) { 3277 for_each_pwq_cpu(cpu, wq) {
3359 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3278 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3360 int i; 3279 int i;
3361 3280
3362 for (i = 0; i < WORK_NR_COLORS; i++) 3281 for (i = 0; i < WORK_NR_COLORS; i++)
3363 BUG_ON(cwq->nr_in_flight[i]); 3282 BUG_ON(pwq->nr_in_flight[i]);
3364 BUG_ON(cwq->nr_active); 3283 BUG_ON(pwq->nr_active);
3365 BUG_ON(!list_empty(&cwq->delayed_works)); 3284 BUG_ON(!list_empty(&pwq->delayed_works));
3366 } 3285 }
3367 3286
3368 if (wq->flags & WQ_RESCUER) { 3287 if (wq->flags & WQ_RESCUER) {
@@ -3371,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
3371 kfree(wq->rescuer); 3290 kfree(wq->rescuer);
3372 } 3291 }
3373 3292
3374 free_cwqs(wq); 3293 free_pwqs(wq);
3375 kfree(wq); 3294 kfree(wq);
3376} 3295}
3377EXPORT_SYMBOL_GPL(destroy_workqueue); 3296EXPORT_SYMBOL_GPL(destroy_workqueue);
3378 3297
3379/** 3298/**
3380 * cwq_set_max_active - adjust max_active of a cwq 3299 * pwq_set_max_active - adjust max_active of a pwq
3381 * @cwq: target cpu_workqueue_struct 3300 * @pwq: target pool_workqueue
3382 * @max_active: new max_active value. 3301 * @max_active: new max_active value.
3383 * 3302 *
3384 * Set @cwq->max_active to @max_active and activate delayed works if 3303 * Set @pwq->max_active to @max_active and activate delayed works if
3385 * increased. 3304 * increased.
3386 * 3305 *
3387 * CONTEXT: 3306 * CONTEXT:
3388 * spin_lock_irq(gcwq->lock). 3307 * spin_lock_irq(pool->lock).
3389 */ 3308 */
3390static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) 3309static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
3391{ 3310{
3392 cwq->max_active = max_active; 3311 pwq->max_active = max_active;
3393 3312
3394 while (!list_empty(&cwq->delayed_works) && 3313 while (!list_empty(&pwq->delayed_works) &&
3395 cwq->nr_active < cwq->max_active) 3314 pwq->nr_active < pwq->max_active)
3396 cwq_activate_first_delayed(cwq); 3315 pwq_activate_first_delayed(pwq);
3397} 3316}
3398 3317
3399/** 3318/**
@@ -3416,16 +3335,17 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3416 3335
3417 wq->saved_max_active = max_active; 3336 wq->saved_max_active = max_active;
3418 3337
3419 for_each_cwq_cpu(cpu, wq) { 3338 for_each_pwq_cpu(cpu, wq) {
3420 struct global_cwq *gcwq = get_gcwq(cpu); 3339 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3340 struct worker_pool *pool = pwq->pool;
3421 3341
3422 spin_lock_irq(&gcwq->lock); 3342 spin_lock_irq(&pool->lock);
3423 3343
3424 if (!(wq->flags & WQ_FREEZABLE) || 3344 if (!(wq->flags & WQ_FREEZABLE) ||
3425 !(gcwq->flags & GCWQ_FREEZING)) 3345 !(pool->flags & POOL_FREEZING))
3426 cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active); 3346 pwq_set_max_active(pwq, max_active);
3427 3347
3428 spin_unlock_irq(&gcwq->lock); 3348 spin_unlock_irq(&pool->lock);
3429 } 3349 }
3430 3350
3431 spin_unlock(&workqueue_lock); 3351 spin_unlock(&workqueue_lock);
@@ -3446,57 +3366,38 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3446 */ 3366 */
3447bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 3367bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3448{ 3368{
3449 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3369 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3450 3370
3451 return !list_empty(&cwq->delayed_works); 3371 return !list_empty(&pwq->delayed_works);
3452} 3372}
3453EXPORT_SYMBOL_GPL(workqueue_congested); 3373EXPORT_SYMBOL_GPL(workqueue_congested);
3454 3374
3455/** 3375/**
3456 * work_cpu - return the last known associated cpu for @work
3457 * @work: the work of interest
3458 *
3459 * RETURNS:
3460 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3461 */
3462unsigned int work_cpu(struct work_struct *work)
3463{
3464 struct global_cwq *gcwq = get_work_gcwq(work);
3465
3466 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3467}
3468EXPORT_SYMBOL_GPL(work_cpu);
3469
3470/**
3471 * work_busy - test whether a work is currently pending or running 3376 * work_busy - test whether a work is currently pending or running
3472 * @work: the work to be tested 3377 * @work: the work to be tested
3473 * 3378 *
3474 * Test whether @work is currently pending or running. There is no 3379 * Test whether @work is currently pending or running. There is no
3475 * synchronization around this function and the test result is 3380 * synchronization around this function and the test result is
3476 * unreliable and only useful as advisory hints or for debugging. 3381 * unreliable and only useful as advisory hints or for debugging.
3477 * Especially for reentrant wqs, the pending state might hide the
3478 * running state.
3479 * 3382 *
3480 * RETURNS: 3383 * RETURNS:
3481 * OR'd bitmask of WORK_BUSY_* bits. 3384 * OR'd bitmask of WORK_BUSY_* bits.
3482 */ 3385 */
3483unsigned int work_busy(struct work_struct *work) 3386unsigned int work_busy(struct work_struct *work)
3484{ 3387{
3485 struct global_cwq *gcwq = get_work_gcwq(work); 3388 struct worker_pool *pool = get_work_pool(work);
3486 unsigned long flags; 3389 unsigned long flags;
3487 unsigned int ret = 0; 3390 unsigned int ret = 0;
3488 3391
3489 if (!gcwq)
3490 return 0;
3491
3492 spin_lock_irqsave(&gcwq->lock, flags);
3493
3494 if (work_pending(work)) 3392 if (work_pending(work))
3495 ret |= WORK_BUSY_PENDING; 3393 ret |= WORK_BUSY_PENDING;
3496 if (find_worker_executing_work(gcwq, work))
3497 ret |= WORK_BUSY_RUNNING;
3498 3394
3499 spin_unlock_irqrestore(&gcwq->lock, flags); 3395 if (pool) {
3396 spin_lock_irqsave(&pool->lock, flags);
3397 if (find_worker_executing_work(pool, work))
3398 ret |= WORK_BUSY_RUNNING;
3399 spin_unlock_irqrestore(&pool->lock, flags);
3400 }
3500 3401
3501 return ret; 3402 return ret;
3502} 3403}
@@ -3506,65 +3407,49 @@ EXPORT_SYMBOL_GPL(work_busy);
3506 * CPU hotplug. 3407 * CPU hotplug.
3507 * 3408 *
3508 * There are two challenges in supporting CPU hotplug. Firstly, there 3409 * There are two challenges in supporting CPU hotplug. Firstly, there
3509 * are a lot of assumptions on strong associations among work, cwq and 3410 * are a lot of assumptions on strong associations among work, pwq and
3510 * gcwq which make migrating pending and scheduled works very 3411 * pool which make migrating pending and scheduled works very
3511 * difficult to implement without impacting hot paths. Secondly, 3412 * difficult to implement without impacting hot paths. Secondly,
3512 * gcwqs serve mix of short, long and very long running works making 3413 * worker pools serve mix of short, long and very long running works making
3513 * blocked draining impractical. 3414 * blocked draining impractical.
3514 * 3415 *
3515 * This is solved by allowing a gcwq to be disassociated from the CPU 3416 * This is solved by allowing the pools to be disassociated from the CPU
3516 * running as an unbound one and allowing it to be reattached later if the 3417 * running as an unbound one and allowing it to be reattached later if the
3517 * cpu comes back online. 3418 * cpu comes back online.
3518 */ 3419 */
3519 3420
3520/* claim manager positions of all pools */ 3421static void wq_unbind_fn(struct work_struct *work)
3521static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
3522{ 3422{
3523 struct worker_pool *pool; 3423 int cpu = smp_processor_id();
3524
3525 for_each_worker_pool(pool, gcwq)
3526 mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
3527 spin_lock_irq(&gcwq->lock);
3528}
3529
3530/* release manager positions */
3531static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
3532{
3533 struct worker_pool *pool;
3534
3535 spin_unlock_irq(&gcwq->lock);
3536 for_each_worker_pool(pool, gcwq)
3537 mutex_unlock(&pool->assoc_mutex);
3538}
3539
3540static void gcwq_unbind_fn(struct work_struct *work)
3541{
3542 struct global_cwq *gcwq = get_gcwq(smp_processor_id());
3543 struct worker_pool *pool; 3424 struct worker_pool *pool;
3544 struct worker *worker; 3425 struct worker *worker;
3545 struct hlist_node *pos; 3426 struct hlist_node *pos;
3546 int i; 3427 int i;
3547 3428
3548 BUG_ON(gcwq->cpu != smp_processor_id()); 3429 for_each_std_worker_pool(pool, cpu) {
3430 BUG_ON(cpu != smp_processor_id());
3549 3431
3550 gcwq_claim_assoc_and_lock(gcwq); 3432 mutex_lock(&pool->assoc_mutex);
3433 spin_lock_irq(&pool->lock);
3551 3434
3552 /* 3435 /*
3553 * We've claimed all manager positions. Make all workers unbound 3436 * We've claimed all manager positions. Make all workers
3554 * and set DISASSOCIATED. Before this, all workers except for the 3437 * unbound and set DISASSOCIATED. Before this, all workers
3555 * ones which are still executing works from before the last CPU 3438 * except for the ones which are still executing works from
3556 * down must be on the cpu. After this, they may become diasporas. 3439 * before the last CPU down must be on the cpu. After
3557 */ 3440 * this, they may become diasporas.
3558 for_each_worker_pool(pool, gcwq) 3441 */
3559 list_for_each_entry(worker, &pool->idle_list, entry) 3442 list_for_each_entry(worker, &pool->idle_list, entry)
3560 worker->flags |= WORKER_UNBOUND; 3443 worker->flags |= WORKER_UNBOUND;
3561 3444
3562 for_each_busy_worker(worker, i, pos, gcwq) 3445 for_each_busy_worker(worker, i, pos, pool)
3563 worker->flags |= WORKER_UNBOUND; 3446 worker->flags |= WORKER_UNBOUND;
3564 3447
3565 gcwq->flags |= GCWQ_DISASSOCIATED; 3448 pool->flags |= POOL_DISASSOCIATED;
3566 3449
3567 gcwq_release_assoc_and_unlock(gcwq); 3450 spin_unlock_irq(&pool->lock);
3451 mutex_unlock(&pool->assoc_mutex);
3452 }
3568 3453
3569 /* 3454 /*
3570 * Call schedule() so that we cross rq->lock and thus can guarantee 3455 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3576,16 +3461,16 @@ static void gcwq_unbind_fn(struct work_struct *work)
3576 /* 3461 /*
3577 * Sched callbacks are disabled now. Zap nr_running. After this, 3462 * Sched callbacks are disabled now. Zap nr_running. After this,
3578 * nr_running stays zero and need_more_worker() and keep_working() 3463 * nr_running stays zero and need_more_worker() and keep_working()
3579 * are always true as long as the worklist is not empty. @gcwq now 3464 * are always true as long as the worklist is not empty. Pools on
3580 * behaves as unbound (in terms of concurrency management) gcwq 3465 * @cpu now behave as unbound (in terms of concurrency management)
3581 * which is served by workers tied to the CPU. 3466 * pools which are served by workers tied to the CPU.
3582 * 3467 *
3583 * On return from this function, the current worker would trigger 3468 * On return from this function, the current worker would trigger
3584 * unbound chain execution of pending work items if other workers 3469 * unbound chain execution of pending work items if other workers
3585 * didn't already. 3470 * didn't already.
3586 */ 3471 */
3587 for_each_worker_pool(pool, gcwq) 3472 for_each_std_worker_pool(pool, cpu)
3588 atomic_set(get_pool_nr_running(pool), 0); 3473 atomic_set(&pool->nr_running, 0);
3589} 3474}
3590 3475
3591/* 3476/*
@@ -3597,12 +3482,11 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3597 void *hcpu) 3482 void *hcpu)
3598{ 3483{
3599 unsigned int cpu = (unsigned long)hcpu; 3484 unsigned int cpu = (unsigned long)hcpu;
3600 struct global_cwq *gcwq = get_gcwq(cpu);
3601 struct worker_pool *pool; 3485 struct worker_pool *pool;
3602 3486
3603 switch (action & ~CPU_TASKS_FROZEN) { 3487 switch (action & ~CPU_TASKS_FROZEN) {
3604 case CPU_UP_PREPARE: 3488 case CPU_UP_PREPARE:
3605 for_each_worker_pool(pool, gcwq) { 3489 for_each_std_worker_pool(pool, cpu) {
3606 struct worker *worker; 3490 struct worker *worker;
3607 3491
3608 if (pool->nr_workers) 3492 if (pool->nr_workers)
@@ -3612,18 +3496,24 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3612 if (!worker) 3496 if (!worker)
3613 return NOTIFY_BAD; 3497 return NOTIFY_BAD;
3614 3498
3615 spin_lock_irq(&gcwq->lock); 3499 spin_lock_irq(&pool->lock);
3616 start_worker(worker); 3500 start_worker(worker);
3617 spin_unlock_irq(&gcwq->lock); 3501 spin_unlock_irq(&pool->lock);
3618 } 3502 }
3619 break; 3503 break;
3620 3504
3621 case CPU_DOWN_FAILED: 3505 case CPU_DOWN_FAILED:
3622 case CPU_ONLINE: 3506 case CPU_ONLINE:
3623 gcwq_claim_assoc_and_lock(gcwq); 3507 for_each_std_worker_pool(pool, cpu) {
3624 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3508 mutex_lock(&pool->assoc_mutex);
3625 rebind_workers(gcwq); 3509 spin_lock_irq(&pool->lock);
3626 gcwq_release_assoc_and_unlock(gcwq); 3510
3511 pool->flags &= ~POOL_DISASSOCIATED;
3512 rebind_workers(pool);
3513
3514 spin_unlock_irq(&pool->lock);
3515 mutex_unlock(&pool->assoc_mutex);
3516 }
3627 break; 3517 break;
3628 } 3518 }
3629 return NOTIFY_OK; 3519 return NOTIFY_OK;
@@ -3643,7 +3533,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3643 switch (action & ~CPU_TASKS_FROZEN) { 3533 switch (action & ~CPU_TASKS_FROZEN) {
3644 case CPU_DOWN_PREPARE: 3534 case CPU_DOWN_PREPARE:
3645 /* unbinding should happen on the local CPU */ 3535 /* unbinding should happen on the local CPU */
3646 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn); 3536 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
3647 queue_work_on(cpu, system_highpri_wq, &unbind_work); 3537 queue_work_on(cpu, system_highpri_wq, &unbind_work);
3648 flush_work(&unbind_work); 3538 flush_work(&unbind_work);
3649 break; 3539 break;
@@ -3696,10 +3586,10 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
3696 * 3586 *
3697 * Start freezing workqueues. After this function returns, all freezable 3587 * Start freezing workqueues. After this function returns, all freezable
3698 * workqueues will queue new works to their frozen_works list instead of 3588 * workqueues will queue new works to their frozen_works list instead of
3699 * gcwq->worklist. 3589 * pool->worklist.
3700 * 3590 *
3701 * CONTEXT: 3591 * CONTEXT:
3702 * Grabs and releases workqueue_lock and gcwq->lock's. 3592 * Grabs and releases workqueue_lock and pool->lock's.
3703 */ 3593 */
3704void freeze_workqueues_begin(void) 3594void freeze_workqueues_begin(void)
3705{ 3595{
@@ -3710,23 +3600,26 @@ void freeze_workqueues_begin(void)
3710 BUG_ON(workqueue_freezing); 3600 BUG_ON(workqueue_freezing);
3711 workqueue_freezing = true; 3601 workqueue_freezing = true;
3712 3602
3713 for_each_gcwq_cpu(cpu) { 3603 for_each_wq_cpu(cpu) {
3714 struct global_cwq *gcwq = get_gcwq(cpu); 3604 struct worker_pool *pool;
3715 struct workqueue_struct *wq; 3605 struct workqueue_struct *wq;
3716 3606
3717 spin_lock_irq(&gcwq->lock); 3607 for_each_std_worker_pool(pool, cpu) {
3608 spin_lock_irq(&pool->lock);
3718 3609
3719 BUG_ON(gcwq->flags & GCWQ_FREEZING); 3610 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3720 gcwq->flags |= GCWQ_FREEZING; 3611 pool->flags |= POOL_FREEZING;
3721 3612
3722 list_for_each_entry(wq, &workqueues, list) { 3613 list_for_each_entry(wq, &workqueues, list) {
3723 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3614 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3724 3615
3725 if (cwq && wq->flags & WQ_FREEZABLE) 3616 if (pwq && pwq->pool == pool &&
3726 cwq->max_active = 0; 3617 (wq->flags & WQ_FREEZABLE))
3727 } 3618 pwq->max_active = 0;
3619 }
3728 3620
3729 spin_unlock_irq(&gcwq->lock); 3621 spin_unlock_irq(&pool->lock);
3622 }
3730 } 3623 }
3731 3624
3732 spin_unlock(&workqueue_lock); 3625 spin_unlock(&workqueue_lock);
@@ -3754,20 +3647,20 @@ bool freeze_workqueues_busy(void)
3754 3647
3755 BUG_ON(!workqueue_freezing); 3648 BUG_ON(!workqueue_freezing);
3756 3649
3757 for_each_gcwq_cpu(cpu) { 3650 for_each_wq_cpu(cpu) {
3758 struct workqueue_struct *wq; 3651 struct workqueue_struct *wq;
3759 /* 3652 /*
3760 * nr_active is monotonically decreasing. It's safe 3653 * nr_active is monotonically decreasing. It's safe
3761 * to peek without lock. 3654 * to peek without lock.
3762 */ 3655 */
3763 list_for_each_entry(wq, &workqueues, list) { 3656 list_for_each_entry(wq, &workqueues, list) {
3764 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3657 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3765 3658
3766 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3659 if (!pwq || !(wq->flags & WQ_FREEZABLE))
3767 continue; 3660 continue;
3768 3661
3769 BUG_ON(cwq->nr_active < 0); 3662 BUG_ON(pwq->nr_active < 0);
3770 if (cwq->nr_active) { 3663 if (pwq->nr_active) {
3771 busy = true; 3664 busy = true;
3772 goto out_unlock; 3665 goto out_unlock;
3773 } 3666 }
@@ -3782,10 +3675,10 @@ out_unlock:
3782 * thaw_workqueues - thaw workqueues 3675 * thaw_workqueues - thaw workqueues
3783 * 3676 *
3784 * Thaw workqueues. Normal queueing is restored and all collected 3677 * Thaw workqueues. Normal queueing is restored and all collected
3785 * frozen works are transferred to their respective gcwq worklists. 3678 * frozen works are transferred to their respective pool worklists.
3786 * 3679 *
3787 * CONTEXT: 3680 * CONTEXT:
3788 * Grabs and releases workqueue_lock and gcwq->lock's. 3681 * Grabs and releases workqueue_lock and pool->lock's.
3789 */ 3682 */
3790void thaw_workqueues(void) 3683void thaw_workqueues(void)
3791{ 3684{
@@ -3796,30 +3689,31 @@ void thaw_workqueues(void)
3796 if (!workqueue_freezing) 3689 if (!workqueue_freezing)
3797 goto out_unlock; 3690 goto out_unlock;
3798 3691
3799 for_each_gcwq_cpu(cpu) { 3692 for_each_wq_cpu(cpu) {
3800 struct global_cwq *gcwq = get_gcwq(cpu);
3801 struct worker_pool *pool; 3693 struct worker_pool *pool;
3802 struct workqueue_struct *wq; 3694 struct workqueue_struct *wq;
3803 3695
3804 spin_lock_irq(&gcwq->lock); 3696 for_each_std_worker_pool(pool, cpu) {
3697 spin_lock_irq(&pool->lock);
3805 3698
3806 BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); 3699 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
3807 gcwq->flags &= ~GCWQ_FREEZING; 3700 pool->flags &= ~POOL_FREEZING;
3808 3701
3809 list_for_each_entry(wq, &workqueues, list) { 3702 list_for_each_entry(wq, &workqueues, list) {
3810 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3703 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3811 3704
3812 if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3705 if (!pwq || pwq->pool != pool ||
3813 continue; 3706 !(wq->flags & WQ_FREEZABLE))
3707 continue;
3814 3708
3815 /* restore max_active and repopulate worklist */ 3709 /* restore max_active and repopulate worklist */
3816 cwq_set_max_active(cwq, wq->saved_max_active); 3710 pwq_set_max_active(pwq, wq->saved_max_active);
3817 } 3711 }
3818 3712
3819 for_each_worker_pool(pool, gcwq)
3820 wake_up_worker(pool); 3713 wake_up_worker(pool);
3821 3714
3822 spin_unlock_irq(&gcwq->lock); 3715 spin_unlock_irq(&pool->lock);
3716 }
3823 } 3717 }
3824 3718
3825 workqueue_freezing = false; 3719 workqueue_freezing = false;
@@ -3831,60 +3725,56 @@ out_unlock:
3831static int __init init_workqueues(void) 3725static int __init init_workqueues(void)
3832{ 3726{
3833 unsigned int cpu; 3727 unsigned int cpu;
3834 int i;
3835 3728
3836 /* make sure we have enough bits for OFFQ CPU number */ 3729 /* make sure we have enough bits for OFFQ pool ID */
3837 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) < 3730 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
3838 WORK_CPU_LAST); 3731 WORK_CPU_END * NR_STD_WORKER_POOLS);
3839 3732
3840 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 3733 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3841 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 3734 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3842 3735
3843 /* initialize gcwqs */ 3736 /* initialize CPU pools */
3844 for_each_gcwq_cpu(cpu) { 3737 for_each_wq_cpu(cpu) {
3845 struct global_cwq *gcwq = get_gcwq(cpu);
3846 struct worker_pool *pool; 3738 struct worker_pool *pool;
3847 3739
3848 spin_lock_init(&gcwq->lock); 3740 for_each_std_worker_pool(pool, cpu) {
3849 gcwq->cpu = cpu; 3741 spin_lock_init(&pool->lock);
3850 gcwq->flags |= GCWQ_DISASSOCIATED; 3742 pool->cpu = cpu;
3851 3743 pool->flags |= POOL_DISASSOCIATED;
3852 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3853 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3854
3855 for_each_worker_pool(pool, gcwq) {
3856 pool->gcwq = gcwq;
3857 INIT_LIST_HEAD(&pool->worklist); 3744 INIT_LIST_HEAD(&pool->worklist);
3858 INIT_LIST_HEAD(&pool->idle_list); 3745 INIT_LIST_HEAD(&pool->idle_list);
3746 hash_init(pool->busy_hash);
3859 3747
3860 init_timer_deferrable(&pool->idle_timer); 3748 init_timer_deferrable(&pool->idle_timer);
3861 pool->idle_timer.function = idle_worker_timeout; 3749 pool->idle_timer.function = idle_worker_timeout;
3862 pool->idle_timer.data = (unsigned long)pool; 3750 pool->idle_timer.data = (unsigned long)pool;
3863 3751
3864 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout, 3752 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3865 (unsigned long)pool); 3753 (unsigned long)pool);
3866 3754
3867 mutex_init(&pool->assoc_mutex); 3755 mutex_init(&pool->assoc_mutex);
3868 ida_init(&pool->worker_ida); 3756 ida_init(&pool->worker_ida);
3757
3758 /* alloc pool ID */
3759 BUG_ON(worker_pool_assign_id(pool));
3869 } 3760 }
3870 } 3761 }
3871 3762
3872 /* create the initial worker */ 3763 /* create the initial worker */
3873 for_each_online_gcwq_cpu(cpu) { 3764 for_each_online_wq_cpu(cpu) {
3874 struct global_cwq *gcwq = get_gcwq(cpu);
3875 struct worker_pool *pool; 3765 struct worker_pool *pool;
3876 3766
3877 if (cpu != WORK_CPU_UNBOUND) 3767 for_each_std_worker_pool(pool, cpu) {
3878 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3879
3880 for_each_worker_pool(pool, gcwq) {
3881 struct worker *worker; 3768 struct worker *worker;
3882 3769
3770 if (cpu != WORK_CPU_UNBOUND)
3771 pool->flags &= ~POOL_DISASSOCIATED;
3772
3883 worker = create_worker(pool); 3773 worker = create_worker(pool);
3884 BUG_ON(!worker); 3774 BUG_ON(!worker);
3885 spin_lock_irq(&gcwq->lock); 3775 spin_lock_irq(&pool->lock);
3886 start_worker(worker); 3776 start_worker(worker);
3887 spin_unlock_irq(&gcwq->lock); 3777 spin_unlock_irq(&pool->lock);
3888 } 3778 }
3889 } 3779 }
3890 3780
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
new file mode 100644
index 000000000000..07650264ec15
--- /dev/null
+++ b/kernel/workqueue_internal.h
@@ -0,0 +1,65 @@
1/*
2 * kernel/workqueue_internal.h
3 *
4 * Workqueue internal header file. Only to be included by workqueue and
5 * core kernel subsystems.
6 */
7#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
8#define _KERNEL_WORKQUEUE_INTERNAL_H
9
10#include <linux/workqueue.h>
11#include <linux/kthread.h>
12
13struct worker_pool;
14
15/*
16 * The poor guys doing the actual heavy lifting. All on-duty workers are
17 * either serving the manager role, on idle list or on busy hash. For
18 * details on the locking annotation (L, I, X...), refer to workqueue.c.
19 *
20 * Only to be used in workqueue and async.
21 */
22struct worker {
23 /* on idle list while idle, on busy hash table while busy */
24 union {
25 struct list_head entry; /* L: while idle */
26 struct hlist_node hentry; /* L: while busy */
27 };
28
29 struct work_struct *current_work; /* L: work being processed */
30 work_func_t current_func; /* L: current_work's fn */
31 struct pool_workqueue *current_pwq; /* L: current_work's pwq */
32 struct list_head scheduled; /* L: scheduled works */
33 struct task_struct *task; /* I: worker task */
34 struct worker_pool *pool; /* I: the associated pool */
35 /* 64 bytes boundary on 64bit, 32 on 32bit */
36 unsigned long last_active; /* L: last active timestamp */
37 unsigned int flags; /* X: flags */
38 int id; /* I: worker id */
39
40 /* for rebinding worker to CPU */
41 struct work_struct rebind_work; /* L: for busy worker */
42
43 /* used only by rescuers to point to the target workqueue */
44 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
45};
46
47/**
48 * current_wq_worker - return struct worker if %current is a workqueue worker
49 */
50static inline struct worker *current_wq_worker(void)
51{
52 if (current->flags & PF_WQ_WORKER)
53 return kthread_data(current);
54 return NULL;
55}
56
57/*
58 * Scheduler hooks for concurrency managed workqueue. Only to be used from
59 * sched.c and workqueue.c.
60 */
61void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
62struct task_struct *wq_worker_sleeping(struct task_struct *task,
63 unsigned int cpu);
64
65#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
deleted file mode 100644
index 2d10fc98dc79..000000000000
--- a/kernel/workqueue_sched.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * kernel/workqueue_sched.h
3 *
4 * Scheduler hooks for concurrency managed workqueue. Only to be
5 * included from sched.c and workqueue.c.
6 */
7void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
8struct task_struct *wq_worker_sleeping(struct task_struct *task,
9 unsigned int cpu);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 67604e599384..a1714c897e3f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -605,61 +605,6 @@ config PROVE_LOCKING
605 605
606 For more details, see Documentation/lockdep-design.txt. 606 For more details, see Documentation/lockdep-design.txt.
607 607
608config PROVE_RCU
609 bool "RCU debugging: prove RCU correctness"
610 depends on PROVE_LOCKING
611 default n
612 help
613 This feature enables lockdep extensions that check for correct
614 use of RCU APIs. This is currently under development. Say Y
615 if you want to debug RCU usage or help work on the PROVE_RCU
616 feature.
617
618 Say N if you are unsure.
619
620config PROVE_RCU_REPEATEDLY
621 bool "RCU debugging: don't disable PROVE_RCU on first splat"
622 depends on PROVE_RCU
623 default n
624 help
625 By itself, PROVE_RCU will disable checking upon issuing the
626 first warning (or "splat"). This feature prevents such
627 disabling, allowing multiple RCU-lockdep warnings to be printed
628 on a single reboot.
629
630 Say Y to allow multiple RCU-lockdep warnings per boot.
631
632 Say N if you are unsure.
633
634config PROVE_RCU_DELAY
635 bool "RCU debugging: preemptible RCU race provocation"
636 depends on DEBUG_KERNEL && PREEMPT_RCU
637 default n
638 help
639 There is a class of races that involve an unlikely preemption
640 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
641 been set to INT_MIN. This feature inserts a delay at that
642 point to increase the probability of these races.
643
644 Say Y to increase probability of preemption of __rcu_read_unlock().
645
646 Say N if you are unsure.
647
648config SPARSE_RCU_POINTER
649 bool "RCU debugging: sparse-based checks for pointer usage"
650 default n
651 help
652 This feature enables the __rcu sparse annotation for
653 RCU-protected pointers. This annotation will cause sparse
654 to flag any non-RCU used of annotated pointers. This can be
655 helpful when debugging RCU usage. Please note that this feature
656 is not intended to enforce code cleanliness; it is instead merely
657 a debugging aid.
658
659 Say Y to make sparse flag questionable use of RCU-protected pointers
660
661 Say N if you are unsure.
662
663config LOCKDEP 608config LOCKDEP
664 bool 609 bool
665 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 610 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -937,6 +882,63 @@ config BOOT_PRINTK_DELAY
937 BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect 882 BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
938 what it believes to be lockup conditions. 883 what it believes to be lockup conditions.
939 884
885menu "RCU Debugging"
886
887config PROVE_RCU
888 bool "RCU debugging: prove RCU correctness"
889 depends on PROVE_LOCKING
890 default n
891 help
892 This feature enables lockdep extensions that check for correct
893 use of RCU APIs. This is currently under development. Say Y
894 if you want to debug RCU usage or help work on the PROVE_RCU
895 feature.
896
897 Say N if you are unsure.
898
899config PROVE_RCU_REPEATEDLY
900 bool "RCU debugging: don't disable PROVE_RCU on first splat"
901 depends on PROVE_RCU
902 default n
903 help
904 By itself, PROVE_RCU will disable checking upon issuing the
905 first warning (or "splat"). This feature prevents such
906 disabling, allowing multiple RCU-lockdep warnings to be printed
907 on a single reboot.
908
909 Say Y to allow multiple RCU-lockdep warnings per boot.
910
911 Say N if you are unsure.
912
913config PROVE_RCU_DELAY
914 bool "RCU debugging: preemptible RCU race provocation"
915 depends on DEBUG_KERNEL && PREEMPT_RCU
916 default n
917 help
918 There is a class of races that involve an unlikely preemption
919 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
920 been set to INT_MIN. This feature inserts a delay at that
921 point to increase the probability of these races.
922
923 Say Y to increase probability of preemption of __rcu_read_unlock().
924
925 Say N if you are unsure.
926
927config SPARSE_RCU_POINTER
928 bool "RCU debugging: sparse-based checks for pointer usage"
929 default n
930 help
931 This feature enables the __rcu sparse annotation for
932 RCU-protected pointers. This annotation will cause sparse
933 to flag any non-RCU used of annotated pointers. This can be
934 helpful when debugging RCU usage. Please note that this feature
935 is not intended to enforce code cleanliness; it is instead merely
936 a debugging aid.
937
938 Say Y to make sparse flag questionable use of RCU-protected pointers
939
940 Say N if you are unsure.
941
940config RCU_TORTURE_TEST 942config RCU_TORTURE_TEST
941 tristate "torture tests for RCU" 943 tristate "torture tests for RCU"
942 depends on DEBUG_KERNEL 944 depends on DEBUG_KERNEL
@@ -970,7 +972,7 @@ config RCU_TORTURE_TEST_RUNNABLE
970 972
971config RCU_CPU_STALL_TIMEOUT 973config RCU_CPU_STALL_TIMEOUT
972 int "RCU CPU stall timeout in seconds" 974 int "RCU CPU stall timeout in seconds"
973 depends on TREE_RCU || TREE_PREEMPT_RCU 975 depends on RCU_STALL_COMMON
974 range 3 300 976 range 3 300
975 default 21 977 default 21
976 help 978 help
@@ -1008,6 +1010,7 @@ config RCU_CPU_STALL_INFO
1008config RCU_TRACE 1010config RCU_TRACE
1009 bool "Enable tracing for RCU" 1011 bool "Enable tracing for RCU"
1010 depends on DEBUG_KERNEL 1012 depends on DEBUG_KERNEL
1013 select TRACE_CLOCK
1011 help 1014 help
1012 This option provides tracing in RCU which presents stats 1015 This option provides tracing in RCU which presents stats
1013 in debugfs for debugging RCU implementation. 1016 in debugfs for debugging RCU implementation.
@@ -1015,6 +1018,8 @@ config RCU_TRACE
1015 Say Y here if you want to enable RCU tracing 1018 Say Y here if you want to enable RCU tracing
1016 Say N if you are unsure. 1019 Say N if you are unsure.
1017 1020
1021endmenu # "RCU Debugging"
1022
1018config KPROBES_SANITY_TEST 1023config KPROBES_SANITY_TEST
1019 bool "Kprobes sanity tests" 1024 bool "Kprobes sanity tests"
1020 depends on DEBUG_KERNEL 1025 depends on DEBUG_KERNEL
diff --git a/lib/bug.c b/lib/bug.c
index a28c1415357c..d0cdf14c651a 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
55} 55}
56 56
57#ifdef CONFIG_MODULES 57#ifdef CONFIG_MODULES
58/* Updates are protected by module mutex */
58static LIST_HEAD(module_bug_list); 59static LIST_HEAD(module_bug_list);
59 60
60static const struct bug_entry *module_find_bug(unsigned long bugaddr) 61static const struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/lib/digsig.c b/lib/digsig.c
index 8c0e62975c88..dc2be7ed1765 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,
162 memset(out1, 0, head); 162 memset(out1, 0, head);
163 memcpy(out1 + head, p, l); 163 memcpy(out1 + head, p, l);
164 164
165 kfree(p);
166
165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 167 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
166 if (err) 168 if (err)
167 goto err; 169 goto err;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6001ee6347a9..b5783d81eda9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1257 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1257 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1258 goto out; 1258 goto out;
1259 1259
1260 /* Avoid dumping huge zero page */
1261 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1262 return ERR_PTR(-EFAULT);
1263
1260 page = pmd_page(*pmd); 1264 page = pmd_page(*pmd);
1261 VM_BUG_ON(!PageHead(page)); 1265 VM_BUG_ON(!PageHead(page));
1262 if (flags & FOLL_TOUCH) { 1266 if (flags & FOLL_TOUCH) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4f3ea0b1e57c..546db81820e4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3033 if (!huge_pte_none(huge_ptep_get(ptep))) { 3033 if (!huge_pte_none(huge_ptep_get(ptep))) {
3034 pte = huge_ptep_get_and_clear(mm, address, ptep); 3034 pte = huge_ptep_get_and_clear(mm, address, ptep);
3035 pte = pte_mkhuge(pte_modify(pte, newprot)); 3035 pte = pte_mkhuge(pte_modify(pte, newprot));
3036 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3036 set_huge_pte_at(mm, address, ptep, pte); 3037 set_huge_pte_at(mm, address, ptep, pte);
3037 pages++; 3038 pages++;
3038 } 3039 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 09255ec8159c..fbb60b103e64 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3030 if (memcg) { 3030 if (memcg) {
3031 s->memcg_params->memcg = memcg; 3031 s->memcg_params->memcg = memcg;
3032 s->memcg_params->root_cache = root_cache; 3032 s->memcg_params->root_cache = root_cache;
3033 } 3033 } else
3034 s->memcg_params->is_root_cache = true;
3035
3034 return 0; 3036 return 0;
3035} 3037}
3036 3038
diff --git a/mm/migrate.c b/mm/migrate.c
index c38778610aa8..2fd8b4af4744 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
160 if (is_write_migration_entry(entry)) 160 if (is_write_migration_entry(entry))
161 pte = pte_mkwrite(pte); 161 pte = pte_mkwrite(pte);
162#ifdef CONFIG_HUGETLB_PAGE 162#ifdef CONFIG_HUGETLB_PAGE
163 if (PageHuge(new)) 163 if (PageHuge(new)) {
164 pte = pte_mkhuge(pte); 164 pte = pte_mkhuge(pte);
165 pte = arch_make_huge_pte(pte, vma, new, 0);
166 }
165#endif 167#endif
166 flush_cache_page(vma, addr, pte_pfn(pte)); 168 flush_cache_page(vma, addr, pte_pfn(pte));
167 set_pte_at(mm, addr, ptep, pte); 169 set_pte_at(mm, addr, ptep, pte);
diff --git a/mm/mlock.c b/mm/mlock.c
index f0b9ce572fc7..c9bd528b01d2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
517static int do_mlockall(int flags) 517static int do_mlockall(int flags)
518{ 518{
519 struct vm_area_struct * vma, * prev = NULL; 519 struct vm_area_struct * vma, * prev = NULL;
520 unsigned int def_flags = 0;
521 520
522 if (flags & MCL_FUTURE) 521 if (flags & MCL_FUTURE)
523 def_flags = VM_LOCKED; 522 current->mm->def_flags |= VM_LOCKED;
524 current->mm->def_flags = def_flags; 523 else
524 current->mm->def_flags &= ~VM_LOCKED;
525 if (flags == MCL_FUTURE) 525 if (flags == MCL_FUTURE)
526 goto out; 526 goto out;
527 527
diff --git a/mm/mmap.c b/mm/mmap.c
index 35730ee9d515..09da0b264982 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,7 @@
32#include <linux/khugepaged.h> 32#include <linux/khugepaged.h>
33#include <linux/uprobes.h> 33#include <linux/uprobes.h>
34#include <linux/rbtree_augmented.h> 34#include <linux/rbtree_augmented.h>
35#include <linux/sched/sysctl.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
@@ -2943,7 +2944,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2943 * vma in this mm is backed by the same anon_vma or address_space. 2944 * vma in this mm is backed by the same anon_vma or address_space.
2944 * 2945 *
2945 * We can take all the locks in random order because the VM code 2946 * We can take all the locks in random order because the VM code
2946 * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never 2947 * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
2947 * takes more than one of them in a row. Secondly we're protected 2948 * takes more than one of them in a row. Secondly we're protected
2948 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2949 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2949 * 2950 *
diff --git a/mm/mremap.c b/mm/mremap.c
index e1031e1f6a61..f9766f460299 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -19,6 +19,7 @@
19#include <linux/security.h> 19#include <linux/security.h>
20#include <linux/syscalls.h> 20#include <linux/syscalls.h>
21#include <linux/mmu_notifier.h> 21#include <linux/mmu_notifier.h>
22#include <linux/sched/sysctl.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 79c3cac87afa..b20db4e22263 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -29,6 +29,7 @@
29#include <linux/security.h> 29#include <linux/security.h>
30#include <linux/syscalls.h> 30#include <linux/syscalls.h>
31#include <linux/audit.h> 31#include <linux/audit.h>
32#include <linux/sched/sysctl.h>
32 33
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34#include <asm/tlb.h> 35#include <asm/tlb.h>
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0713bfbf0954..66a0024becd9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -35,6 +35,7 @@
35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */ 35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
36#include <linux/pagevec.h> 36#include <linux/pagevec.h>
37#include <linux/timer.h> 37#include <linux/timer.h>
38#include <linux/sched/rt.h>
38#include <trace/events/writeback.h> 39#include <trace/events/writeback.h>
39 40
40/* 41/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df2022ff0c8a..d1107adf174a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
58#include <linux/prefetch.h> 58#include <linux/prefetch.h>
59#include <linux/migrate.h> 59#include <linux/migrate.h>
60#include <linux/page-debug-flags.h> 60#include <linux/page-debug-flags.h>
61#include <linux/sched/rt.h>
61 62
62#include <asm/tlbflush.h> 63#include <asm/tlbflush.h>
63#include <asm/div64.h> 64#include <asm/div64.h>
@@ -773,6 +774,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
773 set_pageblock_migratetype(page, MIGRATE_CMA); 774 set_pageblock_migratetype(page, MIGRATE_CMA);
774 __free_pages(page, pageblock_order); 775 __free_pages(page, pageblock_order);
775 totalram_pages += pageblock_nr_pages; 776 totalram_pages += pageblock_nr_pages;
777#ifdef CONFIG_HIGHMEM
778 if (PageHighMem(page))
779 totalhigh_pages += pageblock_nr_pages;
780#endif
776} 781}
777#endif 782#endif
778 783
@@ -4416,10 +4421,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4416 * round what is now in bits to nearest long in bits, then return it in 4421 * round what is now in bits to nearest long in bits, then return it in
4417 * bytes. 4422 * bytes.
4418 */ 4423 */
4419static unsigned long __init usemap_size(unsigned long zonesize) 4424static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4420{ 4425{
4421 unsigned long usemapsize; 4426 unsigned long usemapsize;
4422 4427
4428 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4423 usemapsize = roundup(zonesize, pageblock_nr_pages); 4429 usemapsize = roundup(zonesize, pageblock_nr_pages);
4424 usemapsize = usemapsize >> pageblock_order; 4430 usemapsize = usemapsize >> pageblock_order;
4425 usemapsize *= NR_PAGEBLOCK_BITS; 4431 usemapsize *= NR_PAGEBLOCK_BITS;
@@ -4429,17 +4435,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
4429} 4435}
4430 4436
4431static void __init setup_usemap(struct pglist_data *pgdat, 4437static void __init setup_usemap(struct pglist_data *pgdat,
4432 struct zone *zone, unsigned long zonesize) 4438 struct zone *zone,
4439 unsigned long zone_start_pfn,
4440 unsigned long zonesize)
4433{ 4441{
4434 unsigned long usemapsize = usemap_size(zonesize); 4442 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4435 zone->pageblock_flags = NULL; 4443 zone->pageblock_flags = NULL;
4436 if (usemapsize) 4444 if (usemapsize)
4437 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, 4445 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4438 usemapsize); 4446 usemapsize);
4439} 4447}
4440#else 4448#else
4441static inline void setup_usemap(struct pglist_data *pgdat, 4449static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4442 struct zone *zone, unsigned long zonesize) {} 4450 unsigned long zone_start_pfn, unsigned long zonesize) {}
4443#endif /* CONFIG_SPARSEMEM */ 4451#endif /* CONFIG_SPARSEMEM */
4444 4452
4445#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4453#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4590,7 +4598,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4590 continue; 4598 continue;
4591 4599
4592 set_pageblock_order(); 4600 set_pageblock_order();
4593 setup_usemap(pgdat, zone, size); 4601 setup_usemap(pgdat, zone, zone_start_pfn, size);
4594 ret = init_currently_empty_zone(zone, zone_start_pfn, 4602 ret = init_currently_empty_zone(zone, zone_start_pfn,
4595 size, MEMMAP_EARLY); 4603 size, MEMMAP_EARLY);
4596 BUG_ON(ret); 4604 BUG_ON(ret);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e1d89d2b1c1..553921511e4e 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
440 /* this is an hash collision with the temporary selected node. Choose 440 /* this is an hash collision with the temporary selected node. Choose
441 * the one with the lowest address 441 * the one with the lowest address
442 */ 442 */
443 if ((tmp_max == max) && 443 if ((tmp_max == max) && max_orig_node &&
444 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) 444 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
445 goto out; 445 goto out;
446 446
@@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
738 struct arphdr *arphdr; 738 struct arphdr *arphdr;
739 struct ethhdr *ethhdr; 739 struct ethhdr *ethhdr;
740 __be32 ip_src, ip_dst; 740 __be32 ip_src, ip_dst;
741 uint8_t *hw_src, *hw_dst;
741 uint16_t type = 0; 742 uint16_t type = 0;
742 743
743 /* pull the ethernet header */ 744 /* pull the ethernet header */
@@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
777 ip_src = batadv_arp_ip_src(skb, hdr_size); 778 ip_src = batadv_arp_ip_src(skb, hdr_size);
778 ip_dst = batadv_arp_ip_dst(skb, hdr_size); 779 ip_dst = batadv_arp_ip_dst(skb, hdr_size);
779 if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) || 780 if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
780 ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst)) 781 ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
782 ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
783 ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
781 goto out; 784 goto out;
782 785
786 hw_src = batadv_arp_hw_src(skb, hdr_size);
787 if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
788 goto out;
789
790 /* we don't care about the destination MAC address in ARP requests */
791 if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
792 hw_dst = batadv_arp_hw_dst(skb, hdr_size);
793 if (is_zero_ether_addr(hw_dst) ||
794 is_multicast_ether_addr(hw_dst))
795 goto out;
796 }
797
783 type = ntohs(arphdr->ar_op); 798 type = ntohs(arphdr->ar_op);
784out: 799out:
785 return type; 800 return type;
@@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1012 */ 1027 */
1013 ret = !batadv_is_my_client(bat_priv, hw_dst); 1028 ret = !batadv_is_my_client(bat_priv, hw_dst);
1014out: 1029out:
1030 if (ret)
1031 kfree_skb(skb);
1015 /* if ret == false -> packet has to be delivered to the interface */ 1032 /* if ret == false -> packet has to be delivered to the interface */
1016 return ret; 1033 return ret;
1017} 1034}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0666eb..4925a02ae7e4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
249 __u8 reason = hci_proto_disconn_ind(conn); 249 __u8 reason = hci_proto_disconn_ind(conn);
250 250
251 switch (conn->type) { 251 switch (conn->type) {
252 case ACL_LINK:
253 hci_acl_disconn(conn, reason);
254 break;
255 case AMP_LINK: 252 case AMP_LINK:
256 hci_amp_disconn(conn, reason); 253 hci_amp_disconn(conn, reason);
257 break; 254 break;
255 default:
256 hci_acl_disconn(conn, reason);
257 break;
258 } 258 }
259} 259}
260 260
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 596660d37c5e..0f78e34220c9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2810,14 +2810,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2810 if (conn) { 2810 if (conn) {
2811 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2811 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2812 2812
2813 hci_dev_lock(hdev);
2814 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2815 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2816 mgmt_device_connected(hdev, &conn->dst, conn->type,
2817 conn->dst_type, 0, NULL, 0,
2818 conn->dev_class);
2819 hci_dev_unlock(hdev);
2820
2821 /* Send to upper protocol */ 2813 /* Send to upper protocol */
2822 l2cap_recv_acldata(conn, skb, flags); 2814 l2cap_recv_acldata(conn, skb, flags);
2823 return; 2815 return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 705078a0cc39..81b44481d0d9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2688,7 +2688,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2688 if (ev->opcode != HCI_OP_NOP) 2688 if (ev->opcode != HCI_OP_NOP)
2689 del_timer(&hdev->cmd_timer); 2689 del_timer(&hdev->cmd_timer);
2690 2690
2691 if (ev->ncmd) { 2691 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2692 atomic_set(&hdev->cmd_cnt, 1); 2692 atomic_set(&hdev->cmd_cnt, 1);
2693 if (!skb_queue_empty(&hdev->cmd_q)) 2693 if (!skb_queue_empty(&hdev->cmd_q))
2694 queue_work(hdev->workqueue, &hdev->cmd_work); 2694 queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b2bcbe2dc328..a7352ff3fd1e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
931 hid->version = req->version; 931 hid->version = req->version;
932 hid->country = req->country; 932 hid->country = req->country;
933 933
934 strncpy(hid->name, req->name, 128); 934 strncpy(hid->name, req->name, sizeof(req->name) - 1);
935 935
936 snprintf(hid->phys, sizeof(hid->phys), "%pMR", 936 snprintf(hid->phys, sizeof(hid->phys), "%pMR",
937 &bt_sk(session->ctrl_sock->sk)->src); 937 &bt_sk(session->ctrl_sock->sk)->src);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c78208d793e..22e658322845 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3727,6 +3727,17 @@ sendresp:
3727static int l2cap_connect_req(struct l2cap_conn *conn, 3727static int l2cap_connect_req(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u8 *data) 3728 struct l2cap_cmd_hdr *cmd, u8 *data)
3729{ 3729{
3730 struct hci_dev *hdev = conn->hcon->hdev;
3731 struct hci_conn *hcon = conn->hcon;
3732
3733 hci_dev_lock(hdev);
3734 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3735 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3736 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3737 hcon->dst_type, 0, NULL, 0,
3738 hcon->dev_class);
3739 hci_dev_unlock(hdev);
3740
3730 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); 3741 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3731 return 0; 3742 return 0;
3732} 3743}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 531a93d613d4..57f250c20e39 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)
352 352
353 case BT_CONNECTED: 353 case BT_CONNECTED:
354 case BT_CONFIG: 354 case BT_CONFIG:
355 if (sco_pi(sk)->conn) { 355 if (sco_pi(sk)->conn->hcon) {
356 sk->sk_state = BT_DISCONN; 356 sk->sk_state = BT_DISCONN;
357 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); 357 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
358 hci_conn_put(sco_pi(sk)->conn->hcon); 358 hci_conn_put(sco_pi(sk)->conn->hcon);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 68a9587c9694..5abefb12891d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
859 859
860 skb_pull(skb, sizeof(code)); 860 skb_pull(skb, sizeof(code));
861 861
862 /*
863 * The SMP context must be initialized for all other PDUs except
864 * pairing and security requests. If we get any other PDU when
865 * not initialized simply disconnect (done if this function
866 * returns an error).
867 */
868 if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
869 !conn->smp_chan) {
870 BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
871 kfree_skb(skb);
872 return -ENOTSUPP;
873 }
874
862 switch (code) { 875 switch (code) {
863 case SMP_CMD_PAIRING_REQ: 876 case SMP_CMD_PAIRING_REQ:
864 reason = smp_cmd_pairing_req(conn, skb); 877 reason = smp_cmd_pairing_req(conn, skb);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 7f884e3fb955..8660ea3be705 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -16,6 +16,7 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pkt_sched.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/llc.h> 21#include <net/llc.h>
21#include <net/llc_pdu.h> 22#include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
40 41
41 skb->dev = p->dev; 42 skb->dev = p->dev;
42 skb->protocol = htons(ETH_P_802_2); 43 skb->protocol = htons(ETH_P_802_2);
44 skb->priority = TC_PRIO_CONTROL;
43 45
44 skb_reserve(skb, LLC_RESERVE); 46 skb_reserve(skb, LLC_RESERVE);
45 memcpy(__skb_put(skb, length), data, length); 47 memcpy(__skb_put(skb, length), data, length);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 0337e2b76862..368f9c3f9dc6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
187 skb_queue_walk(queue, skb) { 187 skb_queue_walk(queue, skb) {
188 *peeked = skb->peeked; 188 *peeked = skb->peeked;
189 if (flags & MSG_PEEK) { 189 if (flags & MSG_PEEK) {
190 if (*off >= skb->len) { 190 if (*off >= skb->len && skb->len) {
191 *off -= skb->len; 191 *off -= skb->len;
192 continue; 192 continue;
193 } 193 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b29dacf900f9..e6e1cbe863f5 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
1781 return -EFAULT; 1781 return -EFAULT;
1782 i += len; 1782 i += len;
1783 mutex_lock(&pktgen_thread_lock); 1783 mutex_lock(&pktgen_thread_lock);
1784 pktgen_add_device(t, f); 1784 ret = pktgen_add_device(t, f);
1785 mutex_unlock(&pktgen_thread_lock); 1785 mutex_unlock(&pktgen_thread_lock);
1786 ret = count; 1786 if (!ret) {
1787 sprintf(pg_result, "OK: add_device=%s", f); 1787 ret = count;
1788 sprintf(pg_result, "OK: add_device=%s", f);
1789 } else
1790 sprintf(pg_result, "ERROR: can not add device %s", f);
1788 goto out; 1791 goto out;
1789 } 1792 }
1790 1793
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c31d9e8668c3..4425148d2b51 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
186 struct fastopen_queue *fastopenq = 186 struct fastopen_queue *fastopenq =
187 inet_csk(lsk)->icsk_accept_queue.fastopenq; 187 inet_csk(lsk)->icsk_accept_queue.fastopenq;
188 188
189 BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
190
191 tcp_sk(sk)->fastopen_rsk = NULL; 189 tcp_sk(sk)->fastopen_rsk = NULL;
192 spin_lock_bh(&fastopenq->lock); 190 spin_lock_bh(&fastopenq->lock);
193 fastopenq->qlen--; 191 fastopenq->qlen--;
diff --git a/net/core/scm.c b/net/core/scm.c
index 57fb1ee6649f..905dcc6ad1e3 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -35,6 +35,7 @@
35#include <net/sock.h> 35#include <net/sock.h>
36#include <net/compat.h> 36#include <net/compat.h>
37#include <net/scm.h> 37#include <net/scm.h>
38#include <net/cls_cgroup.h>
38 39
39 40
40/* 41/*
@@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
302 } 303 }
303 /* Bump the usage count and install the file. */ 304 /* Bump the usage count and install the file. */
304 sock = sock_from_file(fp[i], &err); 305 sock = sock_from_file(fp[i], &err);
305 if (sock) 306 if (sock) {
306 sock_update_netprioidx(sock->sk, current); 307 sock_update_netprioidx(sock->sk, current);
308 sock_update_classid(sock->sk, current);
309 }
307 fd_install(new_fd, get_file(fp[i])); 310 fd_install(new_fd, get_file(fp[i]));
308 } 311 }
309 312
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..32443ebc3e89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
683 new->network_header = old->network_header; 683 new->network_header = old->network_header;
684 new->mac_header = old->mac_header; 684 new->mac_header = old->mac_header;
685 new->inner_transport_header = old->inner_transport_header; 685 new->inner_transport_header = old->inner_transport_header;
686 new->inner_network_header = old->inner_transport_header; 686 new->inner_network_header = old->inner_network_header;
687 skb_dst_copy(new, old); 687 skb_dst_copy(new, old);
688 new->rxhash = old->rxhash; 688 new->rxhash = old->rxhash;
689 new->ooo_okay = old->ooo_okay; 689 new->ooo_okay = old->ooo_okay;
@@ -1649,7 +1649,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1649 1649
1650static struct page *linear_to_page(struct page *page, unsigned int *len, 1650static struct page *linear_to_page(struct page *page, unsigned int *len,
1651 unsigned int *offset, 1651 unsigned int *offset,
1652 struct sk_buff *skb, struct sock *sk) 1652 struct sock *sk)
1653{ 1653{
1654 struct page_frag *pfrag = sk_page_frag(sk); 1654 struct page_frag *pfrag = sk_page_frag(sk);
1655 1655
@@ -1682,14 +1682,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1682static bool spd_fill_page(struct splice_pipe_desc *spd, 1682static bool spd_fill_page(struct splice_pipe_desc *spd,
1683 struct pipe_inode_info *pipe, struct page *page, 1683 struct pipe_inode_info *pipe, struct page *page,
1684 unsigned int *len, unsigned int offset, 1684 unsigned int *len, unsigned int offset,
1685 struct sk_buff *skb, bool linear, 1685 bool linear,
1686 struct sock *sk) 1686 struct sock *sk)
1687{ 1687{
1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1689 return true; 1689 return true;
1690 1690
1691 if (linear) { 1691 if (linear) {
1692 page = linear_to_page(page, len, &offset, skb, sk); 1692 page = linear_to_page(page, len, &offset, sk);
1693 if (!page) 1693 if (!page)
1694 return true; 1694 return true;
1695 } 1695 }
@@ -1706,23 +1706,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
1706 return false; 1706 return false;
1707} 1707}
1708 1708
1709static inline void __segment_seek(struct page **page, unsigned int *poff,
1710 unsigned int *plen, unsigned int off)
1711{
1712 unsigned long n;
1713
1714 *poff += off;
1715 n = *poff / PAGE_SIZE;
1716 if (n)
1717 *page = nth_page(*page, n);
1718
1719 *poff = *poff % PAGE_SIZE;
1720 *plen -= off;
1721}
1722
1723static bool __splice_segment(struct page *page, unsigned int poff, 1709static bool __splice_segment(struct page *page, unsigned int poff,
1724 unsigned int plen, unsigned int *off, 1710 unsigned int plen, unsigned int *off,
1725 unsigned int *len, struct sk_buff *skb, 1711 unsigned int *len,
1726 struct splice_pipe_desc *spd, bool linear, 1712 struct splice_pipe_desc *spd, bool linear,
1727 struct sock *sk, 1713 struct sock *sk,
1728 struct pipe_inode_info *pipe) 1714 struct pipe_inode_info *pipe)
@@ -1737,23 +1723,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
1737 } 1723 }
1738 1724
1739 /* ignore any bits we already processed */ 1725 /* ignore any bits we already processed */
1740 if (*off) { 1726 poff += *off;
1741 __segment_seek(&page, &poff, &plen, *off); 1727 plen -= *off;
1742 *off = 0; 1728 *off = 0;
1743 }
1744 1729
1745 do { 1730 do {
1746 unsigned int flen = min(*len, plen); 1731 unsigned int flen = min(*len, plen);
1747 1732
1748 /* the linear region may spread across several pages */ 1733 if (spd_fill_page(spd, pipe, page, &flen, poff,
1749 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1734 linear, sk))
1750
1751 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1752 return true; 1735 return true;
1753 1736 poff += flen;
1754 __segment_seek(&page, &poff, &plen, flen); 1737 plen -= flen;
1755 *len -= flen; 1738 *len -= flen;
1756
1757 } while (*len && plen); 1739 } while (*len && plen);
1758 1740
1759 return false; 1741 return false;
@@ -1777,7 +1759,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1777 if (__splice_segment(virt_to_page(skb->data), 1759 if (__splice_segment(virt_to_page(skb->data),
1778 (unsigned long) skb->data & (PAGE_SIZE - 1), 1760 (unsigned long) skb->data & (PAGE_SIZE - 1),
1779 skb_headlen(skb), 1761 skb_headlen(skb),
1780 offset, len, skb, spd, 1762 offset, len, spd,
1781 skb_head_is_locked(skb), 1763 skb_head_is_locked(skb),
1782 sk, pipe)) 1764 sk, pipe))
1783 return true; 1765 return true;
@@ -1790,7 +1772,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1790 1772
1791 if (__splice_segment(skb_frag_page(f), 1773 if (__splice_segment(skb_frag_page(f),
1792 f->page_offset, skb_frag_size(f), 1774 f->page_offset, skb_frag_size(f),
1793 offset, len, skb, spd, false, sk, pipe)) 1775 offset, len, spd, false, sk, pipe))
1794 return true; 1776 return true;
1795 } 1777 }
1796 1778
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a0d8392491c3..a69b4e4a02b5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
269 skb->network_header += ah_hlen; 269 skb->network_header += ah_hlen;
270 memcpy(skb_network_header(skb), work_iph, ihl); 270 memcpy(skb_network_header(skb), work_iph, ihl);
271 __skb_pull(skb, ah_hlen + ihl); 271 __skb_pull(skb, ah_hlen + ihl);
272 skb_set_transport_header(skb, -ihl); 272
273 if (x->props.mode == XFRM_MODE_TUNNEL)
274 skb_reset_transport_header(skb);
275 else
276 skb_set_transport_header(skb, -ihl);
273out: 277out:
274 kfree(AH_SKB_CB(skb)->tmp); 278 kfree(AH_SKB_CB(skb)->tmp);
275 xfrm_input_resume(skb, err); 279 xfrm_input_resume(skb, err);
@@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
381 skb->network_header += ah_hlen; 385 skb->network_header += ah_hlen;
382 memcpy(skb_network_header(skb), work_iph, ihl); 386 memcpy(skb_network_header(skb), work_iph, ihl);
383 __skb_pull(skb, ah_hlen + ihl); 387 __skb_pull(skb, ah_hlen + ihl);
384 skb_set_transport_header(skb, -ihl); 388 if (x->props.mode == XFRM_MODE_TUNNEL)
389 skb_reset_transport_header(skb);
390 else
391 skb_set_transport_header(skb, -ihl);
385 392
386 err = nexthdr; 393 err = nexthdr;
387 394
@@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
413 if (!x) 420 if (!x)
414 return; 421 return;
415 422
416 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 423 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
424 atomic_inc(&flow_cache_genid);
425 rt_genid_bump(net);
426
417 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); 427 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
418 else 428 } else
419 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); 429 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
420 xfrm_state_put(x); 430 xfrm_state_put(x);
421} 431}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9547a273b9e9..ded146b217f1 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
928static int arp_rcv(struct sk_buff *skb, struct net_device *dev, 928static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
929 struct packet_type *pt, struct net_device *orig_dev) 929 struct packet_type *pt, struct net_device *orig_dev)
930{ 930{
931 struct arphdr *arp; 931 const struct arphdr *arp;
932
933 if (dev->flags & IFF_NOARP ||
934 skb->pkt_type == PACKET_OTHERHOST ||
935 skb->pkt_type == PACKET_LOOPBACK)
936 goto freeskb;
937
938 skb = skb_share_check(skb, GFP_ATOMIC);
939 if (!skb)
940 goto out_of_mem;
932 941
933 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 942 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
934 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 943 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
935 goto freeskb; 944 goto freeskb;
936 945
937 arp = arp_hdr(skb); 946 arp = arp_hdr(skb);
938 if (arp->ar_hln != dev->addr_len || 947 if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
939 dev->flags & IFF_NOARP ||
940 skb->pkt_type == PACKET_OTHERHOST ||
941 skb->pkt_type == PACKET_LOOPBACK ||
942 arp->ar_pln != 4)
943 goto freeskb; 948 goto freeskb;
944 949
945 skb = skb_share_check(skb, GFP_ATOMIC);
946 if (skb == NULL)
947 goto out_of_mem;
948
949 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); 950 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
950 951
951 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); 952 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 424fafbc8cb0..b28e863fe0a7 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -85,3 +85,28 @@ out:
85 return err; 85 return err;
86} 86}
87EXPORT_SYMBOL(ip4_datagram_connect); 87EXPORT_SYMBOL(ip4_datagram_connect);
88
89void ip4_datagram_release_cb(struct sock *sk)
90{
91 const struct inet_sock *inet = inet_sk(sk);
92 const struct ip_options_rcu *inet_opt;
93 __be32 daddr = inet->inet_daddr;
94 struct flowi4 fl4;
95 struct rtable *rt;
96
97 if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
98 return;
99
100 rcu_read_lock();
101 inet_opt = rcu_dereference(inet->inet_opt);
102 if (inet_opt && inet_opt->opt.srr)
103 daddr = inet_opt->opt.faddr;
104 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
105 inet->inet_saddr, inet->inet_dport,
106 inet->inet_sport, sk->sk_protocol,
107 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
108 if (!IS_ERR(rt))
109 __sk_dst_set(sk, &rt->dst);
110 rcu_read_unlock();
111}
112EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b61e9deb7c7e..3b4f0cd2e63e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
346 346
347 pskb_trim(skb, skb->len - alen - padlen - 2); 347 pskb_trim(skb, skb->len - alen - padlen - 2);
348 __skb_pull(skb, hlen); 348 __skb_pull(skb, hlen);
349 skb_set_transport_header(skb, -ihl); 349 if (x->props.mode == XFRM_MODE_TUNNEL)
350 skb_reset_transport_header(skb);
351 else
352 skb_set_transport_header(skb, -ihl);
350 353
351 err = nexthdr[1]; 354 err = nexthdr[1];
352 355
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
499 if (!x) 502 if (!x)
500 return; 503 return;
501 504
502 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 505 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
506 atomic_inc(&flow_cache_genid);
507 rt_genid_bump(net);
508
503 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); 509 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
504 else 510 } else
505 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); 511 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
506 xfrm_state_put(x); 512 xfrm_state_put(x);
507} 513}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 303012adf9e6..e81b1caf2ea2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -963,8 +963,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
963 ptr--; 963 ptr--;
964 } 964 }
965 if (tunnel->parms.o_flags&GRE_CSUM) { 965 if (tunnel->parms.o_flags&GRE_CSUM) {
966 int offset = skb_transport_offset(skb);
967
966 *ptr = 0; 968 *ptr = 0;
967 *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); 969 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
970 skb->len - offset,
971 0));
968 } 972 }
969 } 973 }
970 974
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d3ab47e19a89..9a46daed2f3c 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47 if (!x) 47 if (!x)
48 return; 48 return;
49 49
50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 50 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
51 atomic_inc(&flow_cache_genid);
52 rt_genid_bump(net);
53
51 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); 54 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
52 else 55 } else
53 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0); 56 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
54 xfrm_state_put(x); 57 xfrm_state_put(x);
55} 58}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8f3d05424a3e..6f9c07268cf6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -738,6 +738,7 @@ struct proto ping_prot = {
738 .recvmsg = ping_recvmsg, 738 .recvmsg = ping_recvmsg,
739 .bind = ping_bind, 739 .bind = ping_bind,
740 .backlog_rcv = ping_queue_rcv_skb, 740 .backlog_rcv = ping_queue_rcv_skb,
741 .release_cb = ip4_datagram_release_cb,
741 .hash = ping_v4_hash, 742 .hash = ping_v4_hash,
742 .unhash = ping_v4_unhash, 743 .unhash = ping_v4_unhash,
743 .get_port = ping_v4_get_port, 744 .get_port = ping_v4_get_port,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 73d1e4df4bf6..6f08991409c3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -894,6 +894,7 @@ struct proto raw_prot = {
894 .recvmsg = raw_recvmsg, 894 .recvmsg = raw_recvmsg,
895 .bind = raw_bind, 895 .bind = raw_bind,
896 .backlog_rcv = raw_rcv_skb, 896 .backlog_rcv = raw_rcv_skb,
897 .release_cb = ip4_datagram_release_cb,
897 .hash = raw_hash_sk, 898 .hash = raw_hash_sk,
898 .unhash = raw_unhash_sk, 899 .unhash = raw_unhash_sk,
899 .obj_size = sizeof(struct raw_sock), 900 .obj_size = sizeof(struct raw_sock),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 844a9ef60dbd..a0fcc47fee73 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
912 struct dst_entry *dst = &rt->dst; 912 struct dst_entry *dst = &rt->dst;
913 struct fib_result res; 913 struct fib_result res;
914 914
915 if (dst_metric_locked(dst, RTAX_MTU))
916 return;
917
915 if (dst->dev->mtu < mtu) 918 if (dst->dev->mtu < mtu)
916 return; 919 return;
917 920
@@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
962} 965}
963EXPORT_SYMBOL_GPL(ipv4_update_pmtu); 966EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
964 967
965void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 968static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
966{ 969{
967 const struct iphdr *iph = (const struct iphdr *) skb->data; 970 const struct iphdr *iph = (const struct iphdr *) skb->data;
968 struct flowi4 fl4; 971 struct flowi4 fl4;
@@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
975 ip_rt_put(rt); 978 ip_rt_put(rt);
976 } 979 }
977} 980}
981
982void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
983{
984 const struct iphdr *iph = (const struct iphdr *) skb->data;
985 struct flowi4 fl4;
986 struct rtable *rt;
987 struct dst_entry *dst;
988 bool new = false;
989
990 bh_lock_sock(sk);
991 rt = (struct rtable *) __sk_dst_get(sk);
992
993 if (sock_owned_by_user(sk) || !rt) {
994 __ipv4_sk_update_pmtu(skb, sk, mtu);
995 goto out;
996 }
997
998 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
999
1000 if (!__sk_dst_check(sk, 0)) {
1001 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1002 if (IS_ERR(rt))
1003 goto out;
1004
1005 new = true;
1006 }
1007
1008 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1009
1010 dst = dst_check(&rt->dst, 0);
1011 if (!dst) {
1012 if (new)
1013 dst_release(&rt->dst);
1014
1015 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1016 if (IS_ERR(rt))
1017 goto out;
1018
1019 new = true;
1020 }
1021
1022 if (new)
1023 __sk_dst_set(sk, &rt->dst);
1024
1025out:
1026 bh_unlock_sock(sk);
1027}
978EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1028EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
979 1029
980void ipv4_redirect(struct sk_buff *skb, struct net *net, 1030void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1120 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) 1170 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1121 mtu = dst_metric_raw(dst, RTAX_MTU); 1171 mtu = dst_metric_raw(dst, RTAX_MTU);
1122 1172
1123 if (mtu && rt_is_output_route(rt)) 1173 if (mtu)
1124 return mtu; 1174 return mtu;
1125 1175
1126 mtu = dst->dev->mtu; 1176 mtu = dst->dev->mtu;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..cdf2e707bb10 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
310{ 310{
311 int cnt; /* increase in packets */ 311 int cnt; /* increase in packets */
312 unsigned int delta = 0; 312 unsigned int delta = 0;
313 u32 snd_cwnd = tp->snd_cwnd;
314
315 if (unlikely(!snd_cwnd)) {
316 pr_err_once("snd_cwnd is nul, please report this bug.\n");
317 snd_cwnd = 1U;
318 }
313 319
314 /* RFC3465: ABC Slow start 320 /* RFC3465: ABC Slow start
315 * Increase only after a full MSS of bytes is acked 321 * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
324 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 330 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
325 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 331 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
326 else 332 else
327 cnt = tp->snd_cwnd; /* exponential increase */ 333 cnt = snd_cwnd; /* exponential increase */
328 334
329 /* RFC3465: ABC 335 /* RFC3465: ABC
330 * We MAY increase by 2 if discovered delayed ack 336 * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
334 tp->bytes_acked = 0; 340 tp->bytes_acked = 0;
335 341
336 tp->snd_cwnd_cnt += cnt; 342 tp->snd_cwnd_cnt += cnt;
337 while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 343 while (tp->snd_cwnd_cnt >= snd_cwnd) {
338 tp->snd_cwnd_cnt -= tp->snd_cwnd; 344 tp->snd_cwnd_cnt -= snd_cwnd;
339 delta++; 345 delta++;
340 } 346 }
341 tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); 347 tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
342} 348}
343EXPORT_SYMBOL_GPL(tcp_slow_start); 349EXPORT_SYMBOL_GPL(tcp_slow_start);
344 350
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18f97ca76b00..ad70a962c20e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3504 } 3504 }
3505 } else { 3505 } else {
3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3507 if (!tcp_packets_in_flight(tp)) {
3508 tcp_enter_frto_loss(sk, 2, flag);
3509 return true;
3510 }
3511
3507 /* Prevent sending of new data. */ 3512 /* Prevent sending of new data. */
3508 tp->snd_cwnd = min(tp->snd_cwnd, 3513 tp->snd_cwnd = min(tp->snd_cwnd,
3509 tcp_packets_in_flight(tp)); 3514 tcp_packets_in_flight(tp));
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5649 * the remote receives only the retransmitted (regular) SYNs: either 5654 * the remote receives only the retransmitted (regular) SYNs: either
5650 * the original SYN-data or the corresponding SYN-ACK is lost. 5655 * the original SYN-data or the corresponding SYN-ACK is lost.
5651 */ 5656 */
5652 syn_drop = (cookie->len <= 0 && data && 5657 syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
5653 inet_csk(sk)->icsk_retransmits);
5654 5658
5655 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5659 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5656 5660
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 54139fa514e6..eadb693eef55 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
369 * We do take care of PMTU discovery (RFC1191) special case : 369 * We do take care of PMTU discovery (RFC1191) special case :
370 * we can receive locally generated ICMP messages while socket is held. 370 * we can receive locally generated ICMP messages while socket is held.
371 */ 371 */
372 if (sock_owned_by_user(sk) && 372 if (sock_owned_by_user(sk)) {
373 type != ICMP_DEST_UNREACH && 373 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374 code != ICMP_FRAG_NEEDED) 374 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 375 }
376
377 if (sk->sk_state == TCP_CLOSE) 376 if (sk->sk_state == TCP_CLOSE)
378 goto out; 377 goto out;
379 378
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
497 * errors returned from accept(). 496 * errors returned from accept().
498 */ 497 */
499 inet_csk_reqsk_queue_drop(sk, req, prev); 498 inet_csk_reqsk_queue_drop(sk, req, prev);
499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
500 goto out; 500 goto out;
501 501
502 case TCP_SYN_SENT: 502 case TCP_SYN_SENT:
@@ -1501,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1501 * clogging syn queue with openreqs with exponentially increasing 1501 * clogging syn queue with openreqs with exponentially increasing
1502 * timeout. 1502 * timeout.
1503 */ 1503 */
1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1505 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1505 goto drop; 1506 goto drop;
1507 }
1506 1508
1507 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1509 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1508 if (!req) 1510 if (!req)
@@ -1667,6 +1669,7 @@ drop_and_release:
1667drop_and_free: 1669drop_and_free:
1668 reqsk_free(req); 1670 reqsk_free(req);
1669drop: 1671drop:
1672 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1670 return 0; 1673 return 0;
1671} 1674}
1672EXPORT_SYMBOL(tcp_v4_conn_request); 1675EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 79c8dbe59b54..1f4d405eafba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1952,6 +1952,7 @@ struct proto udp_prot = {
1952 .recvmsg = udp_recvmsg, 1952 .recvmsg = udp_recvmsg,
1953 .sendpage = udp_sendpage, 1953 .sendpage = udp_sendpage,
1954 .backlog_rcv = __udp_queue_rcv_skb, 1954 .backlog_rcv = __udp_queue_rcv_skb,
1955 .release_cb = ip4_datagram_release_cb,
1955 .hash = udp_lib_hash, 1956 .hash = udp_lib_hash,
1956 .unhash = udp_lib_unhash, 1957 .unhash = udp_lib_unhash,
1957 .rehash = udp_v4_rehash, 1958 .rehash = udp_v4_rehash,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 420e56326384..1b5d8cb9b123 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1660 if (dev->addr_len != IEEE802154_ADDR_LEN) 1660 if (dev->addr_len != IEEE802154_ADDR_LEN)
1661 return -1; 1661 return -1;
1662 memcpy(eui, dev->dev_addr, 8); 1662 memcpy(eui, dev->dev_addr, 8);
1663 eui[0] ^= 2;
1663 return 0; 1664 return 0;
1664} 1665}
1665 1666
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ecc35b93314b..384233188ac1 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
472 skb->network_header += ah_hlen; 472 skb->network_header += ah_hlen;
473 memcpy(skb_network_header(skb), work_iph, hdr_len); 473 memcpy(skb_network_header(skb), work_iph, hdr_len);
474 __skb_pull(skb, ah_hlen + hdr_len); 474 __skb_pull(skb, ah_hlen + hdr_len);
475 skb_set_transport_header(skb, -hdr_len); 475 if (x->props.mode == XFRM_MODE_TUNNEL)
476 skb_reset_transport_header(skb);
477 else
478 skb_set_transport_header(skb, -hdr_len);
476out: 479out:
477 kfree(AH_SKB_CB(skb)->tmp); 480 kfree(AH_SKB_CB(skb)->tmp);
478 xfrm_input_resume(skb, err); 481 xfrm_input_resume(skb, err);
@@ -593,9 +596,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
593 596
594 skb->network_header += ah_hlen; 597 skb->network_header += ah_hlen;
595 memcpy(skb_network_header(skb), work_iph, hdr_len); 598 memcpy(skb_network_header(skb), work_iph, hdr_len);
596 skb->transport_header = skb->network_header;
597 __skb_pull(skb, ah_hlen + hdr_len); 599 __skb_pull(skb, ah_hlen + hdr_len);
598 600
601 if (x->props.mode == XFRM_MODE_TUNNEL)
602 skb_reset_transport_header(skb);
603 else
604 skb_set_transport_header(skb, -hdr_len);
605
599 err = nexthdr; 606 err = nexthdr;
600 607
601out_free: 608out_free:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8edf2601065a..7a778b9a7b85 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
380 if (skb->protocol == htons(ETH_P_IPV6)) { 380 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 381 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 382 if (np->rxopt.all)
383 datagram_recv_ctl(sk, msg, skb); 383 ip6_datagram_recv_ctl(sk, msg, skb);
384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
385 sin->sin6_scope_id = IP6CB(skb)->iif; 385 sin->sin6_scope_id = IP6CB(skb)->iif;
386 } else { 386 } else {
@@ -468,7 +468,8 @@ out:
468} 468}
469 469
470 470
471int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
472 struct sk_buff *skb)
472{ 473{
473 struct ipv6_pinfo *np = inet6_sk(sk); 474 struct ipv6_pinfo *np = inet6_sk(sk);
474 struct inet6_skb_parm *opt = IP6CB(skb); 475 struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
597 } 598 }
598 return 0; 599 return 0;
599} 600}
601EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
600 602
601int datagram_send_ctl(struct net *net, struct sock *sk, 603int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
602 struct msghdr *msg, struct flowi6 *fl6, 604 struct msghdr *msg, struct flowi6 *fl6,
603 struct ipv6_txoptions *opt, 605 struct ipv6_txoptions *opt,
604 int *hlimit, int *tclass, int *dontfrag) 606 int *hlimit, int *tclass, int *dontfrag)
605{ 607{
606 struct in6_pktinfo *src_info; 608 struct in6_pktinfo *src_info;
607 struct cmsghdr *cmsg; 609 struct cmsghdr *cmsg;
@@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
871exit_f: 873exit_f:
872 return err; 874 return err;
873} 875}
874EXPORT_SYMBOL_GPL(datagram_send_ctl); 876EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 282f3723ee19..40ffd72243a4 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
300 300
301 pskb_trim(skb, skb->len - alen - padlen - 2); 301 pskb_trim(skb, skb->len - alen - padlen - 2);
302 __skb_pull(skb, hlen); 302 __skb_pull(skb, hlen);
303 skb_set_transport_header(skb, -hdr_len); 303 if (x->props.mode == XFRM_MODE_TUNNEL)
304 skb_reset_transport_header(skb);
305 else
306 skb_set_transport_header(skb, -hdr_len);
304 307
305 err = nexthdr[1]; 308 err = nexthdr[1];
306 309
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b4a9fd51dae7..fff5bdd8b680 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
81 return net->ipv6.icmp_sk[smp_processor_id()]; 81 return net->ipv6.icmp_sk[smp_processor_id()];
82} 82}
83 83
84static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
85 u8 type, u8 code, int offset, __be32 info)
86{
87 struct net *net = dev_net(skb->dev);
88
89 if (type == ICMPV6_PKT_TOOBIG)
90 ip6_update_pmtu(skb, net, info, 0, 0);
91 else if (type == NDISC_REDIRECT)
92 ip6_redirect(skb, net, 0, 0);
93}
94
84static int icmpv6_rcv(struct sk_buff *skb); 95static int icmpv6_rcv(struct sk_buff *skb);
85 96
86static const struct inet6_protocol icmpv6_protocol = { 97static const struct inet6_protocol icmpv6_protocol = {
87 .handler = icmpv6_rcv, 98 .handler = icmpv6_rcv,
99 .err_handler = icmpv6_err,
88 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 100 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
89}; 101};
90 102
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 29124b7a04c8..d6de4b447250 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
365 msg.msg_control = (void*)(fl->opt+1); 365 msg.msg_control = (void*)(fl->opt+1);
366 memset(&flowi6, 0, sizeof(flowi6)); 366 memset(&flowi6, 0, sizeof(flowi6));
367 367
368 err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 368 err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
369 &junk, &junk); 369 &junk, &junk, &junk);
370 if (err) 370 if (err)
371 goto done; 371 goto done;
372 err = -EINVAL; 372 err = -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c727e4712751..131dd097736d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
960 int ret; 960 int ret;
961 961
962 if (!ip6_tnl_xmit_ctl(t)) 962 if (!ip6_tnl_xmit_ctl(t))
963 return -1; 963 goto tx_err;
964 964
965 switch (skb->protocol) { 965 switch (skb->protocol) {
966 case htons(ETH_P_IP): 966 case htons(ETH_P_IP):
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5552d13ae92f..0c7c03d50dc0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1213,10 +1213,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1213 if (dst_allfrag(rt->dst.path)) 1213 if (dst_allfrag(rt->dst.path))
1214 cork->flags |= IPCORK_ALLFRAG; 1214 cork->flags |= IPCORK_ALLFRAG;
1215 cork->length = 0; 1215 cork->length = 0;
1216 exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; 1216 exthdrlen = (opt ? opt->opt_flen : 0);
1217 length += exthdrlen; 1217 length += exthdrlen;
1218 transhdrlen += exthdrlen; 1218 transhdrlen += exthdrlen;
1219 dst_exthdrlen = rt->dst.header_len; 1219 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1220 } else { 1220 } else {
1221 rt = (struct rt6_info *)cork->dst; 1221 rt = (struct rt6_info *)cork->dst;
1222 fl6 = &inet->cork.fl.u.ip6; 1222 fl6 = &inet->cork.fl.u.ip6;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 26dcdec9e3a5..8fd154e5f079 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1710,6 +1710,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1710 return -EINVAL; 1710 return -EINVAL;
1711 if (get_user(v, (u32 __user *)optval)) 1711 if (get_user(v, (u32 __user *)optval))
1712 return -EFAULT; 1712 return -EFAULT;
1713 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1714 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1715 return -EINVAL;
1713 if (sk == mrt->mroute6_sk) 1716 if (sk == mrt->mroute6_sk)
1714 return -EBUSY; 1717 return -EBUSY;
1715 1718
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31c9d4d..d1e2e8ef29c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
476 msg.msg_controllen = optlen; 476 msg.msg_controllen = optlen;
477 msg.msg_control = (void*)(opt+1); 477 msg.msg_control = (void*)(opt+1);
478 478
479 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
480 &junk); 480 &junk, &junk);
481 if (retv) 481 if (retv)
482 goto done; 482 goto done;
483update: 483update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1002 release_sock(sk); 1002 release_sock(sk);
1003 1003
1004 if (skb) { 1004 if (skb) {
1005 int err = datagram_recv_ctl(sk, &msg, skb); 1005 int err = ip6_datagram_recv_ctl(sk, &msg, skb);
1006 kfree_skb(skb); 1006 kfree_skb(skb);
1007 if (err) 1007 if (err)
1008 return err; 1008 return err;
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 7302b0b7b642..83acc1405a18 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/ipv6.h>
12#include <linux/netfilter.h> 13#include <linux/netfilter.h>
13#include <linux/netfilter_ipv6.h> 14#include <linux/netfilter_ipv6.h>
14#include <linux/netfilter_ipv6/ip6t_NPT.h> 15#include <linux/netfilter_ipv6/ip6t_NPT.h>
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
18{ 19{
19 struct ip6t_npt_tginfo *npt = par->targinfo; 20 struct ip6t_npt_tginfo *npt = par->targinfo;
20 __wsum src_sum = 0, dst_sum = 0; 21 __wsum src_sum = 0, dst_sum = 0;
22 struct in6_addr pfx;
21 unsigned int i; 23 unsigned int i;
22 24
23 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) 25 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
24 return -EINVAL; 26 return -EINVAL;
25 27
28 /* Ensure that LSB of prefix is zero */
29 ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
30 if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
31 return -EINVAL;
32 ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
33 if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
34 return -EINVAL;
35
26 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { 36 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
27 src_sum = csum_add(src_sum, 37 src_sum = csum_add(src_sum,
28 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]); 38 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
30 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]); 40 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
31 } 41 }
32 42
33 npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum); 43 npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
34 return 0; 44 return 0;
35} 45}
36 46
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
51 61
52 idx = i / 32; 62 idx = i / 32;
53 addr->s6_addr32[idx] &= mask; 63 addr->s6_addr32[idx] &= mask;
54 addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx]; 64 addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
55 } 65 }
56 66
57 if (pfx_len <= 48) 67 if (pfx_len <= 48)
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
66 return false; 76 return false;
67 } 77 }
68 78
69 sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx], 79 sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
70 npt->adjustment); 80 csum_unfold(npt->adjustment)));
71 if (sum == CSUM_MANGLED_0) 81 if (sum == CSUM_MANGLED_0)
72 sum = 0; 82 sum = 0;
73 *(__force __sum16 *)&addr->s6_addr16[idx] = sum; 83 *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1e8b92..70fa81449997 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
507 sock_recv_ts_and_drops(msg, sk, skb); 507 sock_recv_ts_and_drops(msg, sk, skb);
508 508
509 if (np->rxopt.all) 509 if (np->rxopt.all)
510 datagram_recv_ctl(sk, msg, skb); 510 ip6_datagram_recv_ctl(sk, msg, skb);
511 511
512 err = copied; 512 err = copied;
513 if (flags & MSG_TRUNC) 513 if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 822 memset(opt, 0, sizeof(struct ipv6_txoptions));
823 opt->tot_len = sizeof(struct ipv6_txoptions); 823 opt->tot_len = sizeof(struct ipv6_txoptions);
824 824
825 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 825 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
826 &hlimit, &tclass, &dontfrag); 826 &hlimit, &tclass, &dontfrag);
827 if (err < 0) { 827 if (err < 0) {
828 fl6_sock_release(flowlabel); 828 fl6_sock_release(flowlabel);
829 return err; 829 return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e229a3bc345d..363d8b7772e8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -928,7 +928,7 @@ restart:
928 dst_hold(&rt->dst); 928 dst_hold(&rt->dst);
929 read_unlock_bh(&table->tb6_lock); 929 read_unlock_bh(&table->tb6_lock);
930 930
931 if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) 931 if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
932 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 932 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
933 else if (!(rt->dst.flags & DST_HOST)) 933 else if (!(rt->dst.flags & DST_HOST))
934 nrt = rt6_alloc_clone(rt, &fl6->daddr); 934 nrt = rt6_alloc_clone(rt, &fl6->daddr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93825dd3a7c0..4f43537197ef 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
423 } 423 }
424 424
425 inet_csk_reqsk_queue_drop(sk, req, prev); 425 inet_csk_reqsk_queue_drop(sk, req, prev);
426 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
426 goto out; 427 goto out;
427 428
428 case TCP_SYN_SENT: 429 case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
958 goto drop; 959 goto drop;
959 } 960 }
960 961
961 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 962 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
963 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
962 goto drop; 964 goto drop;
965 }
963 966
964 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 967 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
965 if (req == NULL) 968 if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
1108drop_and_free: 1111drop_and_free:
1109 reqsk_free(req); 1112 reqsk_free(req);
1110drop: 1113drop:
1114 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1111 return 0; /* don't send reset */ 1115 return 0; /* don't send reset */
1112} 1116}
1113 1117
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dfaa29b8b293..fb083295ff0b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -443,7 +443,7 @@ try_again:
443 ip_cmsg_recv(msg, skb); 443 ip_cmsg_recv(msg, skb);
444 } else { 444 } else {
445 if (np->rxopt.all) 445 if (np->rxopt.all)
446 datagram_recv_ctl(sk, msg, skb); 446 ip6_datagram_recv_ctl(sk, msg, skb);
447 } 447 }
448 448
449 err = copied; 449 err = copied;
@@ -1153,8 +1153,8 @@ do_udp_sendmsg:
1153 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1153 memset(opt, 0, sizeof(struct ipv6_txoptions));
1154 opt->tot_len = sizeof(*opt); 1154 opt->tot_len = sizeof(*opt);
1155 1155
1156 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1156 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1157 &hlimit, &tclass, &dontfrag); 1157 &hlimit, &tclass, &dontfrag);
1158 if (err < 0) { 1158 if (err < 0) {
1159 fl6_sock_release(flowlabel); 1159 fl6_sock_release(flowlabel);
1160 return err; 1160 return err;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f3723c13c..2ac884d0e89b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
168 168
169} 169}
170 170
171/* Lookup the tunnel socket, possibly involving the fs code if the socket is
172 * owned by userspace. A struct sock returned from this function must be
173 * released using l2tp_tunnel_sock_put once you're done with it.
174 */
175struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
176{
177 int err = 0;
178 struct socket *sock = NULL;
179 struct sock *sk = NULL;
180
181 if (!tunnel)
182 goto out;
183
184 if (tunnel->fd >= 0) {
185 /* Socket is owned by userspace, who might be in the process
186 * of closing it. Look the socket up using the fd to ensure
187 * consistency.
188 */
189 sock = sockfd_lookup(tunnel->fd, &err);
190 if (sock)
191 sk = sock->sk;
192 } else {
193 /* Socket is owned by kernelspace */
194 sk = tunnel->sock;
195 }
196
197out:
198 return sk;
199}
200EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
202/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
203void l2tp_tunnel_sock_put(struct sock *sk)
204{
205 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206 if (tunnel) {
207 if (tunnel->fd >= 0) {
208 /* Socket is owned by userspace */
209 sockfd_put(sk->sk_socket);
210 }
211 sock_put(sk);
212 }
213}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215
171/* Lookup a session by id in the global session list 216/* Lookup a session by id in the global session list
172 */ 217 */
173static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) 218static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1123 struct udphdr *uh; 1168 struct udphdr *uh;
1124 struct inet_sock *inet; 1169 struct inet_sock *inet;
1125 __wsum csum; 1170 __wsum csum;
1126 int old_headroom;
1127 int new_headroom;
1128 int headroom; 1171 int headroom;
1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1172 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1130 int udp_len; 1173 int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1136 */ 1179 */
1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1180 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1138 uhlen + hdr_len; 1181 uhlen + hdr_len;
1139 old_headroom = skb_headroom(skb);
1140 if (skb_cow_head(skb, headroom)) { 1182 if (skb_cow_head(skb, headroom)) {
1141 kfree_skb(skb); 1183 kfree_skb(skb);
1142 return NET_XMIT_DROP; 1184 return NET_XMIT_DROP;
1143 } 1185 }
1144 1186
1145 new_headroom = skb_headroom(skb);
1146 skb_orphan(skb); 1187 skb_orphan(skb);
1147 skb->truesize += new_headroom - old_headroom;
1148
1149 /* Setup L2TP header */ 1188 /* Setup L2TP header */
1150 session->build_header(session, __skb_push(skb, hdr_len)); 1189 session->build_header(session, __skb_push(skb, hdr_len));
1151 1190
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1607 tunnel->old_sk_destruct = sk->sk_destruct; 1646 tunnel->old_sk_destruct = sk->sk_destruct;
1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1647 sk->sk_destruct = &l2tp_tunnel_destruct;
1609 tunnel->sock = sk; 1648 tunnel->sock = sk;
1649 tunnel->fd = fd;
1610 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1650 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1611 1651
1612 sk->sk_allocation = GFP_ATOMIC; 1652 sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1642 */ 1682 */
1643int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1683int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1644{ 1684{
1645 int err = 0; 1685 int err = -EBADF;
1646 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; 1686 struct socket *sock = NULL;
1687 struct sock *sk = NULL;
1688
1689 sk = l2tp_tunnel_sock_lookup(tunnel);
1690 if (!sk)
1691 goto out;
1692
1693 sock = sk->sk_socket;
1694 BUG_ON(!sock);
1647 1695
1648 /* Force the tunnel socket to close. This will eventually 1696 /* Force the tunnel socket to close. This will eventually
1649 * cause the tunnel to be deleted via the normal socket close 1697 * cause the tunnel to be deleted via the normal socket close
1650 * mechanisms when userspace closes the tunnel socket. 1698 * mechanisms when userspace closes the tunnel socket.
1651 */ 1699 */
1652 if (sock != NULL) { 1700 err = inet_shutdown(sock, 2);
1653 err = inet_shutdown(sock, 2);
1654 1701
1655 /* If the tunnel's socket was created by the kernel, 1702 /* If the tunnel's socket was created by the kernel,
1656 * close the socket here since the socket was not 1703 * close the socket here since the socket was not
1657 * created by userspace. 1704 * created by userspace.
1658 */ 1705 */
1659 if (sock->file == NULL) 1706 if (sock->file == NULL)
1660 err = inet_release(sock); 1707 err = inet_release(sock);
1661 }
1662 1708
1709 l2tp_tunnel_sock_put(sk);
1710out:
1663 return err; 1711 return err;
1664} 1712}
1665EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1713EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e083a7..e62204cad4fe 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
188 int (*recv_payload_hook)(struct sk_buff *skb); 188 int (*recv_payload_hook)(struct sk_buff *skb);
189 void (*old_sk_destruct)(struct sock *); 189 void (*old_sk_destruct)(struct sock *);
190 struct sock *sock; /* Parent socket */ 190 struct sock *sock; /* Parent socket */
191 int fd; 191 int fd; /* Parent fd, if tunnel socket
192 * was created by userspace */
192 193
193 uint8_t priv[0]; /* private data */ 194 uint8_t priv[0]; /* private data */
194}; 195};
@@ -228,6 +229,8 @@ out:
228 return tunnel; 229 return tunnel;
229} 230}
230 231
232extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
233extern void l2tp_tunnel_sock_put(struct sock *sk);
231extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 234extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
232extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 235extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
233extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 236extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 927547171bc7..8ee4a86ae996 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 554 memset(opt, 0, sizeof(struct ipv6_txoptions));
555 opt->tot_len = sizeof(struct ipv6_txoptions); 555 opt->tot_len = sizeof(struct ipv6_txoptions);
556 556
557 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 557 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
558 &hlimit, &tclass, &dontfrag); 558 &hlimit, &tclass, &dontfrag);
559 if (err < 0) { 559 if (err < 0) {
560 fl6_sock_release(flowlabel); 560 fl6_sock_release(flowlabel);
561 return err; 561 return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
646 struct msghdr *msg, size_t len, int noblock, 646 struct msghdr *msg, size_t len, int noblock,
647 int flags, int *addr_len) 647 int flags, int *addr_len)
648{ 648{
649 struct inet_sock *inet = inet_sk(sk); 649 struct ipv6_pinfo *np = inet6_sk(sk);
650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
651 size_t copied = 0; 651 size_t copied = 0;
652 int err = -EOPNOTSUPP; 652 int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 688 lsa->l2tp_scope_id = IP6CB(skb)->iif;
689 } 689 }
690 690
691 if (inet->cmsg_flags) 691 if (np->rxopt.all)
692 ip_cmsg_recv(msg, skb); 692 ip6_datagram_recv_ctl(sk, msg, skb);
693 693
694 if (flags & MSG_TRUNC) 694 if (flags & MSG_TRUNC)
695 copied = skb->len; 695 copied = skb->len;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 286366ef8930..716605c241f4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
388 struct l2tp_session *session; 388 struct l2tp_session *session;
389 struct l2tp_tunnel *tunnel; 389 struct l2tp_tunnel *tunnel;
390 struct pppol2tp_session *ps; 390 struct pppol2tp_session *ps;
391 int old_headroom;
392 int new_headroom;
393 int uhlen, headroom; 391 int uhlen, headroom;
394 392
395 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 393 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
408 if (tunnel == NULL) 406 if (tunnel == NULL)
409 goto abort_put_sess; 407 goto abort_put_sess;
410 408
411 old_headroom = skb_headroom(skb);
412 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 409 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
413 headroom = NET_SKB_PAD + 410 headroom = NET_SKB_PAD +
414 sizeof(struct iphdr) + /* IP header */ 411 sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
418 if (skb_cow_head(skb, headroom)) 415 if (skb_cow_head(skb, headroom))
419 goto abort_put_sess_tun; 416 goto abort_put_sess_tun;
420 417
421 new_headroom = skb_headroom(skb);
422 skb->truesize += new_headroom - old_headroom;
423
424 /* Setup PPP header */ 418 /* Setup PPP header */
425 __skb_push(skb, sizeof(ppph)); 419 __skb_push(skb, sizeof(ppph));
426 skb->data[0] = ppph[0]; 420 skb->data[0] = ppph[0];
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e0aca614b7..0479c64aa83c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
164 sta = sta_info_get(sdata, mac_addr); 164 sta = sta_info_get(sdata, mac_addr);
165 else 165 else
166 sta = sta_info_get_bss(sdata, mac_addr); 166 sta = sta_info_get_bss(sdata, mac_addr);
167 if (!sta) { 167 /*
168 * The ASSOC test makes sure the driver is ready to
169 * receive the key. When wpa_supplicant has roamed
170 * using FT, it attempts to set the key before
171 * association has completed, this rejects that attempt
172 * so it will set the key again after assocation.
173 *
174 * TODO: accept the key if we have a station entry and
175 * add it to the device after the station.
176 */
177 if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
168 ieee80211_key_free(sdata->local, key); 178 ieee80211_key_free(sdata->local, key);
169 err = -ENOENT; 179 err = -ENOENT;
170 goto out_unlock; 180 goto out_unlock;
@@ -1994,7 +2004,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
1994{ 2004{
1995 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2005 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1996 2006
1997 memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate)); 2007 memcpy(sdata->vif.bss_conf.mcast_rate, rate,
2008 sizeof(int) * IEEE80211_NUM_BANDS);
1998 2009
1999 return 0; 2010 return 0;
2000} 2011}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8563b9a5cac3..2ed065c09562 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1358,10 +1358,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
1358void ieee80211_sched_scan_stopped_work(struct work_struct *work); 1358void ieee80211_sched_scan_stopped_work(struct work_struct *work);
1359 1359
1360/* off-channel helpers */ 1360/* off-channel helpers */
1361void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 1361void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
1362 bool offchannel_ps_enable); 1362void ieee80211_offchannel_return(struct ieee80211_local *local);
1363void ieee80211_offchannel_return(struct ieee80211_local *local,
1364 bool offchannel_ps_disable);
1365void ieee80211_roc_setup(struct ieee80211_local *local); 1363void ieee80211_roc_setup(struct ieee80211_local *local);
1366void ieee80211_start_next_roc(struct ieee80211_local *local); 1364void ieee80211_start_next_roc(struct ieee80211_local *local);
1367void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); 1365void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 47aeee2d8db1..2659e428b80c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -215,6 +215,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
215 skb->priority = 7; 215 skb->priority = 7;
216 216
217 info->control.vif = &sdata->vif; 217 info->control.vif = &sdata->vif;
218 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
218 ieee80211_set_qos_hdr(sdata, skb); 219 ieee80211_set_qos_hdr(sdata, skb);
219} 220}
220 221
@@ -246,11 +247,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
246 return -EAGAIN; 247 return -EAGAIN;
247 248
248 skb = dev_alloc_skb(local->tx_headroom + 249 skb = dev_alloc_skb(local->tx_headroom +
250 IEEE80211_ENCRYPT_HEADROOM +
251 IEEE80211_ENCRYPT_TAILROOM +
249 hdr_len + 252 hdr_len +
250 2 + 15 /* PERR IE */); 253 2 + 15 /* PERR IE */);
251 if (!skb) 254 if (!skb)
252 return -1; 255 return -1;
253 skb_reserve(skb, local->tx_headroom); 256 skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
254 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 257 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
255 memset(mgmt, 0, hdr_len); 258 memset(mgmt, 0, hdr_len);
256 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 259 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a3552929a21d..5107248af7fb 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3400,6 +3400,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3400 3400
3401 ret = 0; 3401 ret = 0;
3402 3402
3403out:
3403 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 3404 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
3404 IEEE80211_CHAN_DISABLED)) { 3405 IEEE80211_CHAN_DISABLED)) {
3405 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { 3406 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
@@ -3408,14 +3409,13 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3408 goto out; 3409 goto out;
3409 } 3410 }
3410 3411
3411 ret = chandef_downgrade(chandef); 3412 ret |= chandef_downgrade(chandef);
3412 } 3413 }
3413 3414
3414 if (chandef->width != vht_chandef.width) 3415 if (chandef->width != vht_chandef.width)
3415 sdata_info(sdata, 3416 sdata_info(sdata,
3416 "local regulatory prevented using AP HT/VHT configuration, downgraded\n"); 3417 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
3417 3418
3418out:
3419 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef)); 3419 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
3420 return ret; 3420 return ret;
3421} 3421}
@@ -3529,8 +3529,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3529 */ 3529 */
3530 ret = ieee80211_vif_use_channel(sdata, &chandef, 3530 ret = ieee80211_vif_use_channel(sdata, &chandef,
3531 IEEE80211_CHANCTX_SHARED); 3531 IEEE80211_CHANCTX_SHARED);
3532 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) 3532 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
3533 ifmgd->flags |= chandef_downgrade(&chandef); 3533 ifmgd->flags |= chandef_downgrade(&chandef);
3534 ret = ieee80211_vif_use_channel(sdata, &chandef,
3535 IEEE80211_CHANCTX_SHARED);
3536 }
3534 return ret; 3537 return ret;
3535} 3538}
3536 3539
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index a5379aea7d09..a3ad4c3c80a3 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
102 ieee80211_sta_reset_conn_monitor(sdata); 102 ieee80211_sta_reset_conn_monitor(sdata);
103} 103}
104 104
105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 105void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
106 bool offchannel_ps_enable)
107{ 106{
108 struct ieee80211_sub_if_data *sdata; 107 struct ieee80211_sub_if_data *sdata;
109 108
@@ -134,8 +133,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
134 133
135 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 134 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
136 netif_tx_stop_all_queues(sdata->dev); 135 netif_tx_stop_all_queues(sdata->dev);
137 if (offchannel_ps_enable && 136 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
138 (sdata->vif.type == NL80211_IFTYPE_STATION) &&
139 sdata->u.mgd.associated) 137 sdata->u.mgd.associated)
140 ieee80211_offchannel_ps_enable(sdata); 138 ieee80211_offchannel_ps_enable(sdata);
141 } 139 }
@@ -143,8 +141,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
143 mutex_unlock(&local->iflist_mtx); 141 mutex_unlock(&local->iflist_mtx);
144} 142}
145 143
146void ieee80211_offchannel_return(struct ieee80211_local *local, 144void ieee80211_offchannel_return(struct ieee80211_local *local)
147 bool offchannel_ps_disable)
148{ 145{
149 struct ieee80211_sub_if_data *sdata; 146 struct ieee80211_sub_if_data *sdata;
150 147
@@ -163,11 +160,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
163 continue; 160 continue;
164 161
165 /* Tell AP we're back */ 162 /* Tell AP we're back */
166 if (offchannel_ps_disable && 163 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
167 sdata->vif.type == NL80211_IFTYPE_STATION) { 164 sdata->u.mgd.associated)
168 if (sdata->u.mgd.associated) 165 ieee80211_offchannel_ps_disable(sdata);
169 ieee80211_offchannel_ps_disable(sdata);
170 }
171 166
172 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 167 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
173 /* 168 /*
@@ -385,7 +380,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
385 local->tmp_channel = NULL; 380 local->tmp_channel = NULL;
386 ieee80211_hw_config(local, 0); 381 ieee80211_hw_config(local, 0);
387 382
388 ieee80211_offchannel_return(local, true); 383 ieee80211_offchannel_return(local);
389 } 384 }
390 385
391 ieee80211_recalc_idle(local); 386 ieee80211_recalc_idle(local);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d59fc6818b1c..bf82e69d0601 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -292,7 +292,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
292 if (!was_hw_scan) { 292 if (!was_hw_scan) {
293 ieee80211_configure_filter(local); 293 ieee80211_configure_filter(local);
294 drv_sw_scan_complete(local); 294 drv_sw_scan_complete(local);
295 ieee80211_offchannel_return(local, true); 295 ieee80211_offchannel_return(local);
296 } 296 }
297 297
298 ieee80211_recalc_idle(local); 298 ieee80211_recalc_idle(local);
@@ -341,7 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
341 local->next_scan_state = SCAN_DECISION; 341 local->next_scan_state = SCAN_DECISION;
342 local->scan_channel_idx = 0; 342 local->scan_channel_idx = 0;
343 343
344 ieee80211_offchannel_stop_vifs(local, true); 344 ieee80211_offchannel_stop_vifs(local);
345 345
346 ieee80211_configure_filter(local); 346 ieee80211_configure_filter(local);
347 347
@@ -678,12 +678,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
678 local->scan_channel = NULL; 678 local->scan_channel = NULL;
679 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 679 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
680 680
681 /* 681 /* disable PS */
682 * Re-enable vifs and beaconing. Leave PS 682 ieee80211_offchannel_return(local);
683 * in off-channel state..will put that back
684 * on-channel at the end of scanning.
685 */
686 ieee80211_offchannel_return(local, false);
687 683
688 *next_delay = HZ / 5; 684 *next_delay = HZ / 5;
689 /* afterwards, resume scan & go to next channel */ 685 /* afterwards, resume scan & go to next channel */
@@ -693,8 +689,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
693static void ieee80211_scan_state_resume(struct ieee80211_local *local, 689static void ieee80211_scan_state_resume(struct ieee80211_local *local,
694 unsigned long *next_delay) 690 unsigned long *next_delay)
695{ 691{
696 /* PS already is in off-channel mode */ 692 ieee80211_offchannel_stop_vifs(local);
697 ieee80211_offchannel_stop_vifs(local, false);
698 693
699 if (local->ops->flush) { 694 if (local->ops->flush) {
700 drv_flush(local, false); 695 drv_flush(local, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e9eadc40c09c..467c1d1b66f2 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1673,10 +1673,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1673 chanctx_conf = 1673 chanctx_conf =
1674 rcu_dereference(tmp_sdata->vif.chanctx_conf); 1674 rcu_dereference(tmp_sdata->vif.chanctx_conf);
1675 } 1675 }
1676 if (!chanctx_conf)
1677 goto fail_rcu;
1678 1676
1679 chan = chanctx_conf->def.chan; 1677 if (chanctx_conf)
1678 chan = chanctx_conf->def.chan;
1679 else if (!local->use_chanctx)
1680 chan = local->_oper_channel;
1681 else
1682 goto fail_rcu;
1680 1683
1681 /* 1684 /*
1682 * Frame injection is not allowed if beaconing is not allowed 1685 * Frame injection is not allowed if beaconing is not allowed
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 746048b13ef3..ae8ec6f27688 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
61 return 1; 61 return 1;
62} 62}
63 63
64static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
65 unsigned int sctphoff)
66{
67 __u32 crc32;
68 struct sk_buff *iter;
69
70 crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
71 skb_walk_frags(skb, iter)
72 crc32 = sctp_update_cksum((u8 *) iter->data,
73 skb_headlen(iter), crc32);
74 sctph->checksum = sctp_end_cksum(crc32);
75
76 skb->ip_summed = CHECKSUM_UNNECESSARY;
77}
78
64static int 79static int
65sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 80sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
66 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 81 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
67{ 82{
68 sctp_sctphdr_t *sctph; 83 sctp_sctphdr_t *sctph;
69 unsigned int sctphoff = iph->len; 84 unsigned int sctphoff = iph->len;
70 struct sk_buff *iter;
71 __be32 crc32;
72 85
73#ifdef CONFIG_IP_VS_IPV6 86#ifdef CONFIG_IP_VS_IPV6
74 if (cp->af == AF_INET6 && iph->fragoffs) 87 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
92 sctph = (void *) skb_network_header(skb) + sctphoff; 105 sctph = (void *) skb_network_header(skb) + sctphoff;
93 sctph->source = cp->vport; 106 sctph->source = cp->vport;
94 107
95 /* Calculate the checksum */ 108 sctp_nat_csum(skb, sctph, sctphoff);
96 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
97 skb_walk_frags(skb, iter)
98 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
99 crc32);
100 crc32 = sctp_end_cksum(crc32);
101 sctph->checksum = crc32;
102 109
103 return 1; 110 return 1;
104} 111}
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
109{ 116{
110 sctp_sctphdr_t *sctph; 117 sctp_sctphdr_t *sctph;
111 unsigned int sctphoff = iph->len; 118 unsigned int sctphoff = iph->len;
112 struct sk_buff *iter;
113 __be32 crc32;
114 119
115#ifdef CONFIG_IP_VS_IPV6 120#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6 && iph->fragoffs) 121 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
134 sctph = (void *) skb_network_header(skb) + sctphoff; 139 sctph = (void *) skb_network_header(skb) + sctphoff;
135 sctph->dest = cp->dport; 140 sctph->dest = cp->dport;
136 141
137 /* Calculate the checksum */ 142 sctp_nat_csum(skb, sctph, sctphoff);
138 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
139 skb_walk_frags(skb, iter)
140 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
141 crc32);
142 crc32 = sctp_end_cksum(crc32);
143 sctph->checksum = crc32;
144 143
145 return 1; 144 return 1;
146} 145}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index effa10c9e4e3..44fd10c539ac 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1795 GFP_KERNEL); 1795 GFP_KERNEL);
1796 if (!tinfo->buf) 1796 if (!tinfo->buf)
1797 goto outtinfo; 1797 goto outtinfo;
1798 } else {
1799 tinfo->buf = NULL;
1798 } 1800 }
1799 tinfo->id = id; 1801 tinfo->id = id;
1800 1802
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 016d95ead930..e4a0c4fb3a7c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1376,11 +1376,12 @@ void nf_conntrack_cleanup(struct net *net)
1376 synchronize_net(); 1376 synchronize_net();
1377 nf_conntrack_proto_fini(net); 1377 nf_conntrack_proto_fini(net);
1378 nf_conntrack_cleanup_net(net); 1378 nf_conntrack_cleanup_net(net);
1379}
1379 1380
1380 if (net_eq(net, &init_net)) { 1381void nf_conntrack_cleanup_end(void)
1381 RCU_INIT_POINTER(nf_ct_destroy, NULL); 1382{
1382 nf_conntrack_cleanup_init_net(); 1383 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1383 } 1384 nf_conntrack_cleanup_init_net();
1384} 1385}
1385 1386
1386void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) 1387void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 363285d544a1..e7185c684816 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -575,6 +575,7 @@ static int __init nf_conntrack_standalone_init(void)
575static void __exit nf_conntrack_standalone_fini(void) 575static void __exit nf_conntrack_standalone_fini(void)
576{ 576{
577 unregister_pernet_subsys(&nf_conntrack_net_ops); 577 unregister_pernet_subsys(&nf_conntrack_net_ops);
578 nf_conntrack_cleanup_end();
578} 579}
579 580
580module_init(nf_conntrack_standalone_init); 581module_init(nf_conntrack_standalone_init);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8d987c3573fd..7b3a9e5999c0 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
345} 345}
346EXPORT_SYMBOL_GPL(xt_find_revision); 346EXPORT_SYMBOL_GPL(xt_find_revision);
347 347
348static char *textify_hooks(char *buf, size_t size, unsigned int mask) 348static char *
349textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
349{ 350{
350 static const char *const names[] = { 351 static const char *const inetbr_names[] = {
351 "PREROUTING", "INPUT", "FORWARD", 352 "PREROUTING", "INPUT", "FORWARD",
352 "OUTPUT", "POSTROUTING", "BROUTING", 353 "OUTPUT", "POSTROUTING", "BROUTING",
353 }; 354 };
354 unsigned int i; 355 static const char *const arp_names[] = {
356 "INPUT", "FORWARD", "OUTPUT",
357 };
358 const char *const *names;
359 unsigned int i, max;
355 char *p = buf; 360 char *p = buf;
356 bool np = false; 361 bool np = false;
357 int res; 362 int res;
358 363
364 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
365 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
366 ARRAY_SIZE(inetbr_names);
359 *p = '\0'; 367 *p = '\0';
360 for (i = 0; i < ARRAY_SIZE(names); ++i) { 368 for (i = 0; i < max; ++i) {
361 if (!(mask & (1 << i))) 369 if (!(mask & (1 << i)))
362 continue; 370 continue;
363 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); 371 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
@@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,
402 pr_err("%s_tables: %s match: used from hooks %s, but only " 410 pr_err("%s_tables: %s match: used from hooks %s, but only "
403 "valid from %s\n", 411 "valid from %s\n",
404 xt_prefix[par->family], par->match->name, 412 xt_prefix[par->family], par->match->name,
405 textify_hooks(used, sizeof(used), par->hook_mask), 413 textify_hooks(used, sizeof(used), par->hook_mask,
406 textify_hooks(allow, sizeof(allow), par->match->hooks)); 414 par->family),
415 textify_hooks(allow, sizeof(allow), par->match->hooks,
416 par->family));
407 return -EINVAL; 417 return -EINVAL;
408 } 418 }
409 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 419 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
@@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,
575 pr_err("%s_tables: %s target: used from hooks %s, but only " 585 pr_err("%s_tables: %s target: used from hooks %s, but only "
576 "usable from %s\n", 586 "usable from %s\n",
577 xt_prefix[par->family], par->target->name, 587 xt_prefix[par->family], par->target->name,
578 textify_hooks(used, sizeof(used), par->hook_mask), 588 textify_hooks(used, sizeof(used), par->hook_mask,
579 textify_hooks(allow, sizeof(allow), par->target->hooks)); 589 par->family),
590 textify_hooks(allow, sizeof(allow), par->target->hooks,
591 par->family));
580 return -EINVAL; 592 return -EINVAL;
581 } 593 }
582 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 594 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2a0843081840..bde009ed8d3b 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -109,7 +109,7 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
109 struct xt_ct_target_info *info = par->targinfo; 109 struct xt_ct_target_info *info = par->targinfo;
110 struct nf_conntrack_tuple t; 110 struct nf_conntrack_tuple t;
111 struct nf_conn *ct; 111 struct nf_conn *ct;
112 int ret; 112 int ret = -EOPNOTSUPP;
113 113
114 if (info->flags & ~XT_CT_NOTRACK) 114 if (info->flags & ~XT_CT_NOTRACK)
115 return -EINVAL; 115 return -EINVAL;
@@ -247,7 +247,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
247 struct xt_ct_target_info_v1 *info = par->targinfo; 247 struct xt_ct_target_info_v1 *info = par->targinfo;
248 struct nf_conntrack_tuple t; 248 struct nf_conntrack_tuple t;
249 struct nf_conn *ct; 249 struct nf_conn *ct;
250 int ret; 250 int ret = -EOPNOTSUPP;
251 251
252 if (info->flags & ~XT_CT_NOTRACK) 252 if (info->flags & ~XT_CT_NOTRACK)
253 return -EINVAL; 253 return -EINVAL;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index a9327e2e48ce..670cbc3518de 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -35,10 +35,11 @@
35/* Must be called with rcu_read_lock. */ 35/* Must be called with rcu_read_lock. */
36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
37{ 37{
38 if (unlikely(!vport)) { 38 if (unlikely(!vport))
39 kfree_skb(skb); 39 goto error;
40 return; 40
41 } 41 if (unlikely(skb_warn_if_lro(skb)))
42 goto error;
42 43
43 /* Make our own copy of the packet. Otherwise we will mangle the 44 /* Make our own copy of the packet. Otherwise we will mangle the
44 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
50 51
51 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
52 ovs_vport_receive(vport, skb); 53 ovs_vport_receive(vport, skb);
54 return;
55
56error:
57 kfree_skb(skb);
53} 58}
54 59
55/* Called with rcu_read_lock and bottom-halves disabled. */ 60/* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
169 goto error; 174 goto error;
170 } 175 }
171 176
172 if (unlikely(skb_warn_if_lro(skb)))
173 goto error;
174
175 skb->dev = netdev_vport->dev; 177 skb->dev = netdev_vport->dev;
176 len = skb->len; 178 len = skb->len;
177 dev_queue_xmit(skb); 179 dev_queue_xmit(skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e639645e8fec..c111bd0e083a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
2361 2361
2362 packet_flush_mclist(sk); 2362 packet_flush_mclist(sk);
2363 2363
2364 memset(&req_u, 0, sizeof(req_u)); 2364 if (po->rx_ring.pg_vec) {
2365 2365 memset(&req_u, 0, sizeof(req_u));
2366 if (po->rx_ring.pg_vec)
2367 packet_set_ring(sk, &req_u, 1, 0); 2366 packet_set_ring(sk, &req_u, 1, 0);
2367 }
2368 2368
2369 if (po->tx_ring.pg_vec) 2369 if (po->tx_ring.pg_vec) {
2370 memset(&req_u, 0, sizeof(req_u));
2370 packet_set_ring(sk, &req_u, 1, 1); 2371 packet_set_ring(sk, &req_u, 1, 1);
2372 }
2371 2373
2372 fanout_release(sk); 2374 fanout_release(sk);
2373 2375
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index c9d931e7ffec..b85107b5ef62 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -148,11 +148,9 @@ static unsigned long rfkill_ratelimit(const unsigned long last)
148 148
149static void rfkill_schedule_ratelimited(void) 149static void rfkill_schedule_ratelimited(void)
150{ 150{
151 if (delayed_work_pending(&rfkill_op_work)) 151 if (schedule_delayed_work(&rfkill_op_work,
152 return; 152 rfkill_ratelimit(rfkill_last_scheduled)))
153 schedule_delayed_work(&rfkill_op_work, 153 rfkill_last_scheduled = jiffies;
154 rfkill_ratelimit(rfkill_last_scheduled));
155 rfkill_last_scheduled = jiffies;
156} 154}
157 155
158static void rfkill_schedule_global_op(enum rfkill_sched_op op) 156static void rfkill_schedule_global_op(enum rfkill_sched_op op)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 51561eafcb72..79e8ed4ac7ce 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1135,9 +1135,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1135 memset(&opt, 0, sizeof(opt)); 1135 memset(&opt, 0, sizeof(opt));
1136 1136
1137 opt.rate.rate = cl->rate.rate_bps >> 3; 1137 opt.rate.rate = cl->rate.rate_bps >> 3;
1138 opt.buffer = cl->buffer; 1138 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1139 opt.ceil.rate = cl->ceil.rate_bps >> 3; 1139 opt.ceil.rate = cl->ceil.rate_bps >> 3;
1140 opt.cbuffer = cl->cbuffer; 1140 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1141 opt.quantum = cl->quantum; 1141 opt.quantum = cl->quantum;
1142 opt.prio = cl->prio; 1142 opt.prio = cl->prio;
1143 opt.level = cl->level; 1143 opt.level = cl->level;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 298c0ddfb57e..3d2acc7a9c80 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
438 if (q->rate) { 438 if (q->rate) {
439 struct sk_buff_head *list = &sch->q; 439 struct sk_buff_head *list = &sch->q;
440 440
441 delay += packet_len_2_sched_time(skb->len, q);
442
443 if (!skb_queue_empty(list)) { 441 if (!skb_queue_empty(list)) {
444 /* 442 /*
445 * Last packet in queue is reference point (now). 443 * Last packet in queue is reference point (now),
446 * First packet in queue is already in flight, 444 * calculate this time bonus and subtract
447 * calculate this time bonus and substract
448 * from delay. 445 * from delay.
449 */ 446 */
450 delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
448 delay = max_t(psched_tdiff_t, 0, delay);
451 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
452 } 450 }
451
452 delay += packet_len_2_sched_time(skb->len, q);
453 } 453 }
454 454
455 cb->time_to_send = now + delay; 455 cb->time_to_send = now + delay;
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 7521d944c0fb..cf4852814e0c 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig IP_SCTP 5menuconfig IP_SCTP
6 tristate "The SCTP Protocol (EXPERIMENTAL)" 6 tristate "The SCTP Protocol"
7 depends on INET && EXPERIMENTAL 7 depends on INET
8 depends on IPV6 || IPV6=n 8 depends on IPV6 || IPV6=n
9 select CRYPTO 9 select CRYPTO
10 select CRYPTO_HMAC 10 select CRYPTO_HMAC
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 159b9bc5d633..d8420ae614dc 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
71 return; 71 return;
72 72
73 if (atomic_dec_and_test(&key->refcnt)) { 73 if (atomic_dec_and_test(&key->refcnt)) {
74 kfree(key); 74 kzfree(key);
75 SCTP_DBG_OBJCNT_DEC(keys); 75 SCTP_DBG_OBJCNT_DEC(keys);
76 } 76 }
77} 77}
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 17a001bac2cc..1a9c5fb77310 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
249/* Final destructor for endpoint. */ 249/* Final destructor for endpoint. */
250static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 250static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
251{ 251{
252 int i;
253
252 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 254 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
253 255
254 /* Free up the HMAC transform. */ 256 /* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
271 sctp_inq_free(&ep->base.inqueue); 273 sctp_inq_free(&ep->base.inqueue);
272 sctp_bind_addr_free(&ep->base.bind_addr); 274 sctp_bind_addr_free(&ep->base.bind_addr);
273 275
276 for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
277 memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
278
274 /* Remove and free the port */ 279 /* Remove and free the port */
275 if (sctp_sk(ep->base.sk)->bind_hash) 280 if (sctp_sk(ep->base.sk)->bind_hash)
276 sctp_put_port(ep->base.sk); 281 sctp_put_port(ep->base.sk);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f3f0f4dc31dd..391a245d5203 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
326 */ 326 */
327 rcu_read_lock(); 327 rcu_read_lock();
328 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 328 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
329 if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) 329 if (!laddr->valid)
330 continue; 330 continue;
331 if ((laddr->a.sa.sa_family == AF_INET6) && 331 if ((laddr->state == SCTP_ADDR_SRC) &&
332 (laddr->a.sa.sa_family == AF_INET6) &&
332 (scope <= sctp_scope(&laddr->a))) { 333 (scope <= sctp_scope(&laddr->a))) {
333 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 334 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
334 if (!baddr || (matchlen < bmatchlen)) { 335 if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 379c81dee9d1..9bcdbd02d777 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
224 224
225/* Free the outqueue structure and any related pending chunks. 225/* Free the outqueue structure and any related pending chunks.
226 */ 226 */
227void sctp_outq_teardown(struct sctp_outq *q) 227static void __sctp_outq_teardown(struct sctp_outq *q)
228{ 228{
229 struct sctp_transport *transport; 229 struct sctp_transport *transport;
230 struct list_head *lchunk, *temp; 230 struct list_head *lchunk, *temp;
@@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
277 sctp_chunk_free(chunk); 277 sctp_chunk_free(chunk);
278 } 278 }
279 279
280 q->error = 0;
281
282 /* Throw away any leftover control chunks. */ 280 /* Throw away any leftover control chunks. */
283 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
284 list_del_init(&chunk->list); 282 list_del_init(&chunk->list);
@@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
286 } 284 }
287} 285}
288 286
287void sctp_outq_teardown(struct sctp_outq *q)
288{
289 __sctp_outq_teardown(q);
290 sctp_outq_init(q->asoc, q);
291}
292
289/* Free the outqueue structure and any related pending chunks. */ 293/* Free the outqueue structure and any related pending chunks. */
290void sctp_outq_free(struct sctp_outq *q) 294void sctp_outq_free(struct sctp_outq *q)
291{ 295{
292 /* Throw away leftover chunks. */ 296 /* Throw away leftover chunks. */
293 sctp_outq_teardown(q); 297 __sctp_outq_teardown(q);
294 298
295 /* If we were kmalloc()'d, free the memory. */ 299 /* If we were kmalloc()'d, free the memory. */
296 if (q->malloced) 300 if (q->malloced)
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 618ec7e216ca..5131fcfedb03 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1779 1779
1780 /* Update the content of current association. */ 1780 /* Update the content of current association. */
1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1782 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1782 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1783 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1784 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1785 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1784 return SCTP_DISPOSITION_CONSUME; 1786 return SCTP_DISPOSITION_CONSUME;
1785 1787
1786nomem_ev: 1788nomem_ev:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e65758cb038..cedd9bf67b8c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3390 3390
3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3392out: 3392out:
3393 kfree(authkey); 3393 kzfree(authkey);
3394 return ret; 3394 return ret;
3395} 3395}
3396 3396
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 043889ac86c0..bf3c6e8fc401 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)
366 366
367void sctp_sysctl_net_unregister(struct net *net) 367void sctp_sysctl_net_unregister(struct net *net)
368{ 368{
369 struct ctl_table *table;
370
371 table = net->sctp.sysctl_header->ctl_table_arg;
369 unregister_net_sysctl_table(net->sctp.sysctl_header); 372 unregister_net_sysctl_table(net->sctp.sysctl_header);
373 kfree(table);
370} 374}
371 375
372static struct ctl_table_header * sctp_sysctl_header; 376static struct ctl_table_header * sctp_sysctl_header;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index bfa31714581f..fb20f25ddec9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99} 99}
100 100
101static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
102{
103 struct list_head *q = &queue->tasks[queue->priority];
104 struct rpc_task *task;
105
106 if (!list_empty(q)) {
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
108 if (task->tk_owner == queue->owner)
109 list_move_tail(&task->u.tk_wait.list, q);
110 }
111}
112
101static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) 113static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
102{ 114{
103 queue->priority = priority; 115 if (queue->priority != priority) {
116 /* Fairness: rotate the list when changing priority */
117 rpc_rotate_queue_owner(queue);
118 queue->priority = priority;
119 }
104} 120}
105 121
106static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) 122static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0a148c9d2a5c..0f679df7d072 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
465} 465}
466 466
467/* 467/*
468 * See net/ipv6/datagram.c : datagram_recv_ctl 468 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
469 */ 469 */
470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
471 struct cmsghdr *cmh) 471 struct cmsghdr *cmh)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7d4789..45f1618c8e23 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1358 &iwe, IW_EV_UINT_LEN); 1358 &iwe, IW_EV_UINT_LEN);
1359 } 1359 }
1360 1360
1361 buf = kmalloc(30, GFP_ATOMIC); 1361 buf = kmalloc(31, GFP_ATOMIC);
1362 if (buf) { 1362 if (buf) {
1363 memset(&iwe, 0, sizeof(iwe)); 1363 memset(&iwe, 0, sizeof(iwe));
1364 iwe.cmd = IWEVCUSTOM; 1364 iwe.cmd = IWEVCUSTOM;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 41eabc46f110..07c585756d2a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2656,7 +2656,7 @@ static void xfrm_policy_fini(struct net *net)
2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2657 2657
2658 htab = &net->xfrm.policy_bydst[dir]; 2658 htab = &net->xfrm.policy_bydst[dir];
2659 sz = (htab->hmask + 1); 2659 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2660 WARN_ON(!hlist_empty(htab->table)); 2660 WARN_ON(!hlist_empty(htab->table));
2661 xfrm_hash_free(htab->table, sz); 2661 xfrm_hash_free(htab->table, sz);
2662 } 2662 }
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 765f6fe951eb..35754cc8a9e5 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
242 u32 diff; 242 u32 diff;
243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
244 u32 seq = ntohl(net_seq); 244 u32 seq = ntohl(net_seq);
245 u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 245 u32 pos;
246 246
247 if (!replay_esn->replay_window) 247 if (!replay_esn->replay_window)
248 return; 248 return;
249 249
250 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
251
250 if (seq > replay_esn->seq) { 252 if (seq > replay_esn->seq) {
251 diff = seq - replay_esn->seq; 253 diff = seq - replay_esn->seq;
252 254
diff --git a/samples/Kconfig b/samples/Kconfig
index 7b6792a18c05..6181c2cc9ca0 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -5,12 +5,6 @@ menuconfig SAMPLES
5 5
6if SAMPLES 6if SAMPLES
7 7
8config SAMPLE_TRACEPOINTS
9 tristate "Build tracepoints examples -- loadable modules only"
10 depends on TRACEPOINTS && m
11 help
12 This build tracepoints example modules.
13
14config SAMPLE_TRACE_EVENTS 8config SAMPLE_TRACE_EVENTS
15 tristate "Build trace_events examples -- loadable modules only" 9 tristate "Build trace_events examples -- loadable modules only"
16 depends on EVENT_TRACING && m 10 depends on EVENT_TRACING && m
diff --git a/samples/Makefile b/samples/Makefile
index 5ef08bba96ce..1a60c62e2045 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,4 +1,4 @@
1# Makefile for Linux samples code 1# Makefile for Linux samples code
2 2
3obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \ 3obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ \
4 hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ 4 hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index bbbd276659ba..7203e66dcd6f 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o
19 19
20# Try to match the kernel target. 20# Try to match the kernel target.
21ifndef CONFIG_64BIT 21ifndef CONFIG_64BIT
22ifndef CROSS_COMPILE
22 23
23# s390 has -m31 flag to build 31 bit binaries 24# s390 has -m31 flag to build 31 bit binaries
24ifndef CONFIG_S390 25ifndef CONFIG_S390
@@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG)
35HOSTLOADLIBES_bpf-fancy += $(MFLAG) 36HOSTLOADLIBES_bpf-fancy += $(MFLAG)
36HOSTLOADLIBES_dropper += $(MFLAG) 37HOSTLOADLIBES_dropper += $(MFLAG)
37endif 38endif
39endif
38 40
39# Tell kbuild to always build the programs 41# Tell kbuild to always build the programs
40always := $(hostprogs-y) 42always := $(hostprogs-y)
diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile
deleted file mode 100644
index 36479ad9ae14..000000000000
--- a/samples/tracepoints/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1# builds the tracepoint example kernel modules;
2# then to use one (as root): insmod <module_name.ko>
3
4obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o
5obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o
6obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
deleted file mode 100644
index 4d46be965961..000000000000
--- a/samples/tracepoints/tp-samples-trace.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _TP_SAMPLES_TRACE_H
2#define _TP_SAMPLES_TRACE_H
3
4#include <linux/proc_fs.h> /* for struct inode and struct file */
5#include <linux/tracepoint.h>
6
7DECLARE_TRACE(subsys_event,
8 TP_PROTO(struct inode *inode, struct file *file),
9 TP_ARGS(inode, file));
10DECLARE_TRACE_NOARGS(subsys_eventb);
11#endif
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c
deleted file mode 100644
index 744c0b9652a7..000000000000
--- a/samples/tracepoints/tracepoint-probe-sample.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * tracepoint-probe-sample.c
3 *
4 * sample tracepoint probes.
5 */
6
7#include <linux/module.h>
8#include <linux/file.h>
9#include <linux/dcache.h>
10#include "tp-samples-trace.h"
11
12/*
13 * Here the caller only guarantees locking for struct file and struct inode.
14 * Locking must therefore be done in the probe to use the dentry.
15 */
16static void probe_subsys_event(void *ignore,
17 struct inode *inode, struct file *file)
18{
19 path_get(&file->f_path);
20 dget(file->f_path.dentry);
21 printk(KERN_INFO "Event is encountered with filename %s\n",
22 file->f_path.dentry->d_name.name);
23 dput(file->f_path.dentry);
24 path_put(&file->f_path);
25}
26
27static void probe_subsys_eventb(void *ignore)
28{
29 printk(KERN_INFO "Event B is encountered\n");
30}
31
32static int __init tp_sample_trace_init(void)
33{
34 int ret;
35
36 ret = register_trace_subsys_event(probe_subsys_event, NULL);
37 WARN_ON(ret);
38 ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
39 WARN_ON(ret);
40
41 return 0;
42}
43
44module_init(tp_sample_trace_init);
45
46static void __exit tp_sample_trace_exit(void)
47{
48 unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
49 unregister_trace_subsys_event(probe_subsys_event, NULL);
50 tracepoint_synchronize_unregister();
51}
52
53module_exit(tp_sample_trace_exit);
54
55MODULE_LICENSE("GPL");
56MODULE_AUTHOR("Mathieu Desnoyers");
57MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c
deleted file mode 100644
index 9fcf990e5d4b..000000000000
--- a/samples/tracepoints/tracepoint-probe-sample2.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * tracepoint-probe-sample2.c
3 *
4 * 2nd sample tracepoint probes.
5 */
6
7#include <linux/module.h>
8#include <linux/fs.h>
9#include "tp-samples-trace.h"
10
11/*
12 * Here the caller only guarantees locking for struct file and struct inode.
13 * Locking must therefore be done in the probe to use the dentry.
14 */
15static void probe_subsys_event(void *ignore,
16 struct inode *inode, struct file *file)
17{
18 printk(KERN_INFO "Event is encountered with inode number %lu\n",
19 inode->i_ino);
20}
21
22static int __init tp_sample_trace_init(void)
23{
24 int ret;
25
26 ret = register_trace_subsys_event(probe_subsys_event, NULL);
27 WARN_ON(ret);
28
29 return 0;
30}
31
32module_init(tp_sample_trace_init);
33
34static void __exit tp_sample_trace_exit(void)
35{
36 unregister_trace_subsys_event(probe_subsys_event, NULL);
37 tracepoint_synchronize_unregister();
38}
39
40module_exit(tp_sample_trace_exit);
41
42MODULE_LICENSE("GPL");
43MODULE_AUTHOR("Mathieu Desnoyers");
44MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
deleted file mode 100644
index f4d89e008c32..000000000000
--- a/samples/tracepoints/tracepoint-sample.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/* tracepoint-sample.c
2 *
3 * Executes a tracepoint when /proc/tracepoint-sample is opened.
4 *
5 * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
6 *
7 * This file is released under the GPLv2.
8 * See the file COPYING for more details.
9 */
10
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/proc_fs.h>
14#include "tp-samples-trace.h"
15
16DEFINE_TRACE(subsys_event);
17DEFINE_TRACE(subsys_eventb);
18
19struct proc_dir_entry *pentry_sample;
20
21static int my_open(struct inode *inode, struct file *file)
22{
23 int i;
24
25 trace_subsys_event(inode, file);
26 for (i = 0; i < 10; i++)
27 trace_subsys_eventb();
28 return -EPERM;
29}
30
31static const struct file_operations mark_ops = {
32 .open = my_open,
33 .llseek = noop_llseek,
34};
35
36static int __init sample_init(void)
37{
38 printk(KERN_ALERT "sample init\n");
39 pentry_sample = proc_create("tracepoint-sample", 0444, NULL,
40 &mark_ops);
41 if (!pentry_sample)
42 return -EPERM;
43 return 0;
44}
45
46static void __exit sample_exit(void)
47{
48 printk(KERN_ALERT "sample exit\n");
49 remove_proc_entry("tracepoint-sample", NULL);
50}
51
52module_init(sample_init)
53module_exit(sample_exit)
54
55MODULE_LICENSE("GPL");
56MODULE_AUTHOR("Mathieu Desnoyers");
57MODULE_DESCRIPTION("Tracepoint sample");
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index bdf42fdf64c9..07125e697d7a 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -156,6 +156,11 @@ cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
156 156
157ld_flags = $(LDFLAGS) $(ldflags-y) 157ld_flags = $(LDFLAGS) $(ldflags-y)
158 158
159dtc_cpp_flags = -Wp,-MD,$(depfile) -nostdinc \
160 -I$(srctree)/arch/$(SRCARCH)/boot/dts \
161 -I$(srctree)/arch/$(SRCARCH)/include/dts \
162 -undef -D__DTS__
163
159# Finds the multi-part object the current object will be linked into 164# Finds the multi-part object the current object will be linked into
160modname-multi = $(sort $(foreach m,$(multi-used),\ 165modname-multi = $(sort $(foreach m,$(multi-used),\
161 $(if $(filter $(subst $(obj)/,,$*.o), $($(m:.o=-objs)) $($(m:.o=-y))),$(m:.o=)))) 166 $(if $(filter $(subst $(obj)/,,$*.o), $($(m:.o=-objs)) $($(m:.o=-y))),$(m:.o=))))
@@ -269,6 +274,15 @@ cmd_dtc = $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) -d $(depfile
269$(obj)/%.dtb: $(src)/%.dts FORCE 274$(obj)/%.dtb: $(src)/%.dts FORCE
270 $(call if_changed_dep,dtc) 275 $(call if_changed_dep,dtc)
271 276
277dtc-tmp = $(subst $(comma),_,$(dot-target).dts)
278
279quiet_cmd_dtc_cpp = DTC+CPP $@
280cmd_dtc_cpp = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
281 $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) $(dtc-tmp)
282
283$(obj)/%.dtb: $(src)/%.dtsp FORCE
284 $(call if_changed_dep,dtc_cpp)
285
272# Bzip2 286# Bzip2
273# --------------------------------------------------------------------------- 287# ---------------------------------------------------------------------------
274 288
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4d2c7dfdaabd..2bb08a962ce3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -230,12 +230,12 @@ our $Inline = qr{inline|__always_inline|noinline};
230our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; 230our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
231our $Lval = qr{$Ident(?:$Member)*}; 231our $Lval = qr{$Ident(?:$Member)*};
232 232
233our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; 233our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
234our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; 234our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
235our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; 235our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
236our $Float = qr{$Float_hex|$Float_dec|$Float_int}; 236our $Float = qr{$Float_hex|$Float_dec|$Float_int};
237our $Constant = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; 237our $Constant = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*};
238our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; 238our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
239our $Compare = qr{<=|>=|==|!=|<|>}; 239our $Compare = qr{<=|>=|==|!=|<|>};
240our $Operators = qr{ 240our $Operators = qr{
241 <=|>=|==|!=| 241 <=|>=|==|!=|
diff --git a/security/capability.c b/security/capability.c
index 0fe5a026aef8..579775088967 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -709,16 +709,31 @@ static void cap_req_classify_flow(const struct request_sock *req,
709{ 709{
710} 710}
711 711
712static int cap_tun_dev_alloc_security(void **security)
713{
714 return 0;
715}
716
717static void cap_tun_dev_free_security(void *security)
718{
719}
720
712static int cap_tun_dev_create(void) 721static int cap_tun_dev_create(void)
713{ 722{
714 return 0; 723 return 0;
715} 724}
716 725
717static void cap_tun_dev_post_create(struct sock *sk) 726static int cap_tun_dev_attach_queue(void *security)
727{
728 return 0;
729}
730
731static int cap_tun_dev_attach(struct sock *sk, void *security)
718{ 732{
733 return 0;
719} 734}
720 735
721static int cap_tun_dev_attach(struct sock *sk) 736static int cap_tun_dev_open(void *security)
722{ 737{
723 return 0; 738 return 0;
724} 739}
@@ -1050,8 +1065,11 @@ void __init security_fixup_ops(struct security_operations *ops)
1050 set_to_cap_if_null(ops, secmark_refcount_inc); 1065 set_to_cap_if_null(ops, secmark_refcount_inc);
1051 set_to_cap_if_null(ops, secmark_refcount_dec); 1066 set_to_cap_if_null(ops, secmark_refcount_dec);
1052 set_to_cap_if_null(ops, req_classify_flow); 1067 set_to_cap_if_null(ops, req_classify_flow);
1068 set_to_cap_if_null(ops, tun_dev_alloc_security);
1069 set_to_cap_if_null(ops, tun_dev_free_security);
1053 set_to_cap_if_null(ops, tun_dev_create); 1070 set_to_cap_if_null(ops, tun_dev_create);
1054 set_to_cap_if_null(ops, tun_dev_post_create); 1071 set_to_cap_if_null(ops, tun_dev_open);
1072 set_to_cap_if_null(ops, tun_dev_attach_queue);
1055 set_to_cap_if_null(ops, tun_dev_attach); 1073 set_to_cap_if_null(ops, tun_dev_attach);
1056#endif /* CONFIG_SECURITY_NETWORK */ 1074#endif /* CONFIG_SECURITY_NETWORK */
1057#ifdef CONFIG_SECURITY_NETWORK_XFRM 1075#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 19ecc8de9e6b..d794abcc4b3b 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup)
215 struct dev_cgroup *dev_cgroup; 215 struct dev_cgroup *dev_cgroup;
216 216
217 dev_cgroup = cgroup_to_devcgroup(cgroup); 217 dev_cgroup = cgroup_to_devcgroup(cgroup);
218 mutex_lock(&devcgroup_mutex);
218 dev_exception_clean(dev_cgroup); 219 dev_exception_clean(dev_cgroup);
220 mutex_unlock(&devcgroup_mutex);
219 kfree(dev_cgroup); 221 kfree(dev_cgroup);
220} 222}
221 223
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index dfb26918699c..7dd538ef5b83 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
205 rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, 205 rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
206 &xattr_data, 206 &xattr_data,
207 sizeof(xattr_data), 0); 207 sizeof(xattr_data), 0);
208 } 208 } else if (rc == -ENODATA && inode->i_op->removexattr) {
209 else if (rc == -ENODATA)
210 rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); 209 rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
210 }
211 return rc; 211 return rc;
212} 212}
213 213
diff --git a/security/security.c b/security/security.c
index daa97f4ac9d1..7b88c6aeaed4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1254,24 +1254,42 @@ void security_secmark_refcount_dec(void)
1254} 1254}
1255EXPORT_SYMBOL(security_secmark_refcount_dec); 1255EXPORT_SYMBOL(security_secmark_refcount_dec);
1256 1256
1257int security_tun_dev_alloc_security(void **security)
1258{
1259 return security_ops->tun_dev_alloc_security(security);
1260}
1261EXPORT_SYMBOL(security_tun_dev_alloc_security);
1262
1263void security_tun_dev_free_security(void *security)
1264{
1265 security_ops->tun_dev_free_security(security);
1266}
1267EXPORT_SYMBOL(security_tun_dev_free_security);
1268
1257int security_tun_dev_create(void) 1269int security_tun_dev_create(void)
1258{ 1270{
1259 return security_ops->tun_dev_create(); 1271 return security_ops->tun_dev_create();
1260} 1272}
1261EXPORT_SYMBOL(security_tun_dev_create); 1273EXPORT_SYMBOL(security_tun_dev_create);
1262 1274
1263void security_tun_dev_post_create(struct sock *sk) 1275int security_tun_dev_attach_queue(void *security)
1264{ 1276{
1265 return security_ops->tun_dev_post_create(sk); 1277 return security_ops->tun_dev_attach_queue(security);
1266} 1278}
1267EXPORT_SYMBOL(security_tun_dev_post_create); 1279EXPORT_SYMBOL(security_tun_dev_attach_queue);
1268 1280
1269int security_tun_dev_attach(struct sock *sk) 1281int security_tun_dev_attach(struct sock *sk, void *security)
1270{ 1282{
1271 return security_ops->tun_dev_attach(sk); 1283 return security_ops->tun_dev_attach(sk, security);
1272} 1284}
1273EXPORT_SYMBOL(security_tun_dev_attach); 1285EXPORT_SYMBOL(security_tun_dev_attach);
1274 1286
1287int security_tun_dev_open(void *security)
1288{
1289 return security_ops->tun_dev_open(security);
1290}
1291EXPORT_SYMBOL(security_tun_dev_open);
1292
1275#endif /* CONFIG_SECURITY_NETWORK */ 1293#endif /* CONFIG_SECURITY_NETWORK */
1276 1294
1277#ifdef CONFIG_SECURITY_NETWORK_XFRM 1295#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 61a53367d029..ef26e9611ffb 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4399,6 +4399,24 @@ static void selinux_req_classify_flow(const struct request_sock *req,
4399 fl->flowi_secid = req->secid; 4399 fl->flowi_secid = req->secid;
4400} 4400}
4401 4401
4402static int selinux_tun_dev_alloc_security(void **security)
4403{
4404 struct tun_security_struct *tunsec;
4405
4406 tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
4407 if (!tunsec)
4408 return -ENOMEM;
4409 tunsec->sid = current_sid();
4410
4411 *security = tunsec;
4412 return 0;
4413}
4414
4415static void selinux_tun_dev_free_security(void *security)
4416{
4417 kfree(security);
4418}
4419
4402static int selinux_tun_dev_create(void) 4420static int selinux_tun_dev_create(void)
4403{ 4421{
4404 u32 sid = current_sid(); 4422 u32 sid = current_sid();
@@ -4414,8 +4432,17 @@ static int selinux_tun_dev_create(void)
4414 NULL); 4432 NULL);
4415} 4433}
4416 4434
4417static void selinux_tun_dev_post_create(struct sock *sk) 4435static int selinux_tun_dev_attach_queue(void *security)
4418{ 4436{
4437 struct tun_security_struct *tunsec = security;
4438
4439 return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
4440 TUN_SOCKET__ATTACH_QUEUE, NULL);
4441}
4442
4443static int selinux_tun_dev_attach(struct sock *sk, void *security)
4444{
4445 struct tun_security_struct *tunsec = security;
4419 struct sk_security_struct *sksec = sk->sk_security; 4446 struct sk_security_struct *sksec = sk->sk_security;
4420 4447
4421 /* we don't currently perform any NetLabel based labeling here and it 4448 /* we don't currently perform any NetLabel based labeling here and it
@@ -4425,20 +4452,19 @@ static void selinux_tun_dev_post_create(struct sock *sk)
4425 * cause confusion to the TUN user that had no idea network labeling 4452 * cause confusion to the TUN user that had no idea network labeling
4426 * protocols were being used */ 4453 * protocols were being used */
4427 4454
4428 /* see the comments in selinux_tun_dev_create() about why we don't use 4455 sksec->sid = tunsec->sid;
4429 * the sockcreate SID here */
4430
4431 sksec->sid = current_sid();
4432 sksec->sclass = SECCLASS_TUN_SOCKET; 4456 sksec->sclass = SECCLASS_TUN_SOCKET;
4457
4458 return 0;
4433} 4459}
4434 4460
4435static int selinux_tun_dev_attach(struct sock *sk) 4461static int selinux_tun_dev_open(void *security)
4436{ 4462{
4437 struct sk_security_struct *sksec = sk->sk_security; 4463 struct tun_security_struct *tunsec = security;
4438 u32 sid = current_sid(); 4464 u32 sid = current_sid();
4439 int err; 4465 int err;
4440 4466
4441 err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET, 4467 err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
4442 TUN_SOCKET__RELABELFROM, NULL); 4468 TUN_SOCKET__RELABELFROM, NULL);
4443 if (err) 4469 if (err)
4444 return err; 4470 return err;
@@ -4446,8 +4472,7 @@ static int selinux_tun_dev_attach(struct sock *sk)
4446 TUN_SOCKET__RELABELTO, NULL); 4472 TUN_SOCKET__RELABELTO, NULL);
4447 if (err) 4473 if (err)
4448 return err; 4474 return err;
4449 4475 tunsec->sid = sid;
4450 sksec->sid = sid;
4451 4476
4452 return 0; 4477 return 0;
4453} 4478}
@@ -5642,9 +5667,12 @@ static struct security_operations selinux_ops = {
5642 .secmark_refcount_inc = selinux_secmark_refcount_inc, 5667 .secmark_refcount_inc = selinux_secmark_refcount_inc,
5643 .secmark_refcount_dec = selinux_secmark_refcount_dec, 5668 .secmark_refcount_dec = selinux_secmark_refcount_dec,
5644 .req_classify_flow = selinux_req_classify_flow, 5669 .req_classify_flow = selinux_req_classify_flow,
5670 .tun_dev_alloc_security = selinux_tun_dev_alloc_security,
5671 .tun_dev_free_security = selinux_tun_dev_free_security,
5645 .tun_dev_create = selinux_tun_dev_create, 5672 .tun_dev_create = selinux_tun_dev_create,
5646 .tun_dev_post_create = selinux_tun_dev_post_create, 5673 .tun_dev_attach_queue = selinux_tun_dev_attach_queue,
5647 .tun_dev_attach = selinux_tun_dev_attach, 5674 .tun_dev_attach = selinux_tun_dev_attach,
5675 .tun_dev_open = selinux_tun_dev_open,
5648 5676
5649#ifdef CONFIG_SECURITY_NETWORK_XFRM 5677#ifdef CONFIG_SECURITY_NETWORK_XFRM
5650 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 5678 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index df2de54a958d..14d04e63b1f0 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -150,6 +150,6 @@ struct security_class_mapping secclass_map[] = {
150 NULL } }, 150 NULL } },
151 { "kernel_service", { "use_as_override", "create_files_as", NULL } }, 151 { "kernel_service", { "use_as_override", "create_files_as", NULL } },
152 { "tun_socket", 152 { "tun_socket",
153 { COMMON_SOCK_PERMS, NULL } }, 153 { COMMON_SOCK_PERMS, "attach_queue", NULL } },
154 { NULL } 154 { NULL }
155 }; 155 };
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 26c7eee1c309..aa47bcabb5f6 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -110,6 +110,10 @@ struct sk_security_struct {
110 u16 sclass; /* sock security class */ 110 u16 sclass; /* sock security class */
111}; 111};
112 112
113struct tun_security_struct {
114 u32 sid; /* SID for the tun device sockets */
115};
116
113struct key_security_struct { 117struct key_security_struct {
114 u32 sid; /* SID of key */ 118 u32 sid; /* SID of key */
115}; 119};
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index b8fb0a5adb9b..822df971972c 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
3654 hda_set_power_state(codec, AC_PWRST_D0); 3654 hda_set_power_state(codec, AC_PWRST_D0);
3655 restore_shutup_pins(codec); 3655 restore_shutup_pins(codec);
3656 hda_exec_init_verbs(codec); 3656 hda_exec_init_verbs(codec);
3657 snd_hda_jack_set_dirty_all(codec);
3657 if (codec->patch_ops.resume) 3658 if (codec->patch_ops.resume)
3658 codec->patch_ops.resume(codec); 3659 codec->patch_ops.resume(codec);
3659 else { 3660 else {
@@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)
3665 3666
3666 if (codec->jackpoll_interval) 3667 if (codec->jackpoll_interval)
3667 hda_jackpoll_work(&codec->jackpoll_work.work); 3668 hda_jackpoll_work(&codec->jackpoll_work.work);
3668 else { 3669 else
3669 snd_hda_jack_set_dirty_all(codec);
3670 snd_hda_jack_report_sync(codec); 3670 snd_hda_jack_report_sync(codec);
3671 }
3672 3671
3673 codec->in_pm = 0; 3672 codec->in_pm = 0;
3674 snd_hda_power_down(codec); /* flag down before returning */ 3673 snd_hda_power_down(codec); /* flag down before returning */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 0b6aebacc56b..c78286f6e5d8 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -656,29 +656,43 @@ static char *driver_short_names[] = {
656#define get_azx_dev(substream) (substream->runtime->private_data) 656#define get_azx_dev(substream) (substream->runtime->private_data)
657 657
658#ifdef CONFIG_X86 658#ifdef CONFIG_X86
659static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on) 659static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
660{ 660{
661 int pages;
662
661 if (azx_snoop(chip)) 663 if (azx_snoop(chip))
662 return; 664 return;
663 if (addr && size) { 665 if (!dmab || !dmab->area || !dmab->bytes)
664 int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 666 return;
667
668#ifdef CONFIG_SND_DMA_SGBUF
669 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
670 struct snd_sg_buf *sgbuf = dmab->private_data;
665 if (on) 671 if (on)
666 set_memory_wc((unsigned long)addr, pages); 672 set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
667 else 673 else
668 set_memory_wb((unsigned long)addr, pages); 674 set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
675 return;
669 } 676 }
677#endif
678
679 pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
680 if (on)
681 set_memory_wc((unsigned long)dmab->area, pages);
682 else
683 set_memory_wb((unsigned long)dmab->area, pages);
670} 684}
671 685
672static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, 686static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
673 bool on) 687 bool on)
674{ 688{
675 __mark_pages_wc(chip, buf->area, buf->bytes, on); 689 __mark_pages_wc(chip, buf, on);
676} 690}
677static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, 691static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
678 struct snd_pcm_runtime *runtime, bool on) 692 struct snd_pcm_substream *substream, bool on)
679{ 693{
680 if (azx_dev->wc_marked != on) { 694 if (azx_dev->wc_marked != on) {
681 __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on); 695 __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);
682 azx_dev->wc_marked = on; 696 azx_dev->wc_marked = on;
683 } 697 }
684} 698}
@@ -689,7 +703,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
689{ 703{
690} 704}
691static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, 705static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
692 struct snd_pcm_runtime *runtime, bool on) 706 struct snd_pcm_substream *substream, bool on)
693{ 707{
694} 708}
695#endif 709#endif
@@ -1968,11 +1982,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
1968{ 1982{
1969 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); 1983 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
1970 struct azx *chip = apcm->chip; 1984 struct azx *chip = apcm->chip;
1971 struct snd_pcm_runtime *runtime = substream->runtime;
1972 struct azx_dev *azx_dev = get_azx_dev(substream); 1985 struct azx_dev *azx_dev = get_azx_dev(substream);
1973 int ret; 1986 int ret;
1974 1987
1975 mark_runtime_wc(chip, azx_dev, runtime, false); 1988 mark_runtime_wc(chip, azx_dev, substream, false);
1976 azx_dev->bufsize = 0; 1989 azx_dev->bufsize = 0;
1977 azx_dev->period_bytes = 0; 1990 azx_dev->period_bytes = 0;
1978 azx_dev->format_val = 0; 1991 azx_dev->format_val = 0;
@@ -1980,7 +1993,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
1980 params_buffer_bytes(hw_params)); 1993 params_buffer_bytes(hw_params));
1981 if (ret < 0) 1994 if (ret < 0)
1982 return ret; 1995 return ret;
1983 mark_runtime_wc(chip, azx_dev, runtime, true); 1996 mark_runtime_wc(chip, azx_dev, substream, true);
1984 return ret; 1997 return ret;
1985} 1998}
1986 1999
@@ -1989,7 +2002,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
1989 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); 2002 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
1990 struct azx_dev *azx_dev = get_azx_dev(substream); 2003 struct azx_dev *azx_dev = get_azx_dev(substream);
1991 struct azx *chip = apcm->chip; 2004 struct azx *chip = apcm->chip;
1992 struct snd_pcm_runtime *runtime = substream->runtime;
1993 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; 2005 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
1994 2006
1995 /* reset BDL address */ 2007 /* reset BDL address */
@@ -2002,7 +2014,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
2002 2014
2003 snd_hda_codec_cleanup(apcm->codec, hinfo, substream); 2015 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
2004 2016
2005 mark_runtime_wc(chip, azx_dev, runtime, false); 2017 mark_runtime_wc(chip, azx_dev, substream, false);
2006 return snd_pcm_lib_free_pages(substream); 2018 return snd_pcm_lib_free_pages(substream);
2007} 2019}
2008 2020
@@ -3613,13 +3625,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
3613 /* 5 Series/3400 */ 3625 /* 5 Series/3400 */
3614 { PCI_DEVICE(0x8086, 0x3b56), 3626 { PCI_DEVICE(0x8086, 0x3b56),
3615 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, 3627 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
3616 /* SCH */ 3628 /* Poulsbo */
3617 { PCI_DEVICE(0x8086, 0x811b), 3629 { PCI_DEVICE(0x8086, 0x811b),
3618 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 3630 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
3619 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */ 3631 /* Oaktrail */
3620 { PCI_DEVICE(0x8086, 0x080a), 3632 { PCI_DEVICE(0x8086, 0x080a),
3621 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 3633 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
3622 AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */
3623 /* ICH */ 3634 /* ICH */
3624 { PCI_DEVICE(0x8086, 0x2668), 3635 { PCI_DEVICE(0x8086, 0x2668),
3625 .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC | 3636 .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index dd798c3196ff..009b77a693cf 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -4636,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
4636 .patch = patch_conexant_auto }, 4636 .patch = patch_conexant_auto },
4637 { .id = 0x14f15111, .name = "CX20753/4", 4637 { .id = 0x14f15111, .name = "CX20753/4",
4638 .patch = patch_conexant_auto }, 4638 .patch = patch_conexant_auto },
4639 { .id = 0x14f15113, .name = "CX20755",
4640 .patch = patch_conexant_auto },
4641 { .id = 0x14f15114, .name = "CX20756",
4642 .patch = patch_conexant_auto },
4643 { .id = 0x14f15115, .name = "CX20757",
4644 .patch = patch_conexant_auto },
4639 {} /* terminator */ 4645 {} /* terminator */
4640}; 4646};
4641 4647
@@ -4659,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
4659MODULE_ALIAS("snd-hda-codec-id:14f1510f"); 4665MODULE_ALIAS("snd-hda-codec-id:14f1510f");
4660MODULE_ALIAS("snd-hda-codec-id:14f15110"); 4666MODULE_ALIAS("snd-hda-codec-id:14f15110");
4661MODULE_ALIAS("snd-hda-codec-id:14f15111"); 4667MODULE_ALIAS("snd-hda-codec-id:14f15111");
4668MODULE_ALIAS("snd-hda-codec-id:14f15113");
4669MODULE_ALIAS("snd-hda-codec-id:14f15114");
4670MODULE_ALIAS("snd-hda-codec-id:14f15115");
4662 4671
4663MODULE_LICENSE("GPL"); 4672MODULE_LICENSE("GPL");
4664MODULE_DESCRIPTION("Conexant HD-audio codec"); 4673MODULE_DESCRIPTION("Conexant HD-audio codec");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f5196277b6e9..5faaad219a7f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4694,6 +4694,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
4694 SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB), 4694 SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),
4695 SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), 4695 SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
4696 SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), 4696 SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
4697 SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST),
4697 SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734), 4698 SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
4698 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), 4699 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
4699 SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), 4700 SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
@@ -5708,6 +5709,7 @@ static const struct alc_model_fixup alc268_fixup_models[] = {
5708}; 5709};
5709 5710
5710static const struct snd_pci_quirk alc268_fixup_tbl[] = { 5711static const struct snd_pci_quirk alc268_fixup_tbl[] = {
5712 SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
5711 /* below is codec SSID since multiple Toshiba laptops have the 5713 /* below is codec SSID since multiple Toshiba laptops have the
5712 * same PCI SSID 1179:ff00 5714 * same PCI SSID 1179:ff00
5713 */ 5715 */
@@ -6251,6 +6253,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6251 SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), 6253 SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
6252 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), 6254 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
6253 SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), 6255 SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED),
6256 SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED),
6254 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), 6257 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
6255 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), 6258 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC),
6256 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 6259 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6265,6 +6268,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6265 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 6268 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
6266 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 6269 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
6267 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 6270 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
6271 SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
6268 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 6272 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
6269 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), 6273 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
6270 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), 6274 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 1d8bb5917594..ef62c435848e 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -685,7 +685,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
685 } 685 }
686 sr_val = i; 686 sr_val = i;
687 687
688 lrclk = snd_soc_params_to_bclk(params) / params_rate(params); 688 lrclk = rates[bclk] / params_rate(params);
689 689
690 arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n", 690 arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
691 rates[bclk], rates[bclk] / lrclk); 691 rates[bclk], rates[bclk] / lrclk);
@@ -1082,6 +1082,9 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
1082 id, ret); 1082 id, ret);
1083 } 1083 }
1084 1084
1085 regmap_update_bits(arizona->regmap, fll->base + 1,
1086 ARIZONA_FLL1_FREERUN, 0);
1087
1085 return 0; 1088 return 0;
1086} 1089}
1087EXPORT_SYMBOL_GPL(arizona_init_fll); 1090EXPORT_SYMBOL_GPL(arizona_init_fll);
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index e6cefe1ac677..d8c65f574658 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1019,8 +1019,6 @@ static const char *wm2200_mixer_texts[] = {
1019 "EQR", 1019 "EQR",
1020 "LHPF1", 1020 "LHPF1",
1021 "LHPF2", 1021 "LHPF2",
1022 "LHPF3",
1023 "LHPF4",
1024 "DSP1.1", 1022 "DSP1.1",
1025 "DSP1.2", 1023 "DSP1.2",
1026 "DSP1.3", 1024 "DSP1.3",
@@ -1053,7 +1051,6 @@ static int wm2200_mixer_values[] = {
1053 0x25, 1051 0x25,
1054 0x50, /* EQ */ 1052 0x50, /* EQ */
1055 0x51, 1053 0x51,
1056 0x52,
1057 0x60, /* LHPF1 */ 1054 0x60, /* LHPF1 */
1058 0x61, /* LHPF2 */ 1055 0x61, /* LHPF2 */
1059 0x68, /* DSP1 */ 1056 0x68, /* DSP1 */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 7a9048dad1cd..988e817dca05 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -896,8 +896,7 @@ static const unsigned int wm5102_aec_loopback_values[] = {
896 896
897static const struct soc_enum wm5102_aec_loopback = 897static const struct soc_enum wm5102_aec_loopback =
898 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, 898 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
899 ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 899 ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
900 ARIZONA_AEC_LOOPBACK_SRC_MASK,
901 ARRAY_SIZE(wm5102_aec_loopback_texts), 900 ARRAY_SIZE(wm5102_aec_loopback_texts),
902 wm5102_aec_loopback_texts, 901 wm5102_aec_loopback_texts,
903 wm5102_aec_loopback_values); 902 wm5102_aec_loopback_values);
@@ -1153,6 +1152,8 @@ SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
1153SND_SOC_DAPM_OUTPUT("SPKOUTRP"), 1152SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
1154SND_SOC_DAPM_OUTPUT("SPKDAT1L"), 1153SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
1155SND_SOC_DAPM_OUTPUT("SPKDAT1R"), 1154SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
1155
1156SND_SOC_DAPM_OUTPUT("MICSUPP"),
1156}; 1157};
1157 1158
1158#define ARIZONA_MIXER_INPUT_ROUTES(name) \ 1159#define ARIZONA_MIXER_INPUT_ROUTES(name) \
@@ -1365,6 +1366,8 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
1365 { "AEC Loopback", "SPKDAT1R", "OUT5R" }, 1366 { "AEC Loopback", "SPKDAT1R", "OUT5R" },
1366 { "SPKDAT1L", NULL, "OUT5L" }, 1367 { "SPKDAT1L", NULL, "OUT5L" },
1367 { "SPKDAT1R", NULL, "OUT5R" }, 1368 { "SPKDAT1R", NULL, "OUT5R" },
1369
1370 { "MICSUPP", NULL, "SYSCLK" },
1368}; 1371};
1369 1372
1370static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source, 1373static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index ae80c8c28536..0320a32670d3 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -344,8 +344,7 @@ static const unsigned int wm5110_aec_loopback_values[] = {
344 344
345static const struct soc_enum wm5110_aec_loopback = 345static const struct soc_enum wm5110_aec_loopback =
346 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, 346 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
347 ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 347 ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
348 ARIZONA_AEC_LOOPBACK_SRC_MASK,
349 ARRAY_SIZE(wm5110_aec_loopback_texts), 348 ARRAY_SIZE(wm5110_aec_loopback_texts),
350 wm5110_aec_loopback_texts, 349 wm5110_aec_loopback_texts,
351 wm5110_aec_loopback_values); 350 wm5110_aec_loopback_values);
@@ -625,6 +624,8 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
625SND_SOC_DAPM_OUTPUT("SPKDAT1R"), 624SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
626SND_SOC_DAPM_OUTPUT("SPKDAT2L"), 625SND_SOC_DAPM_OUTPUT("SPKDAT2L"),
627SND_SOC_DAPM_OUTPUT("SPKDAT2R"), 626SND_SOC_DAPM_OUTPUT("SPKDAT2R"),
627
628SND_SOC_DAPM_OUTPUT("MICSUPP"),
628}; 629};
629 630
630#define ARIZONA_MIXER_INPUT_ROUTES(name) \ 631#define ARIZONA_MIXER_INPUT_ROUTES(name) \
@@ -833,6 +834,8 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
833 834
834 { "SPKDAT2L", NULL, "OUT6L" }, 835 { "SPKDAT2L", NULL, "OUT6L" },
835 { "SPKDAT2R", NULL, "OUT6R" }, 836 { "SPKDAT2R", NULL, "OUT6R" },
837
838 { "MICSUPP", NULL, "SYSCLK" },
836}; 839};
837 840
838static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source, 841static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 7b198c38f3ef..b6b654837585 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -324,7 +324,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
324 324
325 if (reg) { 325 if (reg) {
326 buf = kmemdup(region->data, le32_to_cpu(region->len), 326 buf = kmemdup(region->data, le32_to_cpu(region->len),
327 GFP_KERNEL); 327 GFP_KERNEL | GFP_DMA);
328 if (!buf) { 328 if (!buf) {
329 adsp_err(dsp, "Out of memory\n"); 329 adsp_err(dsp, "Out of memory\n");
330 return -ENOMEM; 330 return -ENOMEM;
@@ -396,7 +396,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
396 hdr = (void*)&firmware->data[0]; 396 hdr = (void*)&firmware->data[0];
397 if (memcmp(hdr->magic, "WMDR", 4) != 0) { 397 if (memcmp(hdr->magic, "WMDR", 4) != 0) {
398 adsp_err(dsp, "%s: invalid magic\n", file); 398 adsp_err(dsp, "%s: invalid magic\n", file);
399 return -EINVAL; 399 goto out_fw;
400 } 400 }
401 401
402 adsp_dbg(dsp, "%s: v%d.%d.%d\n", file, 402 adsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
@@ -439,7 +439,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
439 439
440 if (reg) { 440 if (reg) {
441 buf = kmemdup(blk->data, le32_to_cpu(blk->len), 441 buf = kmemdup(blk->data, le32_to_cpu(blk->len),
442 GFP_KERNEL); 442 GFP_KERNEL | GFP_DMA);
443 if (!buf) { 443 if (!buf) {
444 adsp_err(dsp, "Out of memory\n"); 444 adsp_err(dsp, "Out of memory\n");
445 return -ENOMEM; 445 return -ENOMEM;
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index bf363d8d044a..500f8ce55d78 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {
154 .pcm_free = imx_pcm_free, 154 .pcm_free = imx_pcm_free,
155}; 155};
156 156
157static int imx_soc_platform_probe(struct platform_device *pdev) 157int imx_pcm_dma_init(struct platform_device *pdev)
158{ 158{
159 return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); 159 return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);
160} 160}
161
162static int imx_soc_platform_remove(struct platform_device *pdev)
163{
164 snd_soc_unregister_platform(&pdev->dev);
165 return 0;
166}
167
168static struct platform_driver imx_pcm_driver = {
169 .driver = {
170 .name = "imx-pcm-audio",
171 .owner = THIS_MODULE,
172 },
173 .probe = imx_soc_platform_probe,
174 .remove = imx_soc_platform_remove,
175};
176
177module_platform_driver(imx_pcm_driver);
178MODULE_LICENSE("GPL");
179MODULE_ALIAS("platform:imx-pcm-audio");
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 5ec362ae4d01..920f945cb2f4 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
281 .pcm_free = imx_pcm_fiq_free, 281 .pcm_free = imx_pcm_fiq_free,
282}; 282};
283 283
284static int imx_soc_platform_probe(struct platform_device *pdev) 284int imx_pcm_fiq_init(struct platform_device *pdev)
285{ 285{
286 struct imx_ssi *ssi = platform_get_drvdata(pdev); 286 struct imx_ssi *ssi = platform_get_drvdata(pdev);
287 int ret; 287 int ret;
@@ -314,23 +314,3 @@ failed_register:
314 314
315 return ret; 315 return ret;
316} 316}
317
318static int imx_soc_platform_remove(struct platform_device *pdev)
319{
320 snd_soc_unregister_platform(&pdev->dev);
321 return 0;
322}
323
324static struct platform_driver imx_pcm_driver = {
325 .driver = {
326 .name = "imx-fiq-pcm-audio",
327 .owner = THIS_MODULE,
328 },
329
330 .probe = imx_soc_platform_probe,
331 .remove = imx_soc_platform_remove,
332};
333
334module_platform_driver(imx_pcm_driver);
335
336MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c
index d5cd9eff3b48..0d0625bfcb65 100644
--- a/sound/soc/fsl/imx-pcm.c
+++ b/sound/soc/fsl/imx-pcm.c
@@ -104,6 +104,38 @@ void imx_pcm_free(struct snd_pcm *pcm)
104} 104}
105EXPORT_SYMBOL_GPL(imx_pcm_free); 105EXPORT_SYMBOL_GPL(imx_pcm_free);
106 106
107static int imx_pcm_probe(struct platform_device *pdev)
108{
109 if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0)
110 return imx_pcm_fiq_init(pdev);
111
112 return imx_pcm_dma_init(pdev);
113}
114
115static int imx_pcm_remove(struct platform_device *pdev)
116{
117 snd_soc_unregister_platform(&pdev->dev);
118 return 0;
119}
120
121static struct platform_device_id imx_pcm_devtype[] = {
122 { .name = "imx-pcm-audio", },
123 { .name = "imx-fiq-pcm-audio", },
124 { /* sentinel */ }
125};
126MODULE_DEVICE_TABLE(platform, imx_pcm_devtype);
127
128static struct platform_driver imx_pcm_driver = {
129 .driver = {
130 .name = "imx-pcm",
131 .owner = THIS_MODULE,
132 },
133 .id_table = imx_pcm_devtype,
134 .probe = imx_pcm_probe,
135 .remove = imx_pcm_remove,
136};
137module_platform_driver(imx_pcm_driver);
138
107MODULE_DESCRIPTION("Freescale i.MX PCM driver"); 139MODULE_DESCRIPTION("Freescale i.MX PCM driver");
108MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 140MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
109MODULE_LICENSE("GPL"); 141MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index 83c0ed7d55c9..5ae13a13a353 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
30int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); 30int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);
31void imx_pcm_free(struct snd_pcm *pcm); 31void imx_pcm_free(struct snd_pcm *pcm);
32 32
33#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
34int imx_pcm_dma_init(struct platform_device *pdev);
35#else
36static inline int imx_pcm_dma_init(struct platform_device *pdev)
37{
38 return -ENODEV;
39}
40#endif
41
42#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
43int imx_pcm_fiq_init(struct platform_device *pdev);
44#else
45static inline int imx_pcm_fiq_init(struct platform_device *pdev)
46{
47 return -ENODEV;
48}
49#endif
50
33#endif /* _IMX_PCM_H */ 51#endif /* _IMX_PCM_H */
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 1e36bc81e5af..258acadb9e7d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1023,7 +1023,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
1023 1023
1024 if (SND_SOC_DAPM_EVENT_ON(event)) { 1024 if (SND_SOC_DAPM_EVENT_ON(event)) {
1025 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { 1025 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
1026 ret = regulator_allow_bypass(w->regulator, true); 1026 ret = regulator_allow_bypass(w->regulator, false);
1027 if (ret != 0) 1027 if (ret != 0)
1028 dev_warn(w->dapm->dev, 1028 dev_warn(w->dapm->dev,
1029 "ASoC: Failed to bypass %s: %d\n", 1029 "ASoC: Failed to bypass %s: %d\n",
@@ -1033,7 +1033,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
1033 return regulator_enable(w->regulator); 1033 return regulator_enable(w->regulator);
1034 } else { 1034 } else {
1035 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { 1035 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
1036 ret = regulator_allow_bypass(w->regulator, false); 1036 ret = regulator_allow_bypass(w->regulator, true);
1037 if (ret != 0) 1037 if (ret != 0)
1038 dev_warn(w->dapm->dev, 1038 dev_warn(w->dapm->dev,
1039 "ASoC: Failed to unbypass %s: %d\n", 1039 "ASoC: Failed to unbypass %s: %d\n",
@@ -3039,6 +3039,14 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
3039 w->name, ret); 3039 w->name, ret);
3040 return NULL; 3040 return NULL;
3041 } 3041 }
3042
3043 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
3044 ret = regulator_allow_bypass(w->regulator, true);
3045 if (ret != 0)
3046 dev_warn(w->dapm->dev,
3047 "ASoC: Failed to unbypass %s: %d\n",
3048 w->name, ret);
3049 }
3042 break; 3050 break;
3043 case snd_soc_dapm_clock_supply: 3051 case snd_soc_dapm_clock_supply:
3044#ifdef CONFIG_CLKDEV_LOOKUP 3052#ifdef CONFIG_CLKDEV_LOOKUP
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ed4d89c8b52a..e90daf8cdaa8 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1331,16 +1331,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
1331 } 1331 }
1332 channels = (hdr->bLength - 7) / csize - 1; 1332 channels = (hdr->bLength - 7) / csize - 1;
1333 bmaControls = hdr->bmaControls; 1333 bmaControls = hdr->bmaControls;
1334 if (hdr->bLength < 7 + csize) {
1335 snd_printk(KERN_ERR "usbaudio: unit %u: "
1336 "invalid UAC_FEATURE_UNIT descriptor\n",
1337 unitid);
1338 return -EINVAL;
1339 }
1334 } else { 1340 } else {
1335 struct uac2_feature_unit_descriptor *ftr = _ftr; 1341 struct uac2_feature_unit_descriptor *ftr = _ftr;
1336 csize = 4; 1342 csize = 4;
1337 channels = (hdr->bLength - 6) / 4 - 1; 1343 channels = (hdr->bLength - 6) / 4 - 1;
1338 bmaControls = ftr->bmaControls; 1344 bmaControls = ftr->bmaControls;
1339 } 1345 if (hdr->bLength < 6 + csize) {
1340 1346 snd_printk(KERN_ERR "usbaudio: unit %u: "
1341 if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) { 1347 "invalid UAC_FEATURE_UNIT descriptor\n",
1342 snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid); 1348 unitid);
1343 return -EINVAL; 1349 return -EINVAL;
1350 }
1344 } 1351 }
1345 1352
1346 /* parse the source unit */ 1353 /* parse the source unit */
diff --git a/tools/Makefile b/tools/Makefile
index 1f9a529fe544..fa36565b209d 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -3,6 +3,7 @@ include scripts/Makefile.include
3help: 3help:
4 @echo 'Possible targets:' 4 @echo 'Possible targets:'
5 @echo '' 5 @echo ''
6 @echo ' cgroup - cgroup tools'
6 @echo ' cpupower - a tool for all things x86 CPU power' 7 @echo ' cpupower - a tool for all things x86 CPU power'
7 @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer' 8 @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
8 @echo ' lguest - a minimal 32-bit x86 hypervisor' 9 @echo ' lguest - a minimal 32-bit x86 hypervisor'
@@ -15,7 +16,7 @@ help:
15 @echo ' x86_energy_perf_policy - Intel energy policy tool' 16 @echo ' x86_energy_perf_policy - Intel energy policy tool'
16 @echo '' 17 @echo ''
17 @echo 'You can do:' 18 @echo 'You can do:'
18 @echo ' $$ make -C tools/<tool>_install' 19 @echo ' $$ make -C tools/ <tool>_install'
19 @echo '' 20 @echo ''
20 @echo ' from the kernel command line to build and install one of' 21 @echo ' from the kernel command line to build and install one of'
21 @echo ' the tools above' 22 @echo ' the tools above'
@@ -33,7 +34,7 @@ help:
33cpupower: FORCE 34cpupower: FORCE
34 $(call descend,power/$@) 35 $(call descend,power/$@)
35 36
36firewire lguest perf usb virtio vm: FORCE 37cgroup firewire lguest perf usb virtio vm: FORCE
37 $(call descend,$@) 38 $(call descend,$@)
38 39
39selftests: FORCE 40selftests: FORCE
@@ -45,7 +46,7 @@ turbostat x86_energy_perf_policy: FORCE
45cpupower_install: 46cpupower_install:
46 $(call descend,power/$(@:_install=),install) 47 $(call descend,power/$(@:_install=),install)
47 48
48firewire_install lguest_install perf_install usb_install virtio_install vm_install: 49cgroup_install firewire_install lguest_install perf_install usb_install virtio_install vm_install:
49 $(call descend,$(@:_install=),install) 50 $(call descend,$(@:_install=),install)
50 51
51selftests_install: 52selftests_install:
@@ -54,14 +55,14 @@ selftests_install:
54turbostat_install x86_energy_perf_policy_install: 55turbostat_install x86_energy_perf_policy_install:
55 $(call descend,power/x86/$(@:_install=),install) 56 $(call descend,power/x86/$(@:_install=),install)
56 57
57install: cpupower_install firewire_install lguest_install perf_install \ 58install: cgroup_install cpupower_install firewire_install lguest_install \
58 selftests_install turbostat_install usb_install virtio_install \ 59 perf_install selftests_install turbostat_install usb_install \
59 vm_install x86_energy_perf_policy_install 60 virtio_install vm_install x86_energy_perf_policy_install
60 61
61cpupower_clean: 62cpupower_clean:
62 $(call descend,power/cpupower,clean) 63 $(call descend,power/cpupower,clean)
63 64
64firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean: 65cgroup_clean firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
65 $(call descend,$(@:_clean=),clean) 66 $(call descend,$(@:_clean=),clean)
66 67
67selftests_clean: 68selftests_clean:
@@ -70,8 +71,8 @@ selftests_clean:
70turbostat_clean x86_energy_perf_policy_clean: 71turbostat_clean x86_energy_perf_policy_clean:
71 $(call descend,power/x86/$(@:_clean=),clean) 72 $(call descend,power/x86/$(@:_clean=),clean)
72 73
73clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \ 74clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \
74 turbostat_clean usb_clean virtio_clean vm_clean \ 75 selftests_clean turbostat_clean usb_clean virtio_clean \
75 x86_energy_perf_policy_clean 76 vm_clean x86_energy_perf_policy_clean
76 77
77.PHONY: FORCE 78.PHONY: FORCE
diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore
new file mode 100644
index 000000000000..633cd9b874f9
--- /dev/null
+++ b/tools/cgroup/.gitignore
@@ -0,0 +1 @@
cgroup_event_listener
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
new file mode 100644
index 000000000000..b4286196b763
--- /dev/null
+++ b/tools/cgroup/Makefile
@@ -0,0 +1,11 @@
1# Makefile for cgroup tools
2
3CC = $(CROSS_COMPILE)gcc
4CFLAGS = -Wall -Wextra
5
6all: cgroup_event_listener
7%: %.c
8 $(CC) $(CFLAGS) -o $@ $^
9
10clean:
11 $(RM) cgroup_event_listener
diff --git a/Documentation/cgroups/cgroup_event_listener.c b/tools/cgroup/cgroup_event_listener.c
index 3e082f96dc12..4eb5507205c9 100644
--- a/Documentation/cgroups/cgroup_event_listener.c
+++ b/tools/cgroup/cgroup_event_listener.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <assert.h> 7#include <assert.h>
8#include <err.h>
8#include <errno.h> 9#include <errno.h>
9#include <fcntl.h> 10#include <fcntl.h>
10#include <libgen.h> 11#include <libgen.h>
@@ -15,7 +16,7 @@
15 16
16#include <sys/eventfd.h> 17#include <sys/eventfd.h>
17 18
18#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>\n" 19#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>"
19 20
20int main(int argc, char **argv) 21int main(int argc, char **argv)
21{ 22{
@@ -26,49 +27,33 @@ int main(int argc, char **argv)
26 char line[LINE_MAX]; 27 char line[LINE_MAX];
27 int ret; 28 int ret;
28 29
29 if (argc != 3) { 30 if (argc != 3)
30 fputs(USAGE_STR, stderr); 31 errx(1, "%s", USAGE_STR);
31 return 1;
32 }
33 32
34 cfd = open(argv[1], O_RDONLY); 33 cfd = open(argv[1], O_RDONLY);
35 if (cfd == -1) { 34 if (cfd == -1)
36 fprintf(stderr, "Cannot open %s: %s\n", argv[1], 35 err(1, "Cannot open %s", argv[1]);
37 strerror(errno));
38 goto out;
39 }
40 36
41 ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control", 37 ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control",
42 dirname(argv[1])); 38 dirname(argv[1]));
43 if (ret >= PATH_MAX) { 39 if (ret >= PATH_MAX)
44 fputs("Path to cgroup.event_control is too long\n", stderr); 40 errx(1, "Path to cgroup.event_control is too long");
45 goto out;
46 }
47 41
48 event_control = open(event_control_path, O_WRONLY); 42 event_control = open(event_control_path, O_WRONLY);
49 if (event_control == -1) { 43 if (event_control == -1)
50 fprintf(stderr, "Cannot open %s: %s\n", event_control_path, 44 err(1, "Cannot open %s", event_control_path);
51 strerror(errno));
52 goto out;
53 }
54 45
55 efd = eventfd(0, 0); 46 efd = eventfd(0, 0);
56 if (efd == -1) { 47 if (efd == -1)
57 perror("eventfd() failed"); 48 err(1, "eventfd() failed");
58 goto out;
59 }
60 49
61 ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]); 50 ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]);
62 if (ret >= LINE_MAX) { 51 if (ret >= LINE_MAX)
63 fputs("Arguments string is too long\n", stderr); 52 errx(1, "Arguments string is too long");
64 goto out;
65 }
66 53
67 ret = write(event_control, line, strlen(line) + 1); 54 ret = write(event_control, line, strlen(line) + 1);
68 if (ret == -1) { 55 if (ret == -1)
69 perror("Cannot write to cgroup.event_control"); 56 err(1, "Cannot write to cgroup.event_control");
70 goto out;
71 }
72 57
73 while (1) { 58 while (1) {
74 uint64_t result; 59 uint64_t result;
@@ -77,34 +62,21 @@ int main(int argc, char **argv)
77 if (ret == -1) { 62 if (ret == -1) {
78 if (errno == EINTR) 63 if (errno == EINTR)
79 continue; 64 continue;
80 perror("Cannot read from eventfd"); 65 err(1, "Cannot read from eventfd");
81 break;
82 } 66 }
83 assert(ret == sizeof(result)); 67 assert(ret == sizeof(result));
84 68
85 ret = access(event_control_path, W_OK); 69 ret = access(event_control_path, W_OK);
86 if ((ret == -1) && (errno == ENOENT)) { 70 if ((ret == -1) && (errno == ENOENT)) {
87 puts("The cgroup seems to have removed."); 71 puts("The cgroup seems to have removed.");
88 ret = 0;
89 break;
90 }
91
92 if (ret == -1) {
93 perror("cgroup.event_control "
94 "is not accessible any more");
95 break; 72 break;
96 } 73 }
97 74
75 if (ret == -1)
76 err(1, "cgroup.event_control is not accessible any more");
77
98 printf("%s %s: crossed\n", argv[1], argv[2]); 78 printf("%s %s: crossed\n", argv[1], argv[2]);
99 } 79 }
100 80
101out: 81 return 0;
102 if (efd >= 0)
103 close(efd);
104 if (event_control >= 0)
105 close(event_control);
106 if (cfd >= 0)
107 close(cfd);
108
109 return (ret != 0);
110} 82}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 5a824e355d04..82b0606dcb8a 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -13,8 +13,7 @@
13 * GNU Lesser General Public License for more details. 13 * GNU Lesser General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU Lesser General Public 15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software 16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 17 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 * 19 *
@@ -1224,6 +1223,34 @@ static int field_is_long(struct format_field *field)
1224 return 0; 1223 return 0;
1225} 1224}
1226 1225
1226static unsigned int type_size(const char *name)
1227{
1228 /* This covers all FIELD_IS_STRING types. */
1229 static struct {
1230 const char *type;
1231 unsigned int size;
1232 } table[] = {
1233 { "u8", 1 },
1234 { "u16", 2 },
1235 { "u32", 4 },
1236 { "u64", 8 },
1237 { "s8", 1 },
1238 { "s16", 2 },
1239 { "s32", 4 },
1240 { "s64", 8 },
1241 { "char", 1 },
1242 { },
1243 };
1244 int i;
1245
1246 for (i = 0; table[i].type; i++) {
1247 if (!strcmp(table[i].type, name))
1248 return table[i].size;
1249 }
1250
1251 return 0;
1252}
1253
1227static int event_read_fields(struct event_format *event, struct format_field **fields) 1254static int event_read_fields(struct event_format *event, struct format_field **fields)
1228{ 1255{
1229 struct format_field *field = NULL; 1256 struct format_field *field = NULL;
@@ -1233,6 +1260,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f
1233 int count = 0; 1260 int count = 0;
1234 1261
1235 do { 1262 do {
1263 unsigned int size_dynamic = 0;
1264
1236 type = read_token(&token); 1265 type = read_token(&token);
1237 if (type == EVENT_NEWLINE) { 1266 if (type == EVENT_NEWLINE) {
1238 free_token(token); 1267 free_token(token);
@@ -1391,6 +1420,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
1391 field->type = new_type; 1420 field->type = new_type;
1392 strcat(field->type, " "); 1421 strcat(field->type, " ");
1393 strcat(field->type, field->name); 1422 strcat(field->type, field->name);
1423 size_dynamic = type_size(field->name);
1394 free_token(field->name); 1424 free_token(field->name);
1395 strcat(field->type, brackets); 1425 strcat(field->type, brackets);
1396 field->name = token; 1426 field->name = token;
@@ -1463,7 +1493,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f
1463 if (read_expect_type(EVENT_ITEM, &token)) 1493 if (read_expect_type(EVENT_ITEM, &token))
1464 goto fail; 1494 goto fail;
1465 1495
1466 /* add signed type */ 1496 if (strtoul(token, NULL, 0))
1497 field->flags |= FIELD_IS_SIGNED;
1467 1498
1468 free_token(token); 1499 free_token(token);
1469 if (read_expected(EVENT_OP, ";") < 0) 1500 if (read_expected(EVENT_OP, ";") < 0)
@@ -1478,10 +1509,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
1478 if (field->flags & FIELD_IS_ARRAY) { 1509 if (field->flags & FIELD_IS_ARRAY) {
1479 if (field->arraylen) 1510 if (field->arraylen)
1480 field->elementsize = field->size / field->arraylen; 1511 field->elementsize = field->size / field->arraylen;
1512 else if (field->flags & FIELD_IS_DYNAMIC)
1513 field->elementsize = size_dynamic;
1481 else if (field->flags & FIELD_IS_STRING) 1514 else if (field->flags & FIELD_IS_STRING)
1482 field->elementsize = 1; 1515 field->elementsize = 1;
1483 else 1516 else if (field->flags & FIELD_IS_LONG)
1484 field->elementsize = event->pevent->long_size; 1517 field->elementsize = event->pevent ?
1518 event->pevent->long_size :
1519 sizeof(long);
1485 } else 1520 } else
1486 field->elementsize = field->size; 1521 field->elementsize = field->size;
1487 1522
@@ -1785,6 +1820,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
1785 strcmp(token, "/") == 0 || 1820 strcmp(token, "/") == 0 ||
1786 strcmp(token, "<") == 0 || 1821 strcmp(token, "<") == 0 ||
1787 strcmp(token, ">") == 0 || 1822 strcmp(token, ">") == 0 ||
1823 strcmp(token, "<=") == 0 ||
1824 strcmp(token, ">=") == 0 ||
1788 strcmp(token, "==") == 0 || 1825 strcmp(token, "==") == 0 ||
1789 strcmp(token, "!=") == 0) { 1826 strcmp(token, "!=") == 0) {
1790 1827
@@ -2481,7 +2518,7 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
2481 2518
2482 free_token(token); 2519 free_token(token);
2483 arg = alloc_arg(); 2520 arg = alloc_arg();
2484 if (!field) { 2521 if (!arg) {
2485 do_warning("%s: not enough memory!", __func__); 2522 do_warning("%s: not enough memory!", __func__);
2486 *tok = NULL; 2523 *tok = NULL;
2487 return EVENT_ERROR; 2524 return EVENT_ERROR;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 24a4bbabc5d5..7be7e89533e4 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -13,8 +13,7 @@
13 * GNU Lesser General Public License for more details. 13 * GNU Lesser General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU Lesser General Public 15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software 16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 17 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 19 */
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h
index bc075006966e..e76c9acb92cd 100644
--- a/tools/lib/traceevent/event-utils.h
+++ b/tools/lib/traceevent/event-utils.h
@@ -13,8 +13,7 @@
13 * GNU Lesser General Public License for more details. 13 * GNU Lesser General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU Lesser General Public 15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software 16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 17 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 19 */
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 5ea4326ad11f..2500e75583fc 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -13,8 +13,7 @@
13 * GNU Lesser General Public License for more details. 13 * GNU Lesser General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU Lesser General Public 15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software 16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 17 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 19 */
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
index f023a133abb6..bba701cf10e6 100644
--- a/tools/lib/traceevent/parse-utils.c
+++ b/tools/lib/traceevent/parse-utils.c
@@ -1,3 +1,22 @@
1/*
2 * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 */
1#include <stdio.h> 20#include <stdio.h>
2#include <stdlib.h> 21#include <stdlib.h>
3#include <string.h> 22#include <string.h>
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index b1ccc923e8a5..a57db805136a 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -13,8 +13,7 @@
13 * GNU Lesser General Public License for more details. 13 * GNU Lesser General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU Lesser General Public 15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software 16 * License along with this program; if not, see <http://www.gnu.org/licenses>
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * 17 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */ 19 */
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index ef6d22e879eb..eb30044a922a 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -222,10 +222,14 @@ install-pdf: pdf
222#install-html: html 222#install-html: html
223# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) 223# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
224 224
225ifneq ($(MAKECMDGOALS),clean)
226ifneq ($(MAKECMDGOALS),tags)
225$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 227$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
226 $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE 228 $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
227 229
228-include $(OUTPUT)PERF-VERSION-FILE 230-include $(OUTPUT)PERF-VERSION-FILE
231endif
232endif
229 233
230# 234#
231# Determine "include::" file references in asciidoc files. 235# Determine "include::" file references in asciidoc files.
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index c8ffd9fd5c6a..5ad07ef417f0 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -61,11 +61,13 @@ OPTIONS
61 61
62--stdio:: Use the stdio interface. 62--stdio:: Use the stdio interface.
63 63
64--tui:: Use the TUI interface Use of --tui requires a tty, if one is not 64--tui:: Use the TUI interface. Use of --tui requires a tty, if one is not
65 present, as when piping to other commands, the stdio interface is 65 present, as when piping to other commands, the stdio interface is
66 used. This interfaces starts by centering on the line with more 66 used. This interfaces starts by centering on the line with more
67 samples, TAB/UNTAB cycles through the lines with more samples. 67 samples, TAB/UNTAB cycles through the lines with more samples.
68 68
69--gtk:: Use the GTK interface.
70
69-C:: 71-C::
70--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can 72--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
71 be provided as a comma-separated list with no space: 0,1. Ranges of 73 be provided as a comma-separated list with no space: 0,1. Ranges of
@@ -88,6 +90,9 @@ OPTIONS
88--objdump=<path>:: 90--objdump=<path>::
89 Path to objdump binary. 91 Path to objdump binary.
90 92
93--skip-missing::
94 Skip symbols that cannot be annotated.
95
91SEE ALSO 96SEE ALSO
92-------- 97--------
93linkperf:perf-record[1], linkperf:perf-report[1] 98linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index c1057701a7dc..e9a8349a7172 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -24,6 +24,13 @@ OPTIONS
24-r:: 24-r::
25--remove=:: 25--remove=::
26 Remove specified file from the cache. 26 Remove specified file from the cache.
27-M::
28--missing=::
29 List missing build ids in the cache for the specified file.
30-u::
31--update::
32 Update specified file of the cache. It can be used to update kallsyms
33 kernel dso to vmlinux in order to support annotation.
27-v:: 34-v::
28--verbose:: 35--verbose::
29 Be more verbose. 36 Be more verbose.
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 194f37d635df..5b3123d5721f 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -22,10 +22,6 @@ specified perf.data files.
22 22
23OPTIONS 23OPTIONS
24------- 24-------
25-M::
26--displacement::
27 Show position displacement relative to baseline.
28
29-D:: 25-D::
30--dump-raw-trace:: 26--dump-raw-trace::
31 Dump raw trace in ASCII. 27 Dump raw trace in ASCII.
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 15217345c2fa..1ceb3700ffbb 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -28,6 +28,10 @@ OPTIONS
28--verbose=:: 28--verbose=::
29 Show all fields. 29 Show all fields.
30 30
31-g::
32--group::
33 Show event group information.
34
31SEE ALSO 35SEE ALSO
32-------- 36--------
33linkperf:perf-record[1], linkperf:perf-list[1], 37linkperf:perf-record[1], linkperf:perf-list[1],
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index f4d91bebd59d..02284a0067f0 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -57,11 +57,44 @@ OPTIONS
57 57
58-s:: 58-s::
59--sort=:: 59--sort=::
60 Sort by key(s): pid, comm, dso, symbol, parent, srcline. 60 Sort histogram entries by given key(s) - multiple keys can be specified
61 in CSV format. Following sort keys are available:
62 pid, comm, dso, symbol, parent, cpu, srcline.
63
64 Each key has following meaning:
65
66 - comm: command (name) of the task which can be read via /proc/<pid>/comm
67 - pid: command and tid of the task
68 - dso: name of library or module executed at the time of sample
69 - symbol: name of function executed at the time of sample
70 - parent: name of function matched to the parent regex filter. Unmatched
71 entries are displayed as "[other]".
72 - cpu: cpu number the task ran at the time of sample
73 - srcline: filename and line number executed at the time of sample. The
74 DWARF debuggin info must be provided.
75
76 By default, comm, dso and symbol keys are used.
77 (i.e. --sort comm,dso,symbol)
78
79 If --branch-stack option is used, following sort keys are also
80 available:
81 dso_from, dso_to, symbol_from, symbol_to, mispredict.
82
83 - dso_from: name of library or module branched from
84 - dso_to: name of library or module branched to
85 - symbol_from: name of function branched from
86 - symbol_to: name of function branched to
87 - mispredict: "N" for predicted branch, "Y" for mispredicted branch
88
89 And default sort keys are changed to comm, dso_from, symbol_from, dso_to
90 and symbol_to, see '--branch-stack'.
61 91
62-p:: 92-p::
63--parent=<regex>:: 93--parent=<regex>::
64 regex filter to identify parent, see: '--sort parent' 94 A regex filter to identify parent. The parent is a caller of this
95 function and searched through the callchain, thus it requires callchain
96 information recorded. The pattern is in the exteneded regex format and
97 defaults to "\^sys_|^do_page_fault", see '--sort parent'.
65 98
66-x:: 99-x::
67--exclude-other:: 100--exclude-other::
@@ -74,7 +107,6 @@ OPTIONS
74 107
75-t:: 108-t::
76--field-separator=:: 109--field-separator=::
77
78 Use a special separator character and don't pad with spaces, replacing 110 Use a special separator character and don't pad with spaces, replacing
79 all occurrences of this separator in symbol names (and other output) 111 all occurrences of this separator in symbol names (and other output)
80 with a '.' character, that thus it's the only non valid separator. 112 with a '.' character, that thus it's the only non valid separator.
@@ -171,6 +203,9 @@ OPTIONS
171--objdump=<path>:: 203--objdump=<path>::
172 Path to objdump binary. 204 Path to objdump binary.
173 205
206--group::
207 Show event group information together.
208
174SEE ALSO 209SEE ALSO
175-------- 210--------
176linkperf:perf-stat[1], linkperf:perf-annotate[1] 211linkperf:perf-stat[1], linkperf:perf-annotate[1]
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index a4027f221a53..9f1f054b8432 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -336,7 +336,6 @@ scripts listed by the 'perf script -l' command e.g.:
336---- 336----
337root@tropicana:~# perf script -l 337root@tropicana:~# perf script -l
338List of available trace scripts: 338List of available trace scripts:
339 workqueue-stats workqueue stats (ins/exe/create/destroy)
340 wakeup-latency system-wide min/max/avg wakeup latency 339 wakeup-latency system-wide min/max/avg wakeup latency
341 rw-by-file <comm> r/w activity for a program, by file 340 rw-by-file <comm> r/w activity for a program, by file
342 rw-by-pid system-wide r/w activity 341 rw-by-pid system-wide r/w activity
@@ -402,7 +401,6 @@ should show a new entry for your script:
402---- 401----
403root@tropicana:~# perf script -l 402root@tropicana:~# perf script -l
404List of available trace scripts: 403List of available trace scripts:
405 workqueue-stats workqueue stats (ins/exe/create/destroy)
406 wakeup-latency system-wide min/max/avg wakeup latency 404 wakeup-latency system-wide min/max/avg wakeup latency
407 rw-by-file <comm> r/w activity for a program, by file 405 rw-by-file <comm> r/w activity for a program, by file
408 rw-by-pid system-wide r/w activity 406 rw-by-pid system-wide r/w activity
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index cf0c3107e06e..faf4f4feebcc 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -114,6 +114,17 @@ with it. --append may be used here. Examples:
114 114
115perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage 115perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
116 116
117-I msecs::
118--interval-print msecs::
119 Print count deltas every N milliseconds (minimum: 100ms)
120 example: perf stat -I 1000 -e cycles -a sleep 5
121
122--aggr-socket::
123Aggregate counts per processor socket for system-wide mode measurements. This
124is a useful mode to detect imbalance between sockets. To enable this mode,
125use --aggr-socket in addition to -a. (system-wide). The output includes the
126socket number and the number of online processors on that socket. This is
127useful to gauge the amount of aggregation.
117 128
118EXAMPLES 129EXAMPLES
119-------- 130--------
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index b24ac40fcd58..d1d3e5121f89 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -23,6 +23,10 @@ from 'perf test list'.
23 23
24OPTIONS 24OPTIONS
25------- 25-------
26-s::
27--skip::
28 Tests to skip (comma separater numeric list).
29
26-v:: 30-v::
27--verbose:: 31--verbose::
28 Be more verbose. 32 Be more verbose.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 5b80d84d6b4a..a414bc95fd52 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -60,7 +60,7 @@ Default is to monitor all CPUS.
60 60
61-i:: 61-i::
62--inherit:: 62--inherit::
63 Child tasks inherit counters, only makes sens with -p option. 63 Child tasks do not inherit counters.
64 64
65-k <path>:: 65-k <path>::
66--vmlinux=<path>:: 66--vmlinux=<path>::
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 80db3f4bcf7a..39d41068484f 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -11,11 +11,21 @@ lib/rbtree.c
11include/linux/swab.h 11include/linux/swab.h
12arch/*/include/asm/unistd*.h 12arch/*/include/asm/unistd*.h
13arch/*/include/asm/perf_regs.h 13arch/*/include/asm/perf_regs.h
14arch/*/include/uapi/asm/unistd*.h
15arch/*/include/uapi/asm/perf_regs.h
14arch/*/lib/memcpy*.S 16arch/*/lib/memcpy*.S
15arch/*/lib/memset*.S 17arch/*/lib/memset*.S
16include/linux/poison.h 18include/linux/poison.h
17include/linux/magic.h 19include/linux/magic.h
18include/linux/hw_breakpoint.h 20include/linux/hw_breakpoint.h
21include/linux/rbtree_augmented.h
22include/uapi/linux/perf_event.h
23include/uapi/linux/const.h
24include/uapi/linux/swab.h
25include/uapi/linux/hw_breakpoint.h
19arch/x86/include/asm/svm.h 26arch/x86/include/asm/svm.h
20arch/x86/include/asm/vmx.h 27arch/x86/include/asm/vmx.h
21arch/x86/include/asm/kvm_host.h 28arch/x86/include/asm/kvm_host.h
29arch/x86/include/uapi/asm/svm.h
30arch/x86/include/uapi/asm/vmx.h
31arch/x86/include/uapi/asm/kvm.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 8ab05e543ef4..a2108ca1cc17 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -47,10 +47,11 @@ include config/utilities.mak
47# backtrace post unwind. 47# backtrace post unwind.
48# 48#
49# Define NO_BACKTRACE if you do not want stack backtrace debug feature 49# Define NO_BACKTRACE if you do not want stack backtrace debug feature
50#
51# Define NO_LIBNUMA if you do not want numa perf benchmark
50 52
51$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 53$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
52 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 54 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
53-include $(OUTPUT)PERF-VERSION-FILE
54 55
55uname_M := $(shell uname -m 2>/dev/null || echo not) 56uname_M := $(shell uname -m 2>/dev/null || echo not)
56 57
@@ -148,13 +149,25 @@ RM = rm -f
148MKDIR = mkdir 149MKDIR = mkdir
149FIND = find 150FIND = find
150INSTALL = install 151INSTALL = install
152FLEX = flex
153BISON= bison
151 154
152# sparse is architecture-neutral, which means that we need to tell it 155# sparse is architecture-neutral, which means that we need to tell it
153# explicitly what architecture to check for. Fix this up for yours.. 156# explicitly what architecture to check for. Fix this up for yours..
154SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ 157SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
155 158
159ifneq ($(MAKECMDGOALS),clean)
160ifneq ($(MAKECMDGOALS),tags)
156-include config/feature-tests.mak 161-include config/feature-tests.mak
157 162
163ifeq ($(call get-executable,$(FLEX)),)
164 dummy := $(error Error: $(FLEX) is missing on this system, please install it)
165endif
166
167ifeq ($(call get-executable,$(BISON)),)
168 dummy := $(error Error: $(BISON) is missing on this system, please install it)
169endif
170
158ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) 171ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
159 CFLAGS := $(CFLAGS) -fstack-protector-all 172 CFLAGS := $(CFLAGS) -fstack-protector-all
160endif 173endif
@@ -206,6 +219,8 @@ ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
206 EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) 219 EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
207 BASIC_CFLAGS += -I. 220 BASIC_CFLAGS += -I.
208endif 221endif
222endif # MAKECMDGOALS != tags
223endif # MAKECMDGOALS != clean
209 224
210# Guard against environment variables 225# Guard against environment variables
211BUILTIN_OBJS = 226BUILTIN_OBJS =
@@ -230,11 +245,19 @@ endif
230LIBTRACEEVENT = $(TE_PATH)libtraceevent.a 245LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
231TE_LIB := -L$(TE_PATH) -ltraceevent 246TE_LIB := -L$(TE_PATH) -ltraceevent
232 247
248export LIBTRACEEVENT
249
250# python extension build directories
251PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
252PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
253PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
254export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
255
256python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
257
233PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) 258PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
234PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py 259PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py
235 260
236export LIBTRACEEVENT
237
238$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) 261$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
239 $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ 262 $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \
240 --quiet build_ext; \ 263 --quiet build_ext; \
@@ -269,20 +292,17 @@ endif
269 292
270export PERL_PATH 293export PERL_PATH
271 294
272FLEX = flex
273BISON= bison
274
275$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c 295$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
276 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c 296 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
277 297
278$(OUTPUT)util/parse-events-bison.c: util/parse-events.y 298$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
279 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c 299 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
280 300
281$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c 301$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
282 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c 302 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
283 303
284$(OUTPUT)util/pmu-bison.c: util/pmu.y 304$(OUTPUT)util/pmu-bison.c: util/pmu.y
285 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c 305 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
286 306
287$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c 307$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
288$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c 308$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
@@ -378,8 +398,11 @@ LIB_H += util/rblist.h
378LIB_H += util/intlist.h 398LIB_H += util/intlist.h
379LIB_H += util/perf_regs.h 399LIB_H += util/perf_regs.h
380LIB_H += util/unwind.h 400LIB_H += util/unwind.h
381LIB_H += ui/helpline.h
382LIB_H += util/vdso.h 401LIB_H += util/vdso.h
402LIB_H += ui/helpline.h
403LIB_H += ui/progress.h
404LIB_H += ui/util.h
405LIB_H += ui/ui.h
383 406
384LIB_OBJS += $(OUTPUT)util/abspath.o 407LIB_OBJS += $(OUTPUT)util/abspath.o
385LIB_OBJS += $(OUTPUT)util/alias.o 408LIB_OBJS += $(OUTPUT)util/alias.o
@@ -453,6 +476,7 @@ LIB_OBJS += $(OUTPUT)util/stat.o
453LIB_OBJS += $(OUTPUT)ui/setup.o 476LIB_OBJS += $(OUTPUT)ui/setup.o
454LIB_OBJS += $(OUTPUT)ui/helpline.o 477LIB_OBJS += $(OUTPUT)ui/helpline.o
455LIB_OBJS += $(OUTPUT)ui/progress.o 478LIB_OBJS += $(OUTPUT)ui/progress.o
479LIB_OBJS += $(OUTPUT)ui/util.o
456LIB_OBJS += $(OUTPUT)ui/hist.o 480LIB_OBJS += $(OUTPUT)ui/hist.o
457LIB_OBJS += $(OUTPUT)ui/stdio/hist.o 481LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
458 482
@@ -471,7 +495,8 @@ LIB_OBJS += $(OUTPUT)tests/rdpmc.o
471LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o 495LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
472LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o 496LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
473LIB_OBJS += $(OUTPUT)tests/pmu.o 497LIB_OBJS += $(OUTPUT)tests/pmu.o
474LIB_OBJS += $(OUTPUT)tests/util.o 498LIB_OBJS += $(OUTPUT)tests/hists_link.o
499LIB_OBJS += $(OUTPUT)tests/python-use.o
475 500
476BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 501BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
477BUILTIN_OBJS += $(OUTPUT)builtin-bench.o 502BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -510,14 +535,13 @@ PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT)
510# 535#
511# Platform specific tweaks 536# Platform specific tweaks
512# 537#
538ifneq ($(MAKECMDGOALS),clean)
539ifneq ($(MAKECMDGOALS),tags)
513 540
514# We choose to avoid "if .. else if .. else .. endif endif" 541# We choose to avoid "if .. else if .. else .. endif endif"
515# because maintaining the nesting to match is a pain. If 542# because maintaining the nesting to match is a pain. If
516# we had "elif" things would have been much nicer... 543# we had "elif" things would have been much nicer...
517 544
518-include config.mak.autogen
519-include config.mak
520
521ifdef NO_LIBELF 545ifdef NO_LIBELF
522 NO_DWARF := 1 546 NO_DWARF := 1
523 NO_DEMANGLE := 1 547 NO_DEMANGLE := 1
@@ -557,6 +581,11 @@ else
557endif # SOURCE_LIBELF 581endif # SOURCE_LIBELF
558endif # NO_LIBELF 582endif # NO_LIBELF
559 583
584# There's only x86 (both 32 and 64) support for CFI unwind so far
585ifneq ($(ARCH),x86)
586 NO_LIBUNWIND := 1
587endif
588
560ifndef NO_LIBUNWIND 589ifndef NO_LIBUNWIND
561# for linking with debug library, run like: 590# for linking with debug library, run like:
562# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ 591# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
@@ -646,7 +675,6 @@ ifndef NO_NEWT
646 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o 675 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
647 LIB_OBJS += $(OUTPUT)ui/browsers/map.o 676 LIB_OBJS += $(OUTPUT)ui/browsers/map.o
648 LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o 677 LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
649 LIB_OBJS += $(OUTPUT)ui/util.o
650 LIB_OBJS += $(OUTPUT)ui/tui/setup.o 678 LIB_OBJS += $(OUTPUT)ui/tui/setup.o
651 LIB_OBJS += $(OUTPUT)ui/tui/util.o 679 LIB_OBJS += $(OUTPUT)ui/tui/util.o
652 LIB_OBJS += $(OUTPUT)ui/tui/helpline.o 680 LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
@@ -655,9 +683,6 @@ ifndef NO_NEWT
655 LIB_H += ui/browsers/map.h 683 LIB_H += ui/browsers/map.h
656 LIB_H += ui/keysyms.h 684 LIB_H += ui/keysyms.h
657 LIB_H += ui/libslang.h 685 LIB_H += ui/libslang.h
658 LIB_H += ui/progress.h
659 LIB_H += ui/util.h
660 LIB_H += ui/ui.h
661 endif 686 endif
662endif 687endif
663 688
@@ -673,14 +698,12 @@ ifndef NO_GTK2
673 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) 698 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
674 EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) 699 EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
675 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o 700 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
701 LIB_OBJS += $(OUTPUT)ui/gtk/hists.o
676 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o 702 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
677 LIB_OBJS += $(OUTPUT)ui/gtk/util.o 703 LIB_OBJS += $(OUTPUT)ui/gtk/util.o
678 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o 704 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
679 LIB_OBJS += $(OUTPUT)ui/gtk/progress.o 705 LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
680 # Make sure that it'd be included only once. 706 LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o
681 ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),)
682 LIB_OBJS += $(OUTPUT)ui/util.o
683 endif
684 endif 707 endif
685endif 708endif
686 709
@@ -707,7 +730,7 @@ disable-python = $(eval $(disable-python_code))
707define disable-python_code 730define disable-python_code
708 BASIC_CFLAGS += -DNO_LIBPYTHON 731 BASIC_CFLAGS += -DNO_LIBPYTHON
709 $(if $(1),$(warning No $(1) was found)) 732 $(if $(1),$(warning No $(1) was found))
710 $(warning Python support won't be built) 733 $(warning Python support will not be built)
711endef 734endef
712 735
713override PYTHON := \ 736override PYTHON := \
@@ -715,19 +738,10 @@ override PYTHON := \
715 738
716ifndef PYTHON 739ifndef PYTHON
717 $(call disable-python,python interpreter) 740 $(call disable-python,python interpreter)
718 python-clean :=
719else 741else
720 742
721 PYTHON_WORD := $(call shell-wordify,$(PYTHON)) 743 PYTHON_WORD := $(call shell-wordify,$(PYTHON))
722 744
723 # python extension build directories
724 PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
725 PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
726 PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
727 export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
728
729 python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
730
731 ifdef NO_LIBPYTHON 745 ifdef NO_LIBPYTHON
732 $(call disable-python) 746 $(call disable-python)
733 else 747 else
@@ -839,10 +853,24 @@ ifndef NO_BACKTRACE
839 endif 853 endif
840endif 854endif
841 855
856ifndef NO_LIBNUMA
857 FLAGS_LIBNUMA = $(ALL_CFLAGS) $(ALL_LDFLAGS) -lnuma
858 ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y)
859 msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
860 else
861 BASIC_CFLAGS += -DLIBNUMA_SUPPORT
862 BUILTIN_OBJS += $(OUTPUT)bench/numa.o
863 EXTLIBS += -lnuma
864 endif
865endif
866
842ifdef ASCIIDOC8 867ifdef ASCIIDOC8
843 export ASCIIDOC8 868 export ASCIIDOC8
844endif 869endif
845 870
871endif # MAKECMDGOALS != tags
872endif # MAKECMDGOALS != clean
873
846# Shell quote (do not use $(call) to accommodate ancient setups); 874# Shell quote (do not use $(call) to accommodate ancient setups);
847 875
848ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) 876ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
@@ -884,7 +912,7 @@ strip: $(PROGRAMS) $(OUTPUT)perf
884 $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf 912 $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
885 913
886$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 914$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
887 $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ 915 $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
888 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ 916 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
889 $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@ 917 $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
890 918
@@ -948,7 +976,13 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
948 976
949$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS 977$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
950 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ 978 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
951 '-DBINDIR="$(bindir_SQ)"' \ 979 '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
980 $<
981
982$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
983 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
984 -DPYTHONPATH='"$(OUTPUT)python"' \
985 -DPYTHON='"$(PYTHON_WORD)"' \
952 $< 986 $<
953 987
954$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS 988$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
@@ -1099,7 +1133,7 @@ perfexec_instdir = $(prefix)/$(perfexecdir)
1099endif 1133endif
1100perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) 1134perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
1101 1135
1102install: all try-install-man 1136install-bin: all
1103 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' 1137 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
1104 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' 1138 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
1105 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 1139 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
@@ -1120,6 +1154,8 @@ install: all try-install-man
1120 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1154 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
1121 $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 1155 $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
1122 1156
1157install: install-bin try-install-man
1158
1123install-python_ext: 1159install-python_ext:
1124 $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' 1160 $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
1125 1161
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 3e975cb6232e..aacef07ebf31 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -155,6 +155,7 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
155 if (lookup_path(buf)) 155 if (lookup_path(buf))
156 goto out; 156 goto out;
157 free(buf); 157 free(buf);
158 buf = NULL;
158 } 159 }
159 160
160 if (!strcmp(arch, "arm")) 161 if (!strcmp(arch, "arm"))
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 8f89998eeaf4..a5223e6a7b43 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -1,6 +1,7 @@
1#ifndef BENCH_H 1#ifndef BENCH_H
2#define BENCH_H 2#define BENCH_H
3 3
4extern int bench_numa(int argc, const char **argv, const char *prefix);
4extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); 5extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
5extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); 6extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
6extern int bench_mem_memcpy(int argc, const char **argv, 7extern int bench_mem_memcpy(int argc, const char **argv,
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
new file mode 100644
index 000000000000..30d1c3225b46
--- /dev/null
+++ b/tools/perf/bench/numa.c
@@ -0,0 +1,1731 @@
1/*
2 * numa.c
3 *
4 * numa: Simulate NUMA-sensitive workload and measure their NUMA performance
5 */
6
7#include "../perf.h"
8#include "../builtin.h"
9#include "../util/util.h"
10#include "../util/parse-options.h"
11
12#include "bench.h"
13
14#include <errno.h>
15#include <sched.h>
16#include <stdio.h>
17#include <assert.h>
18#include <malloc.h>
19#include <signal.h>
20#include <stdlib.h>
21#include <string.h>
22#include <unistd.h>
23#include <pthread.h>
24#include <sys/mman.h>
25#include <sys/time.h>
26#include <sys/wait.h>
27#include <sys/prctl.h>
28#include <sys/types.h>
29
30#include <numa.h>
31#include <numaif.h>
32
33/*
34 * Regular printout to the terminal, supressed if -q is specified:
35 */
36#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
37
38/*
39 * Debug printf:
40 */
41#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
42
43struct thread_data {
44 int curr_cpu;
45 cpu_set_t bind_cpumask;
46 int bind_node;
47 u8 *process_data;
48 int process_nr;
49 int thread_nr;
50 int task_nr;
51 unsigned int loops_done;
52 u64 val;
53 u64 runtime_ns;
54 pthread_mutex_t *process_lock;
55};
56
57/* Parameters set by options: */
58
59struct params {
60 /* Startup synchronization: */
61 bool serialize_startup;
62
63 /* Task hierarchy: */
64 int nr_proc;
65 int nr_threads;
66
67 /* Working set sizes: */
68 const char *mb_global_str;
69 const char *mb_proc_str;
70 const char *mb_proc_locked_str;
71 const char *mb_thread_str;
72
73 double mb_global;
74 double mb_proc;
75 double mb_proc_locked;
76 double mb_thread;
77
78 /* Access patterns to the working set: */
79 bool data_reads;
80 bool data_writes;
81 bool data_backwards;
82 bool data_zero_memset;
83 bool data_rand_walk;
84 u32 nr_loops;
85 u32 nr_secs;
86 u32 sleep_usecs;
87
88 /* Working set initialization: */
89 bool init_zero;
90 bool init_random;
91 bool init_cpu0;
92
93 /* Misc options: */
94 int show_details;
95 int run_all;
96 int thp;
97
98 long bytes_global;
99 long bytes_process;
100 long bytes_process_locked;
101 long bytes_thread;
102
103 int nr_tasks;
104 bool show_quiet;
105
106 bool show_convergence;
107 bool measure_convergence;
108
109 int perturb_secs;
110 int nr_cpus;
111 int nr_nodes;
112
113 /* Affinity options -C and -N: */
114 char *cpu_list_str;
115 char *node_list_str;
116};
117
118
119/* Global, read-writable area, accessible to all processes and threads: */
120
121struct global_info {
122 u8 *data;
123
124 pthread_mutex_t startup_mutex;
125 int nr_tasks_started;
126
127 pthread_mutex_t startup_done_mutex;
128
129 pthread_mutex_t start_work_mutex;
130 int nr_tasks_working;
131
132 pthread_mutex_t stop_work_mutex;
133 u64 bytes_done;
134
135 struct thread_data *threads;
136
137 /* Convergence latency measurement: */
138 bool all_converged;
139 bool stop_work;
140
141 int print_once;
142
143 struct params p;
144};
145
146static struct global_info *g = NULL;
147
148static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
149static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
150
151struct params p0;
152
153static const struct option options[] = {
154 OPT_INTEGER('p', "nr_proc" , &p0.nr_proc, "number of processes"),
155 OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"),
156
157 OPT_STRING('G', "mb_global" , &p0.mb_global_str, "MB", "global memory (MBs)"),
158 OPT_STRING('P', "mb_proc" , &p0.mb_proc_str, "MB", "process memory (MBs)"),
159 OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
160 OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"),
161
162 OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run"),
163 OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run"),
164 OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"),
165
166 OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"),
167 OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"),
168 OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"),
169 OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
170 OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk, "access the data with random (32bit LFSR) walk"),
171
172
173 OPT_BOOLEAN('z', "init_zero" , &p0.init_zero, "bzero the initial allocations"),
174 OPT_BOOLEAN('I', "init_random" , &p0.init_random, "randomize the contents of the initial allocations"),
175 OPT_BOOLEAN('0', "init_cpu0" , &p0.init_cpu0, "do the initial allocations on CPU#0"),
176 OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs, "perturb thread 0/0 every X secs, to test convergence stability"),
177
178 OPT_INCR ('d', "show_details" , &p0.show_details, "Show details"),
179 OPT_INCR ('a', "all" , &p0.run_all, "Run all tests in the suite"),
180 OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
181 OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
182 OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
183 OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"),
184 OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
185
186 /* Special option string parsing callbacks: */
187 OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
188 "bind the first N tasks to these specific cpus (the rest is unbound)",
189 parse_cpus_opt),
190 OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
191 "bind the first N tasks to these specific memory nodes (the rest is unbound)",
192 parse_nodes_opt),
193 OPT_END()
194};
195
/* Usage string shown by the option parser for the 'mem' benchmark: */
static const char * const bench_numa_usage[] = {
	"perf bench numa <options>",
	NULL
};
200
/* Top-level usage string for 'perf bench numa': */
static const char * const numa_usage[] = {
	"perf bench numa mem [<options>]",
	NULL
};
205
206static cpu_set_t bind_to_cpu(int target_cpu)
207{
208 cpu_set_t orig_mask, mask;
209 int ret;
210
211 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
212 BUG_ON(ret);
213
214 CPU_ZERO(&mask);
215
216 if (target_cpu == -1) {
217 int cpu;
218
219 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
220 CPU_SET(cpu, &mask);
221 } else {
222 BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
223 CPU_SET(target_cpu, &mask);
224 }
225
226 ret = sched_setaffinity(0, sizeof(mask), &mask);
227 BUG_ON(ret);
228
229 return orig_mask;
230}
231
232static cpu_set_t bind_to_node(int target_node)
233{
234 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
235 cpu_set_t orig_mask, mask;
236 int cpu;
237 int ret;
238
239 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
240 BUG_ON(!cpus_per_node);
241
242 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
243 BUG_ON(ret);
244
245 CPU_ZERO(&mask);
246
247 if (target_node == -1) {
248 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
249 CPU_SET(cpu, &mask);
250 } else {
251 int cpu_start = (target_node + 0) * cpus_per_node;
252 int cpu_stop = (target_node + 1) * cpus_per_node;
253
254 BUG_ON(cpu_stop > g->p.nr_cpus);
255
256 for (cpu = cpu_start; cpu < cpu_stop; cpu++)
257 CPU_SET(cpu, &mask);
258 }
259
260 ret = sched_setaffinity(0, sizeof(mask), &mask);
261 BUG_ON(ret);
262
263 return orig_mask;
264}
265
/* Apply the given CPU affinity mask to the calling task: */
static void bind_to_cpumask(cpu_set_t mask)
{
	BUG_ON(sched_setaffinity(0, sizeof(mask), &mask) != 0);
}
273
/*
 * Reset the NUMA memory policy of the calling task back to the
 * system default (undoes a prior bind_to_memnode()).
 */
static void mempol_restore(void)
{
	int ret;

	/* NOTE(review): for MPOL_DEFAULT with a NULL nodemask the maxnode
	 * argument is presumably ignored - confirm against set_mempolicy(2). */
	ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);

	BUG_ON(ret);
}
282
283static void bind_to_memnode(int node)
284{
285 unsigned long nodemask;
286 int ret;
287
288 if (node == -1)
289 return;
290
291 BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask));
292 nodemask = 1L << node;
293
294 ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
295 dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
296
297 BUG_ON(ret);
298}
299
/* Huge page size - the working set is aligned to this boundary: */
#define HPSIZE (2*1024*1024)

/*
 * Set the comm/task name of the calling thread (visible in 'top',
 * 'ps' etc.), printf-style. snprintf() truncates safely to the
 * buffer size (was a duplicated magic '20').
 */
#define set_taskname(fmt...)				\
do {							\
	char name[20];					\
							\
	snprintf(name, sizeof(name), fmt);		\
	prctl(PR_SET_NAME, name);			\
} while (0)
309
/*
 * mmap() an anonymous buffer of 'bytes0' bytes (plus one huge page of
 * slack for alignment), optionally bound to CPU#0/node#0, zeroed or
 * randomized, with THP advice applied. Returns a 2MB-aligned pointer,
 * or NULL for a zero-sized request.
 *
 * NOTE(review): the returned pointer is the *aligned* address, not the
 * one mmap() returned - free_data() presumably relies on munmap()
 * accepting it together with the unpadded size; confirm.
 */
static u8 *alloc_data(ssize_t bytes0, int map_flags,
		      int init_zero, int init_cpu0, int thp, int init_random)
{
	cpu_set_t orig_mask;
	ssize_t bytes;
	u8 *buf;
	int ret;

	if (!bytes0)
		return NULL;

	/* Allocate and initialize all memory on CPU#0: */
	if (init_cpu0) {
		orig_mask = bind_to_node(0);
		bind_to_memnode(0);
	}

	/* One extra huge page of slack, for the 2MB alignment below: */
	bytes = bytes0 + HPSIZE;

	/* (void *)-1 is MAP_FAILED: */
	buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
	BUG_ON(buf == (void *)-1);

	/* THP advice only makes sense for private mappings: */
	if (map_flags == MAP_PRIVATE) {
		if (thp > 0) {
			ret = madvise(buf, bytes, MADV_HUGEPAGE);
			if (ret && !g->print_once) {
				g->print_once = 1;
				printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
			}
		}
		if (thp < 0) {
			ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
			if (ret && !g->print_once) {
				g->print_once = 1;
				printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
			}
		}
	}

	if (init_zero) {
		bzero(buf, bytes);
	} else {
		/* Initialize random contents, different in each word: */
		if (init_random) {
			u64 *wbuf = (void *)buf;
			long off = rand();
			long i;

			for (i = 0; i < bytes/8; i++)
				wbuf[i] = i + off;
		}
	}

	/* Align to 2MB boundary: */
	buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));

	/* Restore affinity: */
	if (init_cpu0) {
		bind_to_cpumask(orig_mask);
		mempol_restore();
	}

	return buf;
}
374
/* Unmap a buffer obtained from alloc_data(); NULL is a no-op: */
static void free_data(void *data, ssize_t bytes)
{
	if (!data)
		return;

	BUG_ON(munmap(data, bytes) != 0);
}
385
386/*
387 * Create a shared memory buffer that can be shared between processes, zeroed:
388 */
389static void * zalloc_shared_data(ssize_t bytes)
390{
391 return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random);
392}
393
394/*
395 * Create a shared memory buffer that can be shared between processes:
396 */
397static void * setup_shared_data(ssize_t bytes)
398{
399 return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
400}
401
402/*
403 * Allocate process-local memory - this will either be shared between
404 * threads of this process, or only be accessed by this thread:
405 */
406static void * setup_private_data(ssize_t bytes)
407{
408 return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
409}
410
411/*
412 * Return a process-shared (global) mutex:
413 */
414static void init_global_mutex(pthread_mutex_t *mutex)
415{
416 pthread_mutexattr_t attr;
417
418 pthread_mutexattr_init(&attr);
419 pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
420 pthread_mutex_init(mutex, &attr);
421}
422
/*
 * Record the -C/--cpus list string; it is parsed and applied later by
 * parse_setup_cpu_list(), once 'g' and the thread array exist.
 */
static int parse_cpu_list(const char *arg)
{
	p0.cpu_list_str = strdup(arg);

	dprintf("got CPU list: {%s}\n", p0.cpu_list_str);

	return 0;
}
431
/*
 * Parse the -C/--cpus binding string and fill in the per-task
 * bind_cpumask of the first N tasks.
 *
 * Syntax per comma-separated token:  cpu[-cpu2][_len][#step][xmul]
 *   - 'cpu-cpu2' binds a range of CPUs, one task per step,
 *   - '_len'     allows a task to run on 'len' consecutive CPUs,
 *   - '#step'    advances by 'step' CPUs between tasks,
 *   - 'xmul'     repeats the same binding for 'mul' tasks.
 */
static void parse_setup_cpu_list(void)
{
	struct thread_data *td;
	char *str0, *str;
	int t;

	if (!g->p.cpu_list_str)
		return;

	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

	/* strsep() consumes 'str' - keep 'str0' around for free(): */
	str0 = str = strdup(g->p.cpu_list_str);
	t = 0;

	BUG_ON(!str);

	tprintf("# binding tasks to CPUs:\n");
	tprintf("#  ");

	while (true) {
		int bind_cpu, bind_cpu_0, bind_cpu_1;
		char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
		int bind_len;
		int step;
		int mul;

		tok = strsep(&str, ",");
		if (!tok)
			break;

		tok_end = strstr(tok, "-");

		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
		if (!tok_end) {
			/* Single CPU specified: */
			bind_cpu_0 = bind_cpu_1 = atol(tok);
		} else {
			/* CPU range specified (for example: "5-11"): */
			bind_cpu_0 = atol(tok);
			bind_cpu_1 = atol(tok_end + 1);
		}

		step = 1;
		tok_step = strstr(tok, "#");
		if (tok_step) {
			step = atol(tok_step + 1);
			BUG_ON(step <= 0 || step >= g->p.nr_cpus);
		}

		/*
		 * Mask length.
		 * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
		 * where the _4 means the next 4 CPUs are allowed.
		 */
		bind_len = 1;
		tok_len = strstr(tok, "_");
		if (tok_len) {
			bind_len = atol(tok_len + 1);
			BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
		}

		/* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
		mul = 1;
		tok_mul = strstr(tok, "x");
		if (tok_mul) {
			mul = atol(tok_mul + 1);
			BUG_ON(mul <= 0);
		}

		dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);

		BUG_ON(bind_cpu_0 < 0 || bind_cpu_0 >= g->p.nr_cpus);
		BUG_ON(bind_cpu_1 < 0 || bind_cpu_1 >= g->p.nr_cpus);
		BUG_ON(bind_cpu_0 > bind_cpu_1);

		for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
			int i;

			for (i = 0; i < mul; i++) {
				int cpu;

				/* Extra bindings beyond nr_tasks are ignored: */
				if (t >= g->p.nr_tasks) {
					printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
					goto out;
				}
				td = g->threads + t;

				if (t)
					tprintf(",");
				if (bind_len > 1) {
					tprintf("%2d/%d", bind_cpu, bind_len);
				} else {
					tprintf("%2d", bind_cpu);
				}

				/* Allow exactly the [bind_cpu, bind_cpu+bind_len) window: */
				CPU_ZERO(&td->bind_cpumask);
				for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
					BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
					CPU_SET(cpu, &td->bind_cpumask);
				}
				t++;
			}
		}
	}
out:

	tprintf("\n");

	if (t < g->p.nr_tasks)
		printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

	free(str0);
}
545
546static int parse_cpus_opt(const struct option *opt __maybe_unused,
547 const char *arg, int unset __maybe_unused)
548{
549 if (!arg)
550 return -1;
551
552 return parse_cpu_list(arg);
553}
554
/*
 * Record the -M/--memnodes list string; it is parsed and applied later
 * by parse_setup_node_list(), once 'g' and the thread array exist.
 */
static int parse_node_list(const char *arg)
{
	p0.node_list_str = strdup(arg);

	dprintf("got NODE list: {%s}\n", p0.node_list_str);

	return 0;
}
563
/*
 * Parse the -M/--memnodes binding string and fill in the per-task
 * bind_node of the first N tasks.
 *
 * Syntax per comma-separated token:  node[-node2][#step][xmul]
 * (same grammar as the CPU list, minus the '_len' mask-length part)
 */
static void parse_setup_node_list(void)
{
	struct thread_data *td;
	char *str0, *str;
	int t;

	if (!g->p.node_list_str)
		return;

	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

	/* strsep() consumes 'str' - keep 'str0' around for free(): */
	str0 = str = strdup(g->p.node_list_str);
	t = 0;

	BUG_ON(!str);

	tprintf("# binding tasks to NODEs:\n");
	tprintf("# ");

	while (true) {
		int bind_node, bind_node_0, bind_node_1;
		char *tok, *tok_end, *tok_step, *tok_mul;
		int step;
		int mul;

		tok = strsep(&str, ",");
		if (!tok)
			break;

		tok_end = strstr(tok, "-");

		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
		if (!tok_end) {
			/* Single NODE specified: */
			bind_node_0 = bind_node_1 = atol(tok);
		} else {
			/* NODE range specified (for example: "5-11"): */
			bind_node_0 = atol(tok);
			bind_node_1 = atol(tok_end + 1);
		}

		step = 1;
		tok_step = strstr(tok, "#");
		if (tok_step) {
			step = atol(tok_step + 1);
			BUG_ON(step <= 0 || step >= g->p.nr_nodes);
		}

		/* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
		mul = 1;
		tok_mul = strstr(tok, "x");
		if (tok_mul) {
			mul = atol(tok_mul + 1);
			BUG_ON(mul <= 0);
		}

		dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);

		BUG_ON(bind_node_0 < 0 || bind_node_0 >= g->p.nr_nodes);
		BUG_ON(bind_node_1 < 0 || bind_node_1 >= g->p.nr_nodes);
		BUG_ON(bind_node_0 > bind_node_1);

		for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
			int i;

			for (i = 0; i < mul; i++) {
				/* Extra bindings beyond nr_tasks are ignored: */
				if (t >= g->p.nr_tasks) {
					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
					goto out;
				}
				td = g->threads + t;

				if (!t)
					tprintf(" %2d", bind_node);
				else
					tprintf(",%2d", bind_node);

				td->bind_node = bind_node;
				t++;
			}
		}
	}
out:

	tprintf("\n");

	if (t < g->p.nr_tasks)
		printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

	free(str0);
}
655
656static int parse_nodes_opt(const struct option *opt __maybe_unused,
657 const char *arg, int unset __maybe_unused)
658{
659 if (!arg)
660 return -1;
661
662 return parse_node_list(arg);
663
664 return 0;
665}
666
/* Single-bit mask - argument parenthesized so BIT(a+b) expands correctly: */
#define BIT(x) (1ul << (x))

/*
 * One step of a 32-bit Galois LFSR with taps at bits 1, 5, 6 and 31,
 * used by the -r/--data_rand_walk access pattern. Returns the
 * successor state of 'lfsr'.
 */
static inline uint32_t lfsr_32(uint32_t lfsr)
{
	const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);

	/* (0 - (lfsr & 1)) is all-ones iff the shifted-out bit was set: */
	return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}
674
675/*
676 * Make sure there's real data dependency to RAM (when read
677 * accesses are enabled), so the compiler, the CPU and the
678 * kernel (KSM, zero page, etc.) cannot optimize away RAM
679 * accesses:
680 */
681static inline u64 access_data(u64 *data __attribute__((unused)), u64 val)
682{
683 if (g->p.data_reads)
684 val += *data;
685 if (g->p.data_writes)
686 *data = val + 1;
687 return val;
688}
689
690/*
691 * The worker process does two types of work, a forwards going
692 * loop and a backwards going loop.
693 *
694 * We do this so that on multiprocessor systems we do not create
695 * a 'train' of processing, with highly synchronized processes,
696 * skewing the whole benchmark.
697 */
698static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
699{
700 long words = bytes/sizeof(u64);
701 u64 *data = (void *)__data;
702 long chunk_0, chunk_1;
703 u64 *d0, *d, *d1;
704 long off;
705 long i;
706
707 BUG_ON(!data && words);
708 BUG_ON(data && !words);
709
710 if (!data)
711 return val;
712
713 /* Very simple memset() work variant: */
714 if (g->p.data_zero_memset && !g->p.data_rand_walk) {
715 bzero(data, bytes);
716 return val;
717 }
718
719 /* Spread out by PID/TID nr and by loop nr: */
720 chunk_0 = words/nr_max;
721 chunk_1 = words/g->p.nr_loops;
722 off = nr*chunk_0 + loop*chunk_1;
723
724 while (off >= words)
725 off -= words;
726
727 if (g->p.data_rand_walk) {
728 u32 lfsr = nr + loop + val;
729 int j;
730
731 for (i = 0; i < words/1024; i++) {
732 long start, end;
733
734 lfsr = lfsr_32(lfsr);
735
736 start = lfsr % words;
737 end = min(start + 1024, words-1);
738
739 if (g->p.data_zero_memset) {
740 bzero(data + start, (end-start) * sizeof(u64));
741 } else {
742 for (j = start; j < end; j++)
743 val = access_data(data + j, val);
744 }
745 }
746 } else if (!g->p.data_backwards || (nr + loop) & 1) {
747
748 d0 = data + off;
749 d = data + off + 1;
750 d1 = data + words;
751
752 /* Process data forwards: */
753 for (;;) {
754 if (unlikely(d >= d1))
755 d = data;
756 if (unlikely(d == d0))
757 break;
758
759 val = access_data(d, val);
760
761 d++;
762 }
763 } else {
764 /* Process data backwards: */
765
766 d0 = data + off;
767 d = data + off - 1;
768 d1 = data + words;
769
770 /* Process data forwards: */
771 for (;;) {
772 if (unlikely(d < data))
773 d = data + words-1;
774 if (unlikely(d == d0))
775 break;
776
777 val = access_data(d, val);
778
779 d--;
780 }
781 }
782
783 return val;
784}
785
/*
 * Publish the CPU this task currently runs on (read by the convergence
 * measurement code in the main process via the shared thread array).
 */
static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
{
	unsigned int cpu;

	cpu = sched_getcpu();

	g->threads[task_nr].curr_cpu = cpu;
	/*
	 * NOTE(review): prctl option 0 is invalid and the call fails -
	 * presumably a deliberate no-op that makes the work count visible
	 * in syscall tracing; confirm before removing.
	 */
	prctl(0, bytes_worked);
}
795
796#define MAX_NR_NODES 64
797
798/*
799 * Count the number of nodes a process's threads
800 * are spread out on.
801 *
802 * A count of 1 means that the process is compressed
803 * to a single node. A count of g->p.nr_nodes means it's
804 * spread out on the whole system.
805 */
806static int count_process_nodes(int process_nr)
807{
808 char node_present[MAX_NR_NODES] = { 0, };
809 int nodes;
810 int n, t;
811
812 for (t = 0; t < g->p.nr_threads; t++) {
813 struct thread_data *td;
814 int task_nr;
815 int node;
816
817 task_nr = process_nr*g->p.nr_threads + t;
818 td = g->threads + task_nr;
819
820 node = numa_node_of_cpu(td->curr_cpu);
821 node_present[node] = 1;
822 }
823
824 nodes = 0;
825
826 for (n = 0; n < MAX_NR_NODES; n++)
827 nodes += node_present[n];
828
829 return nodes;
830}
831
832/*
833 * Count the number of distinct process-threads a node contains.
834 *
835 * A count of 1 means that the node contains only a single
836 * process. If all nodes on the system contain at most one
837 * process then we are well-converged.
838 */
839static int count_node_processes(int node)
840{
841 int processes = 0;
842 int t, p;
843
844 for (p = 0; p < g->p.nr_proc; p++) {
845 for (t = 0; t < g->p.nr_threads; t++) {
846 struct thread_data *td;
847 int task_nr;
848 int n;
849
850 task_nr = p*g->p.nr_threads + t;
851 td = g->threads + task_nr;
852
853 n = numa_node_of_cpu(td->curr_cpu);
854 if (n == node) {
855 processes++;
856 break;
857 }
858 }
859 }
860
861 return processes;
862}
863
864static void calc_convergence_compression(int *strong)
865{
866 unsigned int nodes_min, nodes_max;
867 int p;
868
869 nodes_min = -1;
870 nodes_max = 0;
871
872 for (p = 0; p < g->p.nr_proc; p++) {
873 unsigned int nodes = count_process_nodes(p);
874
875 nodes_min = min(nodes, nodes_min);
876 nodes_max = max(nodes, nodes_max);
877 }
878
879 /* Strong convergence: all threads compress on a single node: */
880 if (nodes_min == 1 && nodes_max == 1) {
881 *strong = 1;
882 } else {
883 *strong = 0;
884 tprintf(" {%d-%d}", nodes_min, nodes_max);
885 }
886}
887
/*
 * Measure how well the tasks have converged onto distinct NUMA nodes:
 * counts tasks per node, the spread of per-process node usage and the
 * number of process groups per node. On strong convergence it records
 * the convergence latency into *convergence and (in measurement mode)
 * asks all workers to stop.
 */
static void calc_convergence(double runtime_ns_max, double *convergence)
{
	unsigned int loops_done_min, loops_done_max;
	int process_groups;
	int nodes[MAX_NR_NODES];
	int distance;
	int nr_min;
	int nr_max;
	int strong;
	int sum;
	int nr;
	int node;
	int cpu;
	int t;

	if (!g->p.show_convergence && !g->p.measure_convergence)
		return;

	for (node = 0; node < g->p.nr_nodes; node++)
		nodes[node] = 0;

	loops_done_min = -1;
	loops_done_max = 0;

	/* Tally how many tasks currently run on each node: */
	for (t = 0; t < g->p.nr_tasks; t++) {
		struct thread_data *td = g->threads + t;
		unsigned int loops_done;

		cpu = td->curr_cpu;

		/* Not all threads have written it yet: */
		if (cpu < 0)
			continue;

		node = numa_node_of_cpu(cpu);

		nodes[node]++;

		loops_done = td->loops_done;
		loops_done_min = min(loops_done, loops_done_min);
		loops_done_max = max(loops_done, loops_done_max);
	}

	nr_max = 0;
	nr_min = g->p.nr_tasks;
	sum = 0;

	for (node = 0; node < g->p.nr_nodes; node++) {
		nr = nodes[node];
		nr_min = min(nr, nr_min);
		nr_max = max(nr, nr_max);
		sum += nr;
	}
	BUG_ON(nr_min > nr_max);

	BUG_ON(sum > g->p.nr_tasks);

	/* Disabled debug early-out, kept deliberately ('if (0 && ...)'): */
	if (0 && (sum < g->p.nr_tasks))
		return;

	/*
	 * Count the number of distinct process groups present
	 * on nodes - when we are converged this will decrease
	 * to g->p.nr_proc:
	 */
	process_groups = 0;

	for (node = 0; node < g->p.nr_nodes; node++) {
		int processes = count_node_processes(node);

		nr = nodes[node];
		tprintf(" %2d/%-2d", nr, processes);

		process_groups += processes;
	}

	distance = nr_max - nr_min;

	tprintf(" [%2d/%-2d]", distance, process_groups);

	tprintf(" l:%3d-%-3d (%3d)",
		loops_done_min, loops_done_max, loops_done_max-loops_done_min);

	if (loops_done_min && loops_done_max) {
		double skew = 1.0 - (double)loops_done_min/loops_done_max;

		tprintf(" [%4.1f%%]", skew * 100.0);
	}

	calc_convergence_compression(&strong);

	if (strong && process_groups == g->p.nr_proc) {
		/* Record the latency only once, on first convergence: */
		if (!*convergence) {
			*convergence = runtime_ns_max;
			tprintf(" (%6.1fs converged)\n", *convergence/1e9);
			if (g->p.measure_convergence) {
				g->all_converged = true;
				g->stop_work = true;
			}
		}
	} else {
		if (*convergence) {
			tprintf(" (%6.1fs de-converged)", runtime_ns_max/1e9);
			*convergence = 0;
		}
		tprintf("\n");
	}
}
996
997static void show_summary(double runtime_ns_max, int l, double *convergence)
998{
999 tprintf("\r # %5.1f%% [%.1f mins]",
1000 (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max/1e9 / 60.0);
1001
1002 calc_convergence(runtime_ns_max, convergence);
1003
1004 if (g->p.show_details >= 0)
1005 fflush(stdout);
1006}
1007
/*
 * Main body of each benchmark task: binds itself per its thread_data,
 * allocates its thread-local working set, optionally synchronizes
 * startup with all other tasks, then runs the measured work loop until
 * nr_loops, nr_secs or a global stop request ends it. Accumulates its
 * byte count into g->bytes_done on exit.
 */
static void *worker_thread(void *__tdata)
{
	struct thread_data *td = __tdata;
	struct timeval start0, start, stop, diff;
	int process_nr = td->process_nr;
	int thread_nr = td->thread_nr;
	unsigned long last_perturbance;
	int task_nr = td->task_nr;
	int details = g->p.show_details;
	int first_task, last_task;
	double convergence = 0;
	u64 val = td->val;
	double runtime_ns_max;
	u8 *global_data;
	u8 *process_data;
	u8 *thread_data;
	u64 bytes_done;
	long work_done;
	u32 l;

	/* Apply the CPU/node bindings set up by parse_setup_*_list(): */
	bind_to_cpumask(td->bind_cpumask);
	bind_to_memnode(td->bind_node);

	set_taskname("thread %d/%d", process_nr, thread_nr);

	global_data = g->data;
	process_data = td->process_data;
	thread_data = setup_private_data(g->p.bytes_thread);

	bytes_done = 0;

	/* The last task prints the periodic summary, the first gets perturbed: */
	last_task = 0;
	if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
		last_task = 1;

	first_task = 0;
	if (process_nr == 0 && thread_nr == 0)
		first_task = 1;

	if (details >= 2) {
		printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
			process_nr, thread_nr, global_data, process_data, thread_data);
	}

	if (g->p.serialize_startup) {
		pthread_mutex_lock(&g->startup_mutex);
		g->nr_tasks_started++;
		pthread_mutex_unlock(&g->startup_mutex);

		/* Here we will wait for the main process to start us all at once: */
		pthread_mutex_lock(&g->start_work_mutex);
		g->nr_tasks_working++;

		/* Last one wake the main process: */
		if (g->nr_tasks_working == g->p.nr_tasks)
			pthread_mutex_unlock(&g->startup_done_mutex);

		pthread_mutex_unlock(&g->start_work_mutex);
	}

	gettimeofday(&start0, NULL);

	start = stop = start0;
	last_perturbance = start.tv_sec;

	for (l = 0; l < g->p.nr_loops; l++) {
		start = stop;

		if (g->stop_work)
			break;

		/* The three working-set tiers: global, per-process, per-thread: */
		val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,	l, val);
		val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,	l, val);
		val += do_work(thread_data,  g->p.bytes_thread,  0,          1,		l, val);

		if (g->p.sleep_usecs) {
			pthread_mutex_lock(td->process_lock);
			usleep(g->p.sleep_usecs);
			pthread_mutex_unlock(td->process_lock);
		}
		/*
		 * Amount of work to be done under a process-global lock:
		 */
		if (g->p.bytes_process_locked) {
			pthread_mutex_lock(td->process_lock);
			val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,	l, val);
			pthread_mutex_unlock(td->process_lock);
		}

		work_done = g->p.bytes_global + g->p.bytes_process +
			    g->p.bytes_process_locked + g->p.bytes_thread;

		update_curr_cpu(task_nr, work_done);
		bytes_done += work_done;

		/* Skip the bookkeeping below when nothing needs it: */
		if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
			continue;

		td->loops_done = l;

		gettimeofday(&stop, NULL);

		/* Check whether our max runtime timed out: */
		if (g->p.nr_secs) {
			timersub(&stop, &start0, &diff);
			if (diff.tv_sec >= g->p.nr_secs) {
				g->stop_work = true;
				break;
			}
		}

		/* Update the summary at most once per second: */
		if (start.tv_sec == stop.tv_sec)
			continue;

		/*
		 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
		 * by migrating to CPU#0:
		 */
		if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
			cpu_set_t orig_mask;
			int target_cpu;
			int this_cpu;

			last_perturbance = stop.tv_sec;

			/*
			 * Depending on where we are running, move into
			 * the other half of the system, to create some
			 * real disturbance:
			 */
			this_cpu = g->threads[task_nr].curr_cpu;
			if (this_cpu < g->p.nr_cpus/2)
				target_cpu = g->p.nr_cpus-1;
			else
				target_cpu = 0;

			orig_mask = bind_to_cpu(target_cpu);

			/* Here we are running on the target CPU already */
			if (details >= 1)
				printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);

			bind_to_cpumask(orig_mask);
		}

		if (details >= 3) {
			timersub(&stop, &start, &diff);
			runtime_ns_max = diff.tv_sec * 1000000000;
			runtime_ns_max += diff.tv_usec * 1000;

			if (details >= 0) {
				printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016lx]\n",
					process_nr, thread_nr, runtime_ns_max / bytes_done, val);
			}
			fflush(stdout);
		}
		/* Only the last task prints the global progress summary: */
		if (!last_task)
			continue;

		timersub(&stop, &start0, &diff);
		runtime_ns_max = diff.tv_sec * 1000000000ULL;
		runtime_ns_max += diff.tv_usec * 1000ULL;

		show_summary(runtime_ns_max, l, &convergence);
	}

	gettimeofday(&stop, NULL);
	timersub(&stop, &start0, &diff);
	td->runtime_ns = diff.tv_sec * 1000000000ULL;
	td->runtime_ns += diff.tv_usec * 1000ULL;

	free_data(thread_data, g->p.bytes_thread);

	/* Publish this task's byte count under the shared lock: */
	pthread_mutex_lock(&g->stop_work_mutex);
	g->bytes_done += bytes_done;
	pthread_mutex_unlock(&g->stop_work_mutex);

	return NULL;
}
1188
1189/*
1190 * A worker process starts a couple of threads:
1191 */
1192static void worker_process(int process_nr)
1193{
1194 pthread_mutex_t process_lock;
1195 struct thread_data *td;
1196 pthread_t *pthreads;
1197 u8 *process_data;
1198 int task_nr;
1199 int ret;
1200 int t;
1201
1202 pthread_mutex_init(&process_lock, NULL);
1203 set_taskname("process %d", process_nr);
1204
1205 /*
1206 * Pick up the memory policy and the CPU binding of our first thread,
1207 * so that we initialize memory accordingly:
1208 */
1209 task_nr = process_nr*g->p.nr_threads;
1210 td = g->threads + task_nr;
1211
1212 bind_to_memnode(td->bind_node);
1213 bind_to_cpumask(td->bind_cpumask);
1214
1215 pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
1216 process_data = setup_private_data(g->p.bytes_process);
1217
1218 if (g->p.show_details >= 3) {
1219 printf(" # process %2d global mem: %p, process mem: %p\n",
1220 process_nr, g->data, process_data);
1221 }
1222
1223 for (t = 0; t < g->p.nr_threads; t++) {
1224 task_nr = process_nr*g->p.nr_threads + t;
1225 td = g->threads + task_nr;
1226
1227 td->process_data = process_data;
1228 td->process_nr = process_nr;
1229 td->thread_nr = t;
1230 td->task_nr = task_nr;
1231 td->val = rand();
1232 td->curr_cpu = -1;
1233 td->process_lock = &process_lock;
1234
1235 ret = pthread_create(pthreads + t, NULL, worker_thread, td);
1236 BUG_ON(ret);
1237 }
1238
1239 for (t = 0; t < g->p.nr_threads; t++) {
1240 ret = pthread_join(pthreads[t], NULL);
1241 BUG_ON(ret);
1242 }
1243
1244 free_data(process_data, g->p.bytes_process);
1245 free(pthreads);
1246}
1247
1248static void print_summary(void)
1249{
1250 if (g->p.show_details < 0)
1251 return;
1252
1253 printf("\n ###\n");
1254 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1255 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
1256 printf(" # %5dx %5ldMB global shared mem operations\n",
1257 g->p.nr_loops, g->p.bytes_global/1024/1024);
1258 printf(" # %5dx %5ldMB process shared mem operations\n",
1259 g->p.nr_loops, g->p.bytes_process/1024/1024);
1260 printf(" # %5dx %5ldMB thread local mem operations\n",
1261 g->p.nr_loops, g->p.bytes_thread/1024/1024);
1262
1263 printf(" ###\n");
1264
1265 printf("\n ###\n"); fflush(stdout);
1266}
1267
1268static void init_thread_data(void)
1269{
1270 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1271 int t;
1272
1273 g->threads = zalloc_shared_data(size);
1274
1275 for (t = 0; t < g->p.nr_tasks; t++) {
1276 struct thread_data *td = g->threads + t;
1277 int cpu;
1278
1279 /* Allow all nodes by default: */
1280 td->bind_node = -1;
1281
1282 /* Allow all CPUs by default: */
1283 CPU_ZERO(&td->bind_cpumask);
1284 for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
1285 CPU_SET(cpu, &td->bind_cpumask);
1286 }
1287}
1288
1289static void deinit_thread_data(void)
1290{
1291 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1292
1293 free_data(g->threads, size);
1294}
1295
/*
 * Set up the shared global state: allocate 'g' MAP_SHARED, copy in the
 * parsed parameters, validate and convert the MB strings to byte
 * counts, allocate the global working set and the shared mutexes, and
 * apply the -C/-M binding lists. Returns -1 if no working set size
 * was specified at all.
 */
static int init(void)
{
	g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);

	/* Copy over options: */
	g->p = p0;

	g->p.nr_cpus = numa_num_configured_cpus();

	g->p.nr_nodes = numa_max_node() + 1;

	/* char array in count_process_nodes(): */
	BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);

	/* Quiet implies minimal detail, unless -d was given explicitly: */
	if (g->p.show_quiet && !g->p.show_details)
		g->p.show_details = -1;

	/* Some memory should be specified: */
	if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
		return -1;

	if (g->p.mb_global_str) {
		g->p.mb_global = atof(g->p.mb_global_str);
		BUG_ON(g->p.mb_global < 0);
	}

	if (g->p.mb_proc_str) {
		g->p.mb_proc = atof(g->p.mb_proc_str);
		BUG_ON(g->p.mb_proc < 0);
	}

	if (g->p.mb_proc_locked_str) {
		g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
		BUG_ON(g->p.mb_proc_locked < 0);
		BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
	}

	if (g->p.mb_thread_str) {
		g->p.mb_thread = atof(g->p.mb_thread_str);
		BUG_ON(g->p.mb_thread < 0);
	}

	BUG_ON(g->p.nr_threads <= 0);
	BUG_ON(g->p.nr_proc <= 0);

	g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;

	/* Convert the MB sizes into byte counts: */
	g->p.bytes_global		= g->p.mb_global	*1024L*1024L;
	g->p.bytes_process		= g->p.mb_proc		*1024L*1024L;
	g->p.bytes_process_locked	= g->p.mb_proc_locked	*1024L*1024L;
	g->p.bytes_thread		= g->p.mb_thread	*1024L*1024L;

	g->data = setup_shared_data(g->p.bytes_global);

	/* Startup serialization: */
	init_global_mutex(&g->start_work_mutex);
	init_global_mutex(&g->startup_mutex);
	init_global_mutex(&g->startup_done_mutex);
	init_global_mutex(&g->stop_work_mutex);

	init_thread_data();

	tprintf("#\n");
	parse_setup_cpu_list();
	parse_setup_node_list();
	tprintf("#\n");

	print_summary();

	return 0;
}
1367
/*
 * Tear down in reverse order of init(); 'g' itself is freed last
 * because deinit_thread_data() still reads g->p:
 */
static void deinit(void)
{
	free_data(g->data, g->p.bytes_global);
	g->data = NULL;

	deinit_thread_data();

	free_data(g, sizeof(*g));
	g = NULL;
}
1378
1379/*
1380 * Print a short or long result, depending on the verbosity setting:
1381 */
1382static void print_res(const char *name, double val,
1383 const char *txt_unit, const char *txt_short, const char *txt_long)
1384{
1385 if (!name)
1386 name = "main,";
1387
1388 if (g->p.show_quiet)
1389 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1390 else
1391 printf(" %14.3f %s\n", val, txt_long);
1392}
1393
1394static int __bench_numa(const char *name)
1395{
1396 struct timeval start, stop, diff;
1397 u64 runtime_ns_min, runtime_ns_sum;
1398 pid_t *pids, pid, wpid;
1399 double delta_runtime;
1400 double runtime_avg;
1401 double runtime_sec_max;
1402 double runtime_sec_min;
1403 int wait_stat;
1404 double bytes;
1405 int i, t;
1406
1407 if (init())
1408 return -1;
1409
1410 pids = zalloc(g->p.nr_proc * sizeof(*pids));
1411 pid = -1;
1412
1413 /* All threads try to acquire it, this way we can wait for them to start up: */
1414 pthread_mutex_lock(&g->start_work_mutex);
1415
1416 if (g->p.serialize_startup) {
1417 tprintf(" #\n");
1418 tprintf(" # Startup synchronization: ..."); fflush(stdout);
1419 }
1420
1421 gettimeofday(&start, NULL);
1422
1423 for (i = 0; i < g->p.nr_proc; i++) {
1424 pid = fork();
1425 dprintf(" # process %2d: PID %d\n", i, pid);
1426
1427 BUG_ON(pid < 0);
1428 if (!pid) {
1429 /* Child process: */
1430 worker_process(i);
1431
1432 exit(0);
1433 }
1434 pids[i] = pid;
1435
1436 }
1437 /* Wait for all the threads to start up: */
1438 while (g->nr_tasks_started != g->p.nr_tasks)
1439 usleep(1000);
1440
1441 BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
1442
1443 if (g->p.serialize_startup) {
1444 double startup_sec;
1445
1446 pthread_mutex_lock(&g->startup_done_mutex);
1447
1448 /* This will start all threads: */
1449 pthread_mutex_unlock(&g->start_work_mutex);
1450
1451 /* This mutex is locked - the last started thread will wake us: */
1452 pthread_mutex_lock(&g->startup_done_mutex);
1453
1454 gettimeofday(&stop, NULL);
1455
1456 timersub(&stop, &start, &diff);
1457
1458 startup_sec = diff.tv_sec * 1000000000.0;
1459 startup_sec += diff.tv_usec * 1000.0;
1460 startup_sec /= 1e9;
1461
1462 tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1463 tprintf(" #\n");
1464
1465 start = stop;
1466 pthread_mutex_unlock(&g->startup_done_mutex);
1467 } else {
1468 gettimeofday(&start, NULL);
1469 }
1470
1471 /* Parent process: */
1472
1473
1474 for (i = 0; i < g->p.nr_proc; i++) {
1475 wpid = waitpid(pids[i], &wait_stat, 0);
1476 BUG_ON(wpid < 0);
1477 BUG_ON(!WIFEXITED(wait_stat));
1478
1479 }
1480
1481 runtime_ns_sum = 0;
1482 runtime_ns_min = -1LL;
1483
1484 for (t = 0; t < g->p.nr_tasks; t++) {
1485 u64 thread_runtime_ns = g->threads[t].runtime_ns;
1486
1487 runtime_ns_sum += thread_runtime_ns;
1488 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1489 }
1490
1491 gettimeofday(&stop, NULL);
1492 timersub(&stop, &start, &diff);
1493
1494 BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1495
1496 tprintf("\n ###\n");
1497 tprintf("\n");
1498
1499 runtime_sec_max = diff.tv_sec * 1000000000.0;
1500 runtime_sec_max += diff.tv_usec * 1000.0;
1501 runtime_sec_max /= 1e9;
1502
1503 runtime_sec_min = runtime_ns_min/1e9;
1504
1505 bytes = g->bytes_done;
1506 runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / 1e9;
1507
1508 if (g->p.measure_convergence) {
1509 print_res(name, runtime_sec_max,
1510 "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1511 }
1512
1513 print_res(name, runtime_sec_max,
1514 "secs,", "runtime-max/thread", "secs slowest (max) thread-runtime");
1515
1516 print_res(name, runtime_sec_min,
1517 "secs,", "runtime-min/thread", "secs fastest (min) thread-runtime");
1518
1519 print_res(name, runtime_avg,
1520 "secs,", "runtime-avg/thread", "secs average thread-runtime");
1521
1522 delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1523 print_res(name, delta_runtime / runtime_sec_max * 100.0,
1524 "%,", "spread-runtime/thread", "% difference between max/avg runtime");
1525
1526 print_res(name, bytes / g->p.nr_tasks / 1e9,
1527 "GB,", "data/thread", "GB data processed, per thread");
1528
1529 print_res(name, bytes / 1e9,
1530 "GB,", "data-total", "GB data processed, total");
1531
1532 print_res(name, runtime_sec_max * 1e9 / (bytes / g->p.nr_tasks),
1533 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1534
1535 print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1536 "GB/sec,", "thread-speed", "GB/sec/thread speed");
1537
1538 print_res(name, bytes / runtime_sec_max / 1e9,
1539 "GB/sec,", "total-speed", "GB/sec total speed");
1540
1541 free(pids);
1542
1543 deinit();
1544
1545 return 0;
1546}
1547
#define MAX_ARGS 50

/* Count the NULL-terminated argv[] entries; aborts if MAX_ARGS is reached. */
static int command_size(const char **argv)
{
	int n;

	for (n = 0; argv[n]; n++)
		;

	BUG_ON(n >= MAX_ARGS);

	return n;
}
1563
1564static void init_params(struct params *p, const char *name, int argc, const char **argv)
1565{
1566 int i;
1567
1568 printf("\n # Running %s \"perf bench numa", name);
1569
1570 for (i = 0; i < argc; i++)
1571 printf(" %s", argv[i]);
1572
1573 printf("\"\n");
1574
1575 memset(p, 0, sizeof(*p));
1576
1577 /* Initialize nonzero defaults: */
1578
1579 p->serialize_startup = 1;
1580 p->data_reads = true;
1581 p->data_writes = true;
1582 p->data_backwards = true;
1583 p->data_rand_walk = true;
1584 p->nr_loops = -1;
1585 p->init_random = true;
1586}
1587
1588static int run_bench_numa(const char *name, const char **argv)
1589{
1590 int argc = command_size(argv);
1591
1592 init_params(&p0, name, argc, argv);
1593 argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1594 if (argc)
1595 goto err;
1596
1597 if (__bench_numa(name))
1598 goto err;
1599
1600 return 0;
1601
1602err:
1603 usage_with_options(numa_usage, options);
1604 return -1;
1605}
1606
1607#define OPT_BW_RAM "-s", "20", "-zZq", "--thp", " 1", "--no-data_rand_walk"
1608#define OPT_BW_RAM_NOTHP OPT_BW_RAM, "--thp", "-1"
1609
1610#define OPT_CONV "-s", "100", "-zZ0qcm", "--thp", " 1"
1611#define OPT_CONV_NOTHP OPT_CONV, "--thp", "-1"
1612
1613#define OPT_BW "-s", "20", "-zZ0q", "--thp", " 1"
1614#define OPT_BW_NOTHP OPT_BW, "--thp", "-1"
1615
1616/*
1617 * The built-in test-suite executed by "perf bench numa -a".
1618 *
1619 * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
1620 */
1621static const char *tests[][MAX_ARGS] = {
1622 /* Basic single-stream NUMA bandwidth measurements: */
1623 { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024",
1624 "-C" , "0", "-M", "0", OPT_BW_RAM },
1625 { "RAM-bw-local-NOTHP,",
1626 "mem", "-p", "1", "-t", "1", "-P", "1024",
1627 "-C" , "0", "-M", "0", OPT_BW_RAM_NOTHP },
1628 { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024",
1629 "-C" , "0", "-M", "1", OPT_BW_RAM },
1630
1631 /* 2-stream NUMA bandwidth measurements: */
1632 { "RAM-bw-local-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024",
1633 "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
1634 { "RAM-bw-remote-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024",
1635 "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
1636
1637 /* Cross-stream NUMA bandwidth measurement: */
1638 { "RAM-bw-cross,", "mem", "-p", "2", "-t", "1", "-P", "1024",
1639 "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
1640
1641 /* Convergence latency measurements: */
1642 { " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV },
1643 { " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV },
1644 { " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV },
1645 { " 2x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV },
1646 { " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV },
1647 { " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV },
1648 { " 4x4-convergence-NOTHP,",
1649 "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV_NOTHP },
1650 { " 4x6-convergence,", "mem", "-p", "4", "-t", "6", "-P", "1020", OPT_CONV },
1651 { " 4x8-convergence,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_CONV },
1652 { " 8x4-convergence,", "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV },
1653 { " 8x4-convergence-NOTHP,",
1654 "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV_NOTHP },
1655 { " 3x1-convergence,", "mem", "-p", "3", "-t", "1", "-P", "512", OPT_CONV },
1656 { " 4x1-convergence,", "mem", "-p", "4", "-t", "1", "-P", "512", OPT_CONV },
1657 { " 8x1-convergence,", "mem", "-p", "8", "-t", "1", "-P", "512", OPT_CONV },
1658 { "16x1-convergence,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_CONV },
1659 { "32x1-convergence,", "mem", "-p", "32", "-t", "1", "-P", "128", OPT_CONV },
1660
1661 /* Various NUMA process/thread layout bandwidth measurements: */
1662 { " 2x1-bw-process,", "mem", "-p", "2", "-t", "1", "-P", "1024", OPT_BW },
1663 { " 3x1-bw-process,", "mem", "-p", "3", "-t", "1", "-P", "1024", OPT_BW },
1664 { " 4x1-bw-process,", "mem", "-p", "4", "-t", "1", "-P", "1024", OPT_BW },
1665 { " 8x1-bw-process,", "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW },
1666 { " 8x1-bw-process-NOTHP,",
1667 "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP },
1668 { "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW },
1669
1670 { " 4x1-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW },
1671 { " 8x1-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW },
1672 { "16x1-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW },
1673 { "32x1-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW },
1674
1675 { " 2x3-bw-thread,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW },
1676 { " 4x4-bw-thread,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW },
1677 { " 4x6-bw-thread,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW },
1678 { " 4x8-bw-thread,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW },
1679 { " 4x8-bw-thread-NOTHP,",
1680 "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP },
1681 { " 3x3-bw-thread,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW },
1682 { " 5x5-bw-thread,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW },
1683
1684 { "2x16-bw-thread,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW },
1685 { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW },
1686
1687 { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW },
1688 { "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP },
1689 { "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW },
1690 { "numa01-bw-thread-NOTHP,",
1691 "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW_NOTHP },
1692};
1693
1694static int bench_all(void)
1695{
1696 int nr = ARRAY_SIZE(tests);
1697 int ret;
1698 int i;
1699
1700 ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
1701 BUG_ON(ret < 0);
1702
1703 for (i = 0; i < nr; i++) {
1704 if (run_bench_numa(tests[i][0], tests[i] + 1))
1705 return -1;
1706 }
1707
1708 printf("\n");
1709
1710 return 0;
1711}
1712
1713int bench_numa(int argc, const char **argv, const char *prefix __maybe_unused)
1714{
1715 init_params(&p0, "main,", argc, argv);
1716 argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1717 if (argc)
1718 goto err;
1719
1720 if (p0.run_all)
1721 return bench_all();
1722
1723 if (__bench_numa(NULL))
1724 goto err;
1725
1726 return 0;
1727
1728err:
1729 usage_with_options(numa_usage, options);
1730 return -1;
1731}
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index dc870cf31b79..2e6961ea3184 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -34,9 +34,10 @@
34 34
35struct perf_annotate { 35struct perf_annotate {
36 struct perf_tool tool; 36 struct perf_tool tool;
37 bool force, use_tui, use_stdio; 37 bool force, use_tui, use_stdio, use_gtk;
38 bool full_paths; 38 bool full_paths;
39 bool print_line; 39 bool print_line;
40 bool skip_missing;
40 const char *sym_hist_filter; 41 const char *sym_hist_filter;
41 const char *cpu_list; 42 const char *cpu_list;
42 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 43 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -138,9 +139,22 @@ find_next:
138 continue; 139 continue;
139 } 140 }
140 141
141 if (use_browser > 0) { 142 if (use_browser == 2) {
143 int ret;
144
145 ret = hist_entry__gtk_annotate(he, evidx, NULL);
146 if (!ret || !ann->skip_missing)
147 return;
148
149 /* skip missing symbols */
150 nd = rb_next(nd);
151 } else if (use_browser == 1) {
142 key = hist_entry__tui_annotate(he, evidx, NULL); 152 key = hist_entry__tui_annotate(he, evidx, NULL);
143 switch (key) { 153 switch (key) {
154 case -1:
155 if (!ann->skip_missing)
156 return;
157 /* fall through */
144 case K_RIGHT: 158 case K_RIGHT:
145 next = rb_next(nd); 159 next = rb_next(nd);
146 break; 160 break;
@@ -224,6 +238,10 @@ static int __cmd_annotate(struct perf_annotate *ann)
224 ui__error("The %s file has no samples!\n", session->filename); 238 ui__error("The %s file has no samples!\n", session->filename);
225 goto out_delete; 239 goto out_delete;
226 } 240 }
241
242 if (use_browser == 2)
243 perf_gtk__show_annotations();
244
227out_delete: 245out_delete:
228 /* 246 /*
229 * Speed up the exit process, for large files this can 247 * Speed up the exit process, for large files this can
@@ -270,6 +288,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
270 "be more verbose (show symbol address, etc)"), 288 "be more verbose (show symbol address, etc)"),
271 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 289 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
272 "dump raw trace in ASCII"), 290 "dump raw trace in ASCII"),
291 OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
273 OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"), 292 OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
274 OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"), 293 OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
275 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 294 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
@@ -280,6 +299,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
280 "print matching source lines (may be slow)"), 299 "print matching source lines (may be slow)"),
281 OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, 300 OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
282 "Don't shorten the displayed pathnames"), 301 "Don't shorten the displayed pathnames"),
302 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
303 "Skip symbols that cannot be annotated"),
283 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), 304 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
284 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 305 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
285 "Look for files with symbols relative to this directory"), 306 "Look for files with symbols relative to this directory"),
@@ -300,6 +321,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
300 use_browser = 0; 321 use_browser = 0;
301 else if (annotate.use_tui) 322 else if (annotate.use_tui)
302 use_browser = 1; 323 use_browser = 1;
324 else if (annotate.use_gtk)
325 use_browser = 2;
303 326
304 setup_browser(true); 327 setup_browser(true);
305 328
@@ -309,7 +332,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
309 if (symbol__init() < 0) 332 if (symbol__init() < 0)
310 return -1; 333 return -1;
311 334
312 setup_sorting(annotate_usage, options); 335 if (setup_sorting() < 0)
336 usage_with_options(annotate_usage, options);
313 337
314 if (argc) { 338 if (argc) {
315 /* 339 /*
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index cae9a5fd2ecf..77298bf892b8 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -35,6 +35,18 @@ struct bench_suite {
35/* sentinel: easy for help */ 35/* sentinel: easy for help */
36#define suite_all { "all", "Test all benchmark suites", NULL } 36#define suite_all { "all", "Test all benchmark suites", NULL }
37 37
38#ifdef LIBNUMA_SUPPORT
39static struct bench_suite numa_suites[] = {
40 { "mem",
41 "Benchmark for NUMA workloads",
42 bench_numa },
43 suite_all,
44 { NULL,
45 NULL,
46 NULL }
47};
48#endif
49
38static struct bench_suite sched_suites[] = { 50static struct bench_suite sched_suites[] = {
39 { "messaging", 51 { "messaging",
40 "Benchmark for scheduler and IPC mechanisms", 52 "Benchmark for scheduler and IPC mechanisms",
@@ -68,6 +80,11 @@ struct bench_subsys {
68}; 80};
69 81
70static struct bench_subsys subsystems[] = { 82static struct bench_subsys subsystems[] = {
83#ifdef LIBNUMA_SUPPORT
84 { "numa",
85 "NUMA scheduling and MM behavior",
86 numa_suites },
87#endif
71 { "sched", 88 { "sched",
72 "scheduler and IPC mechanism", 89 "scheduler and IPC mechanism",
73 sched_suites }, 90 sched_suites },
@@ -159,6 +176,7 @@ static void all_suite(struct bench_subsys *subsys) /* FROM HERE */
159 printf("# Running %s/%s benchmark...\n", 176 printf("# Running %s/%s benchmark...\n",
160 subsys->name, 177 subsys->name,
161 suites[i].name); 178 suites[i].name);
179 fflush(stdout);
162 180
163 argv[1] = suites[i].name; 181 argv[1] = suites[i].name;
164 suites[i].fn(1, argv, NULL); 182 suites[i].fn(1, argv, NULL);
@@ -225,6 +243,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
225 printf("# Running %s/%s benchmark...\n", 243 printf("# Running %s/%s benchmark...\n",
226 subsystems[i].name, 244 subsystems[i].name,
227 subsystems[i].suites[j].name); 245 subsystems[i].suites[j].name);
246 fflush(stdout);
228 status = subsystems[i].suites[j].fn(argc - 1, 247 status = subsystems[i].suites[j].fn(argc - 1,
229 argv + 1, prefix); 248 argv + 1, prefix);
230 goto end; 249 goto end;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index fae8b250b2ca..c96c8fa38243 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -14,6 +14,7 @@
14#include "util/parse-options.h" 14#include "util/parse-options.h"
15#include "util/strlist.h" 15#include "util/strlist.h"
16#include "util/build-id.h" 16#include "util/build-id.h"
17#include "util/session.h"
17#include "util/symbol.h" 18#include "util/symbol.h"
18 19
19static int build_id_cache__add_file(const char *filename, const char *debugdir) 20static int build_id_cache__add_file(const char *filename, const char *debugdir)
@@ -58,19 +59,89 @@ static int build_id_cache__remove_file(const char *filename,
58 return err; 59 return err;
59} 60}
60 61
62static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
63{
64 char filename[PATH_MAX];
65 u8 build_id[BUILD_ID_SIZE];
66
67 if (dso__build_id_filename(dso, filename, sizeof(filename)) &&
68 filename__read_build_id(filename, build_id,
69 sizeof(build_id)) != sizeof(build_id)) {
70 if (errno == ENOENT)
71 return false;
72
73 pr_warning("Problems with %s file, consider removing it from the cache\n",
74 filename);
75 } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
76 pr_warning("Problems with %s file, consider removing it from the cache\n",
77 filename);
78 }
79
80 return true;
81}
82
83static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp)
84{
85 struct perf_session *session = perf_session__new(filename, O_RDONLY,
86 force, false, NULL);
87 if (session == NULL)
88 return -1;
89
90 perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0);
91 perf_session__delete(session);
92
93 return 0;
94}
95
96static int build_id_cache__update_file(const char *filename,
97 const char *debugdir)
98{
99 u8 build_id[BUILD_ID_SIZE];
100 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
101
102 int err;
103
104 if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
105 pr_debug("Couldn't read a build-id in %s\n", filename);
106 return -1;
107 }
108
109 build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
110 err = build_id_cache__remove_s(sbuild_id, debugdir);
111 if (!err) {
112 err = build_id_cache__add_s(sbuild_id, debugdir, filename,
113 false, false);
114 }
115 if (verbose)
116 pr_info("Updating %s %s: %s\n", sbuild_id, filename,
117 err ? "FAIL" : "Ok");
118
119 return err;
120}
121
61int cmd_buildid_cache(int argc, const char **argv, 122int cmd_buildid_cache(int argc, const char **argv,
62 const char *prefix __maybe_unused) 123 const char *prefix __maybe_unused)
63{ 124{
64 struct strlist *list; 125 struct strlist *list;
65 struct str_node *pos; 126 struct str_node *pos;
127 int ret = 0;
128 bool force = false;
66 char debugdir[PATH_MAX]; 129 char debugdir[PATH_MAX];
67 char const *add_name_list_str = NULL, 130 char const *add_name_list_str = NULL,
68 *remove_name_list_str = NULL; 131 *remove_name_list_str = NULL,
132 *missing_filename = NULL,
133 *update_name_list_str = NULL;
134
69 const struct option buildid_cache_options[] = { 135 const struct option buildid_cache_options[] = {
70 OPT_STRING('a', "add", &add_name_list_str, 136 OPT_STRING('a', "add", &add_name_list_str,
71 "file list", "file(s) to add"), 137 "file list", "file(s) to add"),
72 OPT_STRING('r', "remove", &remove_name_list_str, "file list", 138 OPT_STRING('r', "remove", &remove_name_list_str, "file list",
73 "file(s) to remove"), 139 "file(s) to remove"),
140 OPT_STRING('M', "missing", &missing_filename, "file",
141 "to find missing build ids in the cache"),
142 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
143 OPT_STRING('u', "update", &update_name_list_str, "file list",
144 "file(s) to update"),
74 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 145 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
75 OPT_END() 146 OPT_END()
76 }; 147 };
@@ -125,5 +196,26 @@ int cmd_buildid_cache(int argc, const char **argv,
125 } 196 }
126 } 197 }
127 198
128 return 0; 199 if (missing_filename)
200 ret = build_id_cache__fprintf_missing(missing_filename, force, stdout);
201
202 if (update_name_list_str) {
203 list = strlist__new(true, update_name_list_str);
204 if (list) {
205 strlist__for_each(pos, list)
206 if (build_id_cache__update_file(pos->s, debugdir)) {
207 if (errno == ENOENT) {
208 pr_debug("%s wasn't in the cache\n",
209 pos->s);
210 continue;
211 }
212 pr_warning("Couldn't update %s: %s\n",
213 pos->s, strerror(errno));
214 }
215
216 strlist__delete(list);
217 }
218 }
219
220 return ret;
129} 221}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index a82d99fec83e..e74366a13218 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -44,23 +44,26 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
44 return fprintf(fp, "%s\n", sbuild_id); 44 return fprintf(fp, "%s\n", sbuild_id);
45} 45}
46 46
47static bool dso__skip_buildid(struct dso *dso, int with_hits)
48{
49 return with_hits && !dso->hit;
50}
51
47static int perf_session__list_build_ids(bool force, bool with_hits) 52static int perf_session__list_build_ids(bool force, bool with_hits)
48{ 53{
49 struct perf_session *session; 54 struct perf_session *session;
50 55
51 symbol__elf_init(); 56 symbol__elf_init();
52
53 session = perf_session__new(input_name, O_RDONLY, force, false,
54 &build_id__mark_dso_hit_ops);
55 if (session == NULL)
56 return -1;
57
58 /* 57 /*
59 * See if this is an ELF file first: 58 * See if this is an ELF file first:
60 */ 59 */
61 if (filename__fprintf_build_id(session->filename, stdout)) 60 if (filename__fprintf_build_id(input_name, stdout))
62 goto out; 61 goto out;
63 62
63 session = perf_session__new(input_name, O_RDONLY, force, false,
64 &build_id__mark_dso_hit_ops);
65 if (session == NULL)
66 return -1;
64 /* 67 /*
65 * in pipe-mode, the only way to get the buildids is to parse 68 * in pipe-mode, the only way to get the buildids is to parse
66 * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID 69 * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
@@ -68,9 +71,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
68 if (with_hits || session->fd_pipe) 71 if (with_hits || session->fd_pipe)
69 perf_session__process_events(session, &build_id__mark_dso_hit_ops); 72 perf_session__process_events(session, &build_id__mark_dso_hit_ops);
70 73
71 perf_session__fprintf_dsos_buildid(session, stdout, with_hits); 74 perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
72out:
73 perf_session__delete(session); 75 perf_session__delete(session);
76out:
74 return 0; 77 return 0;
75} 78}
76 79
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 93b852f8a5d5..d207a97a2db1 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -23,7 +23,6 @@ static char const *input_old = "perf.data.old",
23 *input_new = "perf.data"; 23 *input_new = "perf.data";
24static char diff__default_sort_order[] = "dso,symbol"; 24static char diff__default_sort_order[] = "dso,symbol";
25static bool force; 25static bool force;
26static bool show_displacement;
27static bool show_period; 26static bool show_period;
28static bool show_formula; 27static bool show_formula;
29static bool show_baseline_only; 28static bool show_baseline_only;
@@ -146,58 +145,47 @@ static int setup_compute(const struct option *opt, const char *str,
146 return -EINVAL; 145 return -EINVAL;
147} 146}
148 147
149static double get_period_percent(struct hist_entry *he, u64 period) 148double perf_diff__period_percent(struct hist_entry *he, u64 period)
150{ 149{
151 u64 total = he->hists->stats.total_period; 150 u64 total = he->hists->stats.total_period;
152 return (period * 100.0) / total; 151 return (period * 100.0) / total;
153} 152}
154 153
155double perf_diff__compute_delta(struct hist_entry *he) 154double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair)
156{ 155{
157 struct hist_entry *pair = hist_entry__next_pair(he); 156 double new_percent = perf_diff__period_percent(he, he->stat.period);
158 double new_percent = get_period_percent(he, he->stat.period); 157 double old_percent = perf_diff__period_percent(pair, pair->stat.period);
159 double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0;
160 158
161 he->diff.period_ratio_delta = new_percent - old_percent; 159 he->diff.period_ratio_delta = new_percent - old_percent;
162 he->diff.computed = true; 160 he->diff.computed = true;
163 return he->diff.period_ratio_delta; 161 return he->diff.period_ratio_delta;
164} 162}
165 163
166double perf_diff__compute_ratio(struct hist_entry *he) 164double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair)
167{ 165{
168 struct hist_entry *pair = hist_entry__next_pair(he);
169 double new_period = he->stat.period; 166 double new_period = he->stat.period;
170 double old_period = pair ? pair->stat.period : 0; 167 double old_period = pair->stat.period;
171 168
172 he->diff.computed = true; 169 he->diff.computed = true;
173 he->diff.period_ratio = pair ? (new_period / old_period) : 0; 170 he->diff.period_ratio = new_period / old_period;
174 return he->diff.period_ratio; 171 return he->diff.period_ratio;
175} 172}
176 173
177s64 perf_diff__compute_wdiff(struct hist_entry *he) 174s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
178{ 175{
179 struct hist_entry *pair = hist_entry__next_pair(he);
180 u64 new_period = he->stat.period; 176 u64 new_period = he->stat.period;
181 u64 old_period = pair ? pair->stat.period : 0; 177 u64 old_period = pair->stat.period;
182 178
183 he->diff.computed = true; 179 he->diff.computed = true;
184 180 he->diff.wdiff = new_period * compute_wdiff_w2 -
185 if (!pair) 181 old_period * compute_wdiff_w1;
186 he->diff.wdiff = 0;
187 else
188 he->diff.wdiff = new_period * compute_wdiff_w2 -
189 old_period * compute_wdiff_w1;
190 182
191 return he->diff.wdiff; 183 return he->diff.wdiff;
192} 184}
193 185
194static int formula_delta(struct hist_entry *he, char *buf, size_t size) 186static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
187 char *buf, size_t size)
195{ 188{
196 struct hist_entry *pair = hist_entry__next_pair(he);
197
198 if (!pair)
199 return -1;
200
201 return scnprintf(buf, size, 189 return scnprintf(buf, size,
202 "(%" PRIu64 " * 100 / %" PRIu64 ") - " 190 "(%" PRIu64 " * 100 / %" PRIu64 ") - "
203 "(%" PRIu64 " * 100 / %" PRIu64 ")", 191 "(%" PRIu64 " * 100 / %" PRIu64 ")",
@@ -205,41 +193,36 @@ static int formula_delta(struct hist_entry *he, char *buf, size_t size)
205 pair->stat.period, pair->hists->stats.total_period); 193 pair->stat.period, pair->hists->stats.total_period);
206} 194}
207 195
208static int formula_ratio(struct hist_entry *he, char *buf, size_t size) 196static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
197 char *buf, size_t size)
209{ 198{
210 struct hist_entry *pair = hist_entry__next_pair(he);
211 double new_period = he->stat.period; 199 double new_period = he->stat.period;
212 double old_period = pair ? pair->stat.period : 0; 200 double old_period = pair->stat.period;
213
214 if (!pair)
215 return -1;
216 201
217 return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); 202 return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
218} 203}
219 204
220static int formula_wdiff(struct hist_entry *he, char *buf, size_t size) 205static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair,
206 char *buf, size_t size)
221{ 207{
222 struct hist_entry *pair = hist_entry__next_pair(he);
223 u64 new_period = he->stat.period; 208 u64 new_period = he->stat.period;
224 u64 old_period = pair ? pair->stat.period : 0; 209 u64 old_period = pair->stat.period;
225
226 if (!pair)
227 return -1;
228 210
229 return scnprintf(buf, size, 211 return scnprintf(buf, size,
230 "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", 212 "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
231 new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); 213 new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
232} 214}
233 215
234int perf_diff__formula(char *buf, size_t size, struct hist_entry *he) 216int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
217 char *buf, size_t size)
235{ 218{
236 switch (compute) { 219 switch (compute) {
237 case COMPUTE_DELTA: 220 case COMPUTE_DELTA:
238 return formula_delta(he, buf, size); 221 return formula_delta(he, pair, buf, size);
239 case COMPUTE_RATIO: 222 case COMPUTE_RATIO:
240 return formula_ratio(he, buf, size); 223 return formula_ratio(he, pair, buf, size);
241 case COMPUTE_WEIGHTED_DIFF: 224 case COMPUTE_WEIGHTED_DIFF:
242 return formula_wdiff(he, buf, size); 225 return formula_wdiff(he, pair, buf, size);
243 default: 226 default:
244 BUG_ON(1); 227 BUG_ON(1);
245 } 228 }
@@ -292,48 +275,6 @@ static struct perf_tool tool = {
292 .ordering_requires_timestamps = true, 275 .ordering_requires_timestamps = true,
293}; 276};
294 277
295static void insert_hist_entry_by_name(struct rb_root *root,
296 struct hist_entry *he)
297{
298 struct rb_node **p = &root->rb_node;
299 struct rb_node *parent = NULL;
300 struct hist_entry *iter;
301
302 while (*p != NULL) {
303 parent = *p;
304 iter = rb_entry(parent, struct hist_entry, rb_node);
305 if (hist_entry__cmp(he, iter) < 0)
306 p = &(*p)->rb_left;
307 else
308 p = &(*p)->rb_right;
309 }
310
311 rb_link_node(&he->rb_node, parent, p);
312 rb_insert_color(&he->rb_node, root);
313}
314
315static void hists__name_resort(struct hists *self, bool sort)
316{
317 unsigned long position = 1;
318 struct rb_root tmp = RB_ROOT;
319 struct rb_node *next = rb_first(&self->entries);
320
321 while (next != NULL) {
322 struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
323
324 next = rb_next(&n->rb_node);
325 n->position = position++;
326
327 if (sort) {
328 rb_erase(&n->rb_node, &self->entries);
329 insert_hist_entry_by_name(&tmp, n);
330 }
331 }
332
333 if (sort)
334 self->entries = tmp;
335}
336
337static struct perf_evsel *evsel_match(struct perf_evsel *evsel, 278static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
338 struct perf_evlist *evlist) 279 struct perf_evlist *evlist)
339{ 280{
@@ -346,34 +287,34 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
346 return NULL; 287 return NULL;
347} 288}
348 289
349static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) 290static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
350{ 291{
351 struct perf_evsel *evsel; 292 struct perf_evsel *evsel;
352 293
353 list_for_each_entry(evsel, &evlist->entries, node) { 294 list_for_each_entry(evsel, &evlist->entries, node) {
354 struct hists *hists = &evsel->hists; 295 struct hists *hists = &evsel->hists;
355 296
356 hists__output_resort(hists); 297 hists__collapse_resort(hists);
357
358 /*
359 * The hists__name_resort only sets possition
360 * if name is false.
361 */
362 if (name || ((!name) && show_displacement))
363 hists__name_resort(hists, name);
364 } 298 }
365} 299}
366 300
367static void hists__baseline_only(struct hists *hists) 301static void hists__baseline_only(struct hists *hists)
368{ 302{
369 struct rb_node *next = rb_first(&hists->entries); 303 struct rb_root *root;
304 struct rb_node *next;
370 305
306 if (sort__need_collapse)
307 root = &hists->entries_collapsed;
308 else
309 root = hists->entries_in;
310
311 next = rb_first(root);
371 while (next != NULL) { 312 while (next != NULL) {
372 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 313 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
373 314
374 next = rb_next(&he->rb_node); 315 next = rb_next(&he->rb_node_in);
375 if (!hist_entry__next_pair(he)) { 316 if (!hist_entry__next_pair(he)) {
376 rb_erase(&he->rb_node, &hists->entries); 317 rb_erase(&he->rb_node_in, root);
377 hist_entry__free(he); 318 hist_entry__free(he);
378 } 319 }
379 } 320 }
@@ -385,18 +326,21 @@ static void hists__precompute(struct hists *hists)
385 326
386 while (next != NULL) { 327 while (next != NULL) {
387 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 328 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
329 struct hist_entry *pair = hist_entry__next_pair(he);
388 330
389 next = rb_next(&he->rb_node); 331 next = rb_next(&he->rb_node);
332 if (!pair)
333 continue;
390 334
391 switch (compute) { 335 switch (compute) {
392 case COMPUTE_DELTA: 336 case COMPUTE_DELTA:
393 perf_diff__compute_delta(he); 337 perf_diff__compute_delta(he, pair);
394 break; 338 break;
395 case COMPUTE_RATIO: 339 case COMPUTE_RATIO:
396 perf_diff__compute_ratio(he); 340 perf_diff__compute_ratio(he, pair);
397 break; 341 break;
398 case COMPUTE_WEIGHTED_DIFF: 342 case COMPUTE_WEIGHTED_DIFF:
399 perf_diff__compute_wdiff(he); 343 perf_diff__compute_wdiff(he, pair);
400 break; 344 break;
401 default: 345 default:
402 BUG_ON(1); 346 BUG_ON(1);
@@ -470,19 +414,30 @@ static void insert_hist_entry_by_compute(struct rb_root *root,
470 414
471static void hists__compute_resort(struct hists *hists) 415static void hists__compute_resort(struct hists *hists)
472{ 416{
473 struct rb_root tmp = RB_ROOT; 417 struct rb_root *root;
474 struct rb_node *next = rb_first(&hists->entries); 418 struct rb_node *next;
419
420 if (sort__need_collapse)
421 root = &hists->entries_collapsed;
422 else
423 root = hists->entries_in;
424
425 hists->entries = RB_ROOT;
426 next = rb_first(root);
427
428 hists->nr_entries = 0;
429 hists->stats.total_period = 0;
430 hists__reset_col_len(hists);
475 431
476 while (next != NULL) { 432 while (next != NULL) {
477 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); 433 struct hist_entry *he;
478 434
479 next = rb_next(&he->rb_node); 435 he = rb_entry(next, struct hist_entry, rb_node_in);
436 next = rb_next(&he->rb_node_in);
480 437
481 rb_erase(&he->rb_node, &hists->entries); 438 insert_hist_entry_by_compute(&hists->entries, he, compute);
482 insert_hist_entry_by_compute(&tmp, he, compute); 439 hists__inc_nr_entries(hists, he);
483 } 440 }
484
485 hists->entries = tmp;
486} 441}
487 442
488static void hists__process(struct hists *old, struct hists *new) 443static void hists__process(struct hists *old, struct hists *new)
@@ -497,6 +452,8 @@ static void hists__process(struct hists *old, struct hists *new)
497 if (sort_compute) { 452 if (sort_compute) {
498 hists__precompute(new); 453 hists__precompute(new);
499 hists__compute_resort(new); 454 hists__compute_resort(new);
455 } else {
456 hists__output_resort(new);
500 } 457 }
501 458
502 hists__fprintf(new, true, 0, 0, stdout); 459 hists__fprintf(new, true, 0, 0, stdout);
@@ -528,8 +485,8 @@ static int __cmd_diff(void)
528 evlist_old = older->evlist; 485 evlist_old = older->evlist;
529 evlist_new = newer->evlist; 486 evlist_new = newer->evlist;
530 487
531 perf_evlist__resort_hists(evlist_old, true); 488 perf_evlist__collapse_resort(evlist_old);
532 perf_evlist__resort_hists(evlist_new, false); 489 perf_evlist__collapse_resort(evlist_new);
533 490
534 list_for_each_entry(evsel, &evlist_new->entries, node) { 491 list_for_each_entry(evsel, &evlist_new->entries, node) {
535 struct perf_evsel *evsel_old; 492 struct perf_evsel *evsel_old;
@@ -562,8 +519,6 @@ static const char * const diff_usage[] = {
562static const struct option options[] = { 519static const struct option options[] = {
563 OPT_INCR('v', "verbose", &verbose, 520 OPT_INCR('v', "verbose", &verbose,
564 "be more verbose (show symbol address, etc)"), 521 "be more verbose (show symbol address, etc)"),
565 OPT_BOOLEAN('M', "displacement", &show_displacement,
566 "Show position displacement relative to baseline"),
567 OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, 522 OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
568 "Show only items with match in baseline"), 523 "Show only items with match in baseline"),
569 OPT_CALLBACK('c', "compute", &compute, 524 OPT_CALLBACK('c', "compute", &compute,
@@ -597,40 +552,32 @@ static const struct option options[] = {
597 552
598static void ui_init(void) 553static void ui_init(void)
599{ 554{
600 perf_hpp__init();
601
602 /* No overhead column. */
603 perf_hpp__column_enable(PERF_HPP__OVERHEAD, false);
604
605 /* 555 /*
606 * Display baseline/delta/ratio/displacement/ 556 * Display baseline/delta/ratio
607 * formula/periods columns. 557 * formula/periods columns.
608 */ 558 */
609 perf_hpp__column_enable(PERF_HPP__BASELINE, true); 559 perf_hpp__column_enable(PERF_HPP__BASELINE);
610 560
611 switch (compute) { 561 switch (compute) {
612 case COMPUTE_DELTA: 562 case COMPUTE_DELTA:
613 perf_hpp__column_enable(PERF_HPP__DELTA, true); 563 perf_hpp__column_enable(PERF_HPP__DELTA);
614 break; 564 break;
615 case COMPUTE_RATIO: 565 case COMPUTE_RATIO:
616 perf_hpp__column_enable(PERF_HPP__RATIO, true); 566 perf_hpp__column_enable(PERF_HPP__RATIO);
617 break; 567 break;
618 case COMPUTE_WEIGHTED_DIFF: 568 case COMPUTE_WEIGHTED_DIFF:
619 perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true); 569 perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF);
620 break; 570 break;
621 default: 571 default:
622 BUG_ON(1); 572 BUG_ON(1);
623 }; 573 };
624 574
625 if (show_displacement)
626 perf_hpp__column_enable(PERF_HPP__DISPL, true);
627
628 if (show_formula) 575 if (show_formula)
629 perf_hpp__column_enable(PERF_HPP__FORMULA, true); 576 perf_hpp__column_enable(PERF_HPP__FORMULA);
630 577
631 if (show_period) { 578 if (show_period) {
632 perf_hpp__column_enable(PERF_HPP__PERIOD, true); 579 perf_hpp__column_enable(PERF_HPP__PERIOD);
633 perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true); 580 perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE);
634 } 581 }
635} 582}
636 583
@@ -658,7 +605,9 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
658 605
659 ui_init(); 606 ui_init();
660 607
661 setup_sorting(diff_usage, options); 608 if (setup_sorting() < 0)
609 usage_with_options(diff_usage, options);
610
662 setup_pager(); 611 setup_pager();
663 612
664 sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); 613 sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index c20f1dcfb7e2..05bd9dfe875c 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -15,39 +15,6 @@
15#include "util/parse-options.h" 15#include "util/parse-options.h"
16#include "util/session.h" 16#include "util/session.h"
17 17
18struct perf_attr_details {
19 bool freq;
20 bool verbose;
21};
22
23static int comma_printf(bool *first, const char *fmt, ...)
24{
25 va_list args;
26 int ret = 0;
27
28 if (!*first) {
29 ret += printf(",");
30 } else {
31 ret += printf(":");
32 *first = false;
33 }
34
35 va_start(args, fmt);
36 ret += vprintf(fmt, args);
37 va_end(args);
38 return ret;
39}
40
41static int __if_print(bool *first, const char *field, u64 value)
42{
43 if (value == 0)
44 return 0;
45
46 return comma_printf(first, " %s: %" PRIu64, field, value);
47}
48
49#define if_print(field) __if_print(&first, #field, pos->attr.field)
50
51static int __cmd_evlist(const char *file_name, struct perf_attr_details *details) 18static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
52{ 19{
53 struct perf_session *session; 20 struct perf_session *session;
@@ -57,52 +24,8 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
57 if (session == NULL) 24 if (session == NULL)
58 return -ENOMEM; 25 return -ENOMEM;
59 26
60 list_for_each_entry(pos, &session->evlist->entries, node) { 27 list_for_each_entry(pos, &session->evlist->entries, node)
61 bool first = true; 28 perf_evsel__fprintf(pos, details, stdout);
62
63 printf("%s", perf_evsel__name(pos));
64
65 if (details->verbose || details->freq) {
66 comma_printf(&first, " sample_freq=%" PRIu64,
67 (u64)pos->attr.sample_freq);
68 }
69
70 if (details->verbose) {
71 if_print(type);
72 if_print(config);
73 if_print(config1);
74 if_print(config2);
75 if_print(size);
76 if_print(sample_type);
77 if_print(read_format);
78 if_print(disabled);
79 if_print(inherit);
80 if_print(pinned);
81 if_print(exclusive);
82 if_print(exclude_user);
83 if_print(exclude_kernel);
84 if_print(exclude_hv);
85 if_print(exclude_idle);
86 if_print(mmap);
87 if_print(comm);
88 if_print(freq);
89 if_print(inherit_stat);
90 if_print(enable_on_exec);
91 if_print(task);
92 if_print(watermark);
93 if_print(precise_ip);
94 if_print(mmap_data);
95 if_print(sample_id_all);
96 if_print(exclude_host);
97 if_print(exclude_guest);
98 if_print(__reserved_1);
99 if_print(wakeup_events);
100 if_print(bp_type);
101 if_print(branch_sample_type);
102 }
103
104 putchar('\n');
105 }
106 29
107 perf_session__delete(session); 30 perf_session__delete(session);
108 return 0; 31 return 0;
@@ -116,6 +39,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
116 OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"), 39 OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),
117 OPT_BOOLEAN('v', "verbose", &details.verbose, 40 OPT_BOOLEAN('v', "verbose", &details.verbose,
118 "Show all event attr details"), 41 "Show all event attr details"),
42 OPT_BOOLEAN('g', "group", &details.event_group,
43 "Show event group information"),
119 OPT_END() 44 OPT_END()
120 }; 45 };
121 const char * const evlist_usage[] = { 46 const char * const evlist_usage[] = {
@@ -127,5 +52,10 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
127 if (argc) 52 if (argc)
128 usage_with_options(evlist_usage, options); 53 usage_with_options(evlist_usage, options);
129 54
55 if (details.event_group && (details.verbose || details.freq)) {
56 pr_err("--group option is not compatible with other options\n");
57 usage_with_options(evlist_usage, options);
58 }
59
130 return __cmd_evlist(input_name, &details); 60 return __cmd_evlist(input_name, &details);
131} 61}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 0b4b796167be..46878daca5cc 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -17,6 +17,7 @@
17#include "util/debug.h" 17#include "util/debug.h"
18 18
19#include <linux/rbtree.h> 19#include <linux/rbtree.h>
20#include <linux/string.h>
20 21
21struct alloc_stat; 22struct alloc_stat;
22typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); 23typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
@@ -340,7 +341,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
340 int n_lines, int is_caller) 341 int n_lines, int is_caller)
341{ 342{
342 struct rb_node *next; 343 struct rb_node *next;
343 struct machine *machine; 344 struct machine *machine = &session->machines.host;
344 345
345 printf("%.102s\n", graph_dotted_line); 346 printf("%.102s\n", graph_dotted_line);
346 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); 347 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
@@ -349,11 +350,6 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
349 350
350 next = rb_first(root); 351 next = rb_first(root);
351 352
352 machine = perf_session__find_host_machine(session);
353 if (!machine) {
354 pr_err("__print_result: couldn't find kernel information\n");
355 return;
356 }
357 while (next && n_lines--) { 353 while (next && n_lines--) {
358 struct alloc_stat *data = rb_entry(next, struct alloc_stat, 354 struct alloc_stat *data = rb_entry(next, struct alloc_stat,
359 node); 355 node);
@@ -614,8 +610,7 @@ static struct sort_dimension *avail_sorts[] = {
614 &pingpong_sort_dimension, 610 &pingpong_sort_dimension,
615}; 611};
616 612
617#define NUM_AVAIL_SORTS \ 613#define NUM_AVAIL_SORTS ((int)ARRAY_SIZE(avail_sorts))
618 (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
619 614
620static int sort_dimension__add(const char *tok, struct list_head *list) 615static int sort_dimension__add(const char *tok, struct list_head *list)
621{ 616{
@@ -624,12 +619,11 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
624 619
625 for (i = 0; i < NUM_AVAIL_SORTS; i++) { 620 for (i = 0; i < NUM_AVAIL_SORTS; i++) {
626 if (!strcmp(avail_sorts[i]->name, tok)) { 621 if (!strcmp(avail_sorts[i]->name, tok)) {
627 sort = malloc(sizeof(*sort)); 622 sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
628 if (!sort) { 623 if (!sort) {
629 pr_err("%s: malloc failed\n", __func__); 624 pr_err("%s: memdup failed\n", __func__);
630 return -1; 625 return -1;
631 } 626 }
632 memcpy(sort, avail_sorts[i], sizeof(*sort));
633 list_add_tail(&sort->list, list); 627 list_add_tail(&sort->list, list);
634 return 0; 628 return 0;
635 } 629 }
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index ca3f80ebc100..37a769d7f9fe 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -973,8 +973,7 @@ __cmd_buildid_list(const char *file_name, int argc, const char **argv)
973 973
974int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) 974int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
975{ 975{
976 const char *file_name; 976 const char *file_name = NULL;
977
978 const struct option kvm_options[] = { 977 const struct option kvm_options[] = {
979 OPT_STRING('i', "input", &file_name, "file", 978 OPT_STRING('i', "input", &file_name, "file",
980 "Input file name"), 979 "Input file name"),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3151d3c70ce..774c90713a53 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -224,130 +224,28 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
224 224
225static int perf_record__open(struct perf_record *rec) 225static int perf_record__open(struct perf_record *rec)
226{ 226{
227 char msg[512];
227 struct perf_evsel *pos; 228 struct perf_evsel *pos;
228 struct perf_evlist *evlist = rec->evlist; 229 struct perf_evlist *evlist = rec->evlist;
229 struct perf_session *session = rec->session; 230 struct perf_session *session = rec->session;
230 struct perf_record_opts *opts = &rec->opts; 231 struct perf_record_opts *opts = &rec->opts;
231 int rc = 0; 232 int rc = 0;
232 233
233 /* 234 perf_evlist__config(evlist, opts);
234 * Set the evsel leader links before we configure attributes,
235 * since some might depend on this info.
236 */
237 if (opts->group)
238 perf_evlist__set_leader(evlist);
239
240 perf_evlist__config_attrs(evlist, opts);
241 235
242 list_for_each_entry(pos, &evlist->entries, node) { 236 list_for_each_entry(pos, &evlist->entries, node) {
243 struct perf_event_attr *attr = &pos->attr;
244 /*
245 * Check if parse_single_tracepoint_event has already asked for
246 * PERF_SAMPLE_TIME.
247 *
248 * XXX this is kludgy but short term fix for problems introduced by
249 * eac23d1c that broke 'perf script' by having different sample_types
250 * when using multiple tracepoint events when we use a perf binary
251 * that tries to use sample_id_all on an older kernel.
252 *
253 * We need to move counter creation to perf_session, support
254 * different sample_types, etc.
255 */
256 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
257
258fallback_missing_features:
259 if (opts->exclude_guest_missing)
260 attr->exclude_guest = attr->exclude_host = 0;
261retry_sample_id:
262 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
263try_again: 237try_again:
264 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { 238 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
265 int err = errno; 239 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
266
267 if (err == EPERM || err == EACCES) {
268 ui__error_paranoid();
269 rc = -err;
270 goto out;
271 } else if (err == ENODEV && opts->target.cpu_list) {
272 pr_err("No such device - did you specify"
273 " an out-of-range profile CPU?\n");
274 rc = -err;
275 goto out;
276 } else if (err == EINVAL) {
277 if (!opts->exclude_guest_missing &&
278 (attr->exclude_guest || attr->exclude_host)) {
279 pr_debug("Old kernel, cannot exclude "
280 "guest or host samples.\n");
281 opts->exclude_guest_missing = true;
282 goto fallback_missing_features;
283 } else if (!opts->sample_id_all_missing) {
284 /*
285 * Old kernel, no attr->sample_id_type_all field
286 */
287 opts->sample_id_all_missing = true;
288 if (!opts->sample_time && !opts->raw_samples && !time_needed)
289 attr->sample_type &= ~PERF_SAMPLE_TIME;
290
291 goto retry_sample_id;
292 }
293 }
294
295 /*
296 * If it's cycles then fall back to hrtimer
297 * based cpu-clock-tick sw counter, which
298 * is always available even if no PMU support.
299 *
300 * PPC returns ENXIO until 2.6.37 (behavior changed
301 * with commit b0a873e).
302 */
303 if ((err == ENOENT || err == ENXIO)
304 && attr->type == PERF_TYPE_HARDWARE
305 && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
306
307 if (verbose) 240 if (verbose)
308 ui__warning("The cycles event is not supported, " 241 ui__warning("%s\n", msg);
309 "trying to fall back to cpu-clock-ticks\n");
310 attr->type = PERF_TYPE_SOFTWARE;
311 attr->config = PERF_COUNT_SW_CPU_CLOCK;
312 if (pos->name) {
313 free(pos->name);
314 pos->name = NULL;
315 }
316 goto try_again; 242 goto try_again;
317 } 243 }
318 244
319 if (err == ENOENT) { 245 rc = -errno;
320 ui__error("The %s event is not supported.\n", 246 perf_evsel__open_strerror(pos, &opts->target,
321 perf_evsel__name(pos)); 247 errno, msg, sizeof(msg));
322 rc = -err; 248 ui__error("%s\n", msg);
323 goto out;
324 } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
325 ui__error("\'precise\' request may not be supported. "
326 "Try removing 'p' modifier\n");
327 rc = -err;
328 goto out;
329 }
330
331 printf("\n");
332 error("sys_perf_event_open() syscall returned with %d "
333 "(%s) for event %s. /bin/dmesg may provide "
334 "additional information.\n",
335 err, strerror(err), perf_evsel__name(pos));
336
337#if defined(__i386__) || defined(__x86_64__)
338 if (attr->type == PERF_TYPE_HARDWARE &&
339 err == EOPNOTSUPP) {
340 pr_err("No hardware sampling interrupt available."
341 " No APIC? If so then you can boot the kernel"
342 " with the \"lapic\" boot parameter to"
343 " force-enable it.\n");
344 rc = -err;
345 goto out;
346 }
347#endif
348
349 pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
350 rc = -err;
351 goto out; 249 goto out;
352 } 250 }
353 } 251 }
@@ -430,10 +328,6 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
430{ 328{
431 int err; 329 int err;
432 struct perf_tool *tool = data; 330 struct perf_tool *tool = data;
433
434 if (machine__is_host(machine))
435 return;
436
437 /* 331 /*
438 *As for guest kernel when processing subcommand record&report, 332 *As for guest kernel when processing subcommand record&report,
439 *we arrange module mmap prior to guest kernel mmap and trigger 333 *we arrange module mmap prior to guest kernel mmap and trigger
@@ -592,6 +486,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
592 goto out_delete_session; 486 goto out_delete_session;
593 } 487 }
594 488
489 if (!evsel_list->nr_groups)
490 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
491
595 /* 492 /*
596 * perf_session__delete(session) will be called at perf_record__exit() 493 * perf_session__delete(session) will be called at perf_record__exit()
597 */ 494 */
@@ -618,12 +515,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
618 515
619 rec->post_processing_offset = lseek(output, 0, SEEK_CUR); 516 rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
620 517
621 machine = perf_session__find_host_machine(session); 518 machine = &session->machines.host;
622 if (!machine) {
623 pr_err("Couldn't find native kernel information.\n");
624 err = -1;
625 goto out_delete_session;
626 }
627 519
628 if (opts->pipe_output) { 520 if (opts->pipe_output) {
629 err = perf_event__synthesize_attrs(tool, session, 521 err = perf_event__synthesize_attrs(tool, session,
@@ -676,9 +568,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
676 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 568 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
677 "Check /proc/modules permission or run as root.\n"); 569 "Check /proc/modules permission or run as root.\n");
678 570
679 if (perf_guest) 571 if (perf_guest) {
680 perf_session__process_machines(session, tool, 572 machines__process_guests(&session->machines,
681 perf_event__synthesize_guest_os); 573 perf_event__synthesize_guest_os, tool);
574 }
682 575
683 if (!opts->target.system_wide) 576 if (!opts->target.system_wide)
684 err = perf_event__synthesize_thread_map(tool, evsel_list->threads, 577 err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
@@ -875,11 +768,10 @@ static int get_stack_size(char *str, unsigned long *_size)
875} 768}
876#endif /* LIBUNWIND_SUPPORT */ 769#endif /* LIBUNWIND_SUPPORT */
877 770
878static int 771int record_parse_callchain_opt(const struct option *opt,
879parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, 772 const char *arg, int unset)
880 int unset)
881{ 773{
882 struct perf_record *rec = (struct perf_record *)opt->value; 774 struct perf_record_opts *opts = opt->value;
883 char *tok, *name, *saveptr = NULL; 775 char *tok, *name, *saveptr = NULL;
884 char *buf; 776 char *buf;
885 int ret = -1; 777 int ret = -1;
@@ -905,7 +797,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
905 /* Framepointer style */ 797 /* Framepointer style */
906 if (!strncmp(name, "fp", sizeof("fp"))) { 798 if (!strncmp(name, "fp", sizeof("fp"))) {
907 if (!strtok_r(NULL, ",", &saveptr)) { 799 if (!strtok_r(NULL, ",", &saveptr)) {
908 rec->opts.call_graph = CALLCHAIN_FP; 800 opts->call_graph = CALLCHAIN_FP;
909 ret = 0; 801 ret = 0;
910 } else 802 } else
911 pr_err("callchain: No more arguments " 803 pr_err("callchain: No more arguments "
@@ -918,20 +810,20 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
918 const unsigned long default_stack_dump_size = 8192; 810 const unsigned long default_stack_dump_size = 8192;
919 811
920 ret = 0; 812 ret = 0;
921 rec->opts.call_graph = CALLCHAIN_DWARF; 813 opts->call_graph = CALLCHAIN_DWARF;
922 rec->opts.stack_dump_size = default_stack_dump_size; 814 opts->stack_dump_size = default_stack_dump_size;
923 815
924 tok = strtok_r(NULL, ",", &saveptr); 816 tok = strtok_r(NULL, ",", &saveptr);
925 if (tok) { 817 if (tok) {
926 unsigned long size = 0; 818 unsigned long size = 0;
927 819
928 ret = get_stack_size(tok, &size); 820 ret = get_stack_size(tok, &size);
929 rec->opts.stack_dump_size = size; 821 opts->stack_dump_size = size;
930 } 822 }
931 823
932 if (!ret) 824 if (!ret)
933 pr_debug("callchain: stack dump size %d\n", 825 pr_debug("callchain: stack dump size %d\n",
934 rec->opts.stack_dump_size); 826 opts->stack_dump_size);
935#endif /* LIBUNWIND_SUPPORT */ 827#endif /* LIBUNWIND_SUPPORT */
936 } else { 828 } else {
937 pr_err("callchain: Unknown -g option " 829 pr_err("callchain: Unknown -g option "
@@ -944,7 +836,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
944 free(buf); 836 free(buf);
945 837
946 if (!ret) 838 if (!ret)
947 pr_debug("callchain: type %d\n", rec->opts.call_graph); 839 pr_debug("callchain: type %d\n", opts->call_graph);
948 840
949 return ret; 841 return ret;
950} 842}
@@ -982,9 +874,9 @@ static struct perf_record record = {
982#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " 874#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
983 875
984#ifdef LIBUNWIND_SUPPORT 876#ifdef LIBUNWIND_SUPPORT
985static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; 877const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
986#else 878#else
987static const char callchain_help[] = CALLCHAIN_HELP "[fp]"; 879const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
988#endif 880#endif
989 881
990/* 882/*
@@ -1028,9 +920,9 @@ const struct option record_options[] = {
1028 "number of mmap data pages"), 920 "number of mmap data pages"),
1029 OPT_BOOLEAN(0, "group", &record.opts.group, 921 OPT_BOOLEAN(0, "group", &record.opts.group,
1030 "put the counters into a counter group"), 922 "put the counters into a counter group"),
1031 OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", 923 OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
1032 callchain_help, &parse_callchain_opt, 924 "mode[,dump_size]", record_callchain_help,
1033 "fp"), 925 &record_parse_callchain_opt, "fp"),
1034 OPT_INCR('v', "verbose", &verbose, 926 OPT_INCR('v', "verbose", &verbose,
1035 "be more verbose (show counter open errors, etc)"), 927 "be more verbose (show counter open errors, etc)"),
1036 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), 928 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index fc251005dd3d..96b5a7fee4bb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -8,6 +8,7 @@
8#include "builtin.h" 8#include "builtin.h"
9 9
10#include "util/util.h" 10#include "util/util.h"
11#include "util/cache.h"
11 12
12#include "util/annotate.h" 13#include "util/annotate.h"
13#include "util/color.h" 14#include "util/color.h"
@@ -54,6 +55,16 @@ struct perf_report {
54 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 55 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
55}; 56};
56 57
58static int perf_report_config(const char *var, const char *value, void *cb)
59{
60 if (!strcmp(var, "report.group")) {
61 symbol_conf.event_group = perf_config_bool(var, value);
62 return 0;
63 }
64
65 return perf_default_config(var, value, cb);
66}
67
57static int perf_report__add_branch_hist_entry(struct perf_tool *tool, 68static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
58 struct addr_location *al, 69 struct addr_location *al,
59 struct perf_sample *sample, 70 struct perf_sample *sample,
@@ -299,6 +310,21 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
299 char unit; 310 char unit;
300 unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE]; 311 unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
301 u64 nr_events = self->stats.total_period; 312 u64 nr_events = self->stats.total_period;
313 struct perf_evsel *evsel = hists_to_evsel(self);
314 char buf[512];
315 size_t size = sizeof(buf);
316
317 if (symbol_conf.event_group && evsel->nr_members > 1) {
318 struct perf_evsel *pos;
319
320 perf_evsel__group_desc(evsel, buf, size);
321 evname = buf;
322
323 for_each_group_member(pos, evsel) {
324 nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
325 nr_events += pos->hists.stats.total_period;
326 }
327 }
302 328
303 nr_samples = convert_unit(nr_samples, &unit); 329 nr_samples = convert_unit(nr_samples, &unit);
304 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit); 330 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
@@ -319,6 +345,10 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
319 struct hists *hists = &pos->hists; 345 struct hists *hists = &pos->hists;
320 const char *evname = perf_evsel__name(pos); 346 const char *evname = perf_evsel__name(pos);
321 347
348 if (symbol_conf.event_group &&
349 !perf_evsel__is_group_leader(pos))
350 continue;
351
322 hists__fprintf_nr_sample_events(hists, evname, stdout); 352 hists__fprintf_nr_sample_events(hists, evname, stdout);
323 hists__fprintf(hists, true, 0, 0, stdout); 353 hists__fprintf(hists, true, 0, 0, stdout);
324 fprintf(stdout, "\n\n"); 354 fprintf(stdout, "\n\n");
@@ -372,7 +402,7 @@ static int __cmd_report(struct perf_report *rep)
372 if (ret) 402 if (ret)
373 goto out_delete; 403 goto out_delete;
374 404
375 kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION]; 405 kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION];
376 kernel_kmap = map__kmap(kernel_map); 406 kernel_kmap = map__kmap(kernel_map);
377 if (kernel_map == NULL || 407 if (kernel_map == NULL ||
378 (kernel_map->dso->hit && 408 (kernel_map->dso->hit &&
@@ -416,8 +446,16 @@ static int __cmd_report(struct perf_report *rep)
416 hists->symbol_filter_str = rep->symbol_filter_str; 446 hists->symbol_filter_str = rep->symbol_filter_str;
417 447
418 hists__collapse_resort(hists); 448 hists__collapse_resort(hists);
419 hists__output_resort(hists);
420 nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE]; 449 nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
450
451 /* Non-group events are considered as leader */
452 if (symbol_conf.event_group &&
453 !perf_evsel__is_group_leader(pos)) {
454 struct hists *leader_hists = &pos->leader->hists;
455
456 hists__match(leader_hists, hists);
457 hists__link(leader_hists, hists);
458 }
421 } 459 }
422 460
423 if (nr_samples == 0) { 461 if (nr_samples == 0) {
@@ -425,11 +463,22 @@ static int __cmd_report(struct perf_report *rep)
425 goto out_delete; 463 goto out_delete;
426 } 464 }
427 465
466 list_for_each_entry(pos, &session->evlist->entries, node)
467 hists__output_resort(&pos->hists);
468
428 if (use_browser > 0) { 469 if (use_browser > 0) {
429 if (use_browser == 1) { 470 if (use_browser == 1) {
430 perf_evlist__tui_browse_hists(session->evlist, help, 471 ret = perf_evlist__tui_browse_hists(session->evlist,
431 NULL, 472 help,
432 &session->header.env); 473 NULL,
474 &session->header.env);
475 /*
476 * Usually "ret" is the last pressed key, and we only
477 * care if the key notifies us to switch data file.
478 */
479 if (ret != K_SWITCH_INPUT_DATA)
480 ret = 0;
481
433 } else if (use_browser == 2) { 482 } else if (use_browser == 2) {
434 perf_evlist__gtk_browse_hists(session->evlist, help, 483 perf_evlist__gtk_browse_hists(session->evlist, help,
435 NULL); 484 NULL);
@@ -595,8 +644,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
595 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 644 OPT_BOOLEAN(0, "stdio", &report.use_stdio,
596 "Use the stdio interface"), 645 "Use the stdio interface"),
597 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 646 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
598 "sort by key(s): pid, comm, dso, symbol, parent, dso_to," 647 "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline,"
599 " dso_from, symbol_to, symbol_from, mispredict"), 648 " dso_to, dso_from, symbol_to, symbol_from, mispredict"),
600 OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 649 OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
601 "Show sample percentage for different cpu modes"), 650 "Show sample percentage for different cpu modes"),
602 OPT_STRING('p', "parent", &parent_pattern, "regex", 651 OPT_STRING('p', "parent", &parent_pattern, "regex",
@@ -638,6 +687,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
638 "Specify disassembler style (e.g. -M intel for intel syntax)"), 687 "Specify disassembler style (e.g. -M intel for intel syntax)"),
639 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 688 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
640 "Show a column with the sum of periods"), 689 "Show a column with the sum of periods"),
690 OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
691 "Show event group information together"),
641 OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", 692 OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "",
642 "use branch records for histogram filling", parse_branch_mode), 693 "use branch records for histogram filling", parse_branch_mode),
643 OPT_STRING(0, "objdump", &objdump_path, "path", 694 OPT_STRING(0, "objdump", &objdump_path, "path",
@@ -645,6 +696,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
645 OPT_END() 696 OPT_END()
646 }; 697 };
647 698
699 perf_config(perf_report_config, NULL);
700
648 argc = parse_options(argc, argv, options, report_usage, 0); 701 argc = parse_options(argc, argv, options, report_usage, 0);
649 702
650 if (report.use_stdio) 703 if (report.use_stdio)
@@ -663,6 +716,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
663 else 716 else
664 input_name = "perf.data"; 717 input_name = "perf.data";
665 } 718 }
719
720 if (strcmp(input_name, "-") != 0)
721 setup_browser(true);
722 else {
723 use_browser = 0;
724 perf_hpp__column_enable(PERF_HPP__OVERHEAD);
725 perf_hpp__init();
726 }
727
728repeat:
666 session = perf_session__new(input_name, O_RDONLY, 729 session = perf_session__new(input_name, O_RDONLY,
667 report.force, false, &report.tool); 730 report.force, false, &report.tool);
668 if (session == NULL) 731 if (session == NULL)
@@ -688,14 +751,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
688 751
689 } 752 }
690 753
691 if (strcmp(input_name, "-") != 0) 754 if (setup_sorting() < 0)
692 setup_browser(true); 755 usage_with_options(report_usage, options);
693 else {
694 use_browser = 0;
695 perf_hpp__init();
696 }
697
698 setup_sorting(report_usage, options);
699 756
700 /* 757 /*
701 * Only in the newt browser we are doing integrated annotation, 758 * Only in the newt browser we are doing integrated annotation,
@@ -763,6 +820,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
763 } 820 }
764 821
765 ret = __cmd_report(&report); 822 ret = __cmd_report(&report);
823 if (ret == K_SWITCH_INPUT_DATA) {
824 perf_session__delete(session);
825 goto repeat;
826 } else
827 ret = 0;
828
766error: 829error:
767 perf_session__delete(session); 830 perf_session__delete(session);
768 return ret; 831 return ret;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cc28b85dabd5..138229439a93 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1475,9 +1475,9 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
1475 goto out_delete; 1475 goto out_delete;
1476 } 1476 }
1477 1477
1478 sched->nr_events = session->hists.stats.nr_events[0]; 1478 sched->nr_events = session->stats.nr_events[0];
1479 sched->nr_lost_events = session->hists.stats.total_lost; 1479 sched->nr_lost_events = session->stats.total_lost;
1480 sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; 1480 sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
1481 } 1481 }
1482 1482
1483 if (destroy) 1483 if (destroy)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b363e7b292b2..92d4658f56fb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -692,7 +692,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
692 const char *arg, int unset __maybe_unused) 692 const char *arg, int unset __maybe_unused)
693{ 693{
694 char *tok; 694 char *tok;
695 int i, imax = sizeof(all_output_options) / sizeof(struct output_option); 695 int i, imax = ARRAY_SIZE(all_output_options);
696 int j; 696 int j;
697 int rc = 0; 697 int rc = 0;
698 char *str = strdup(arg); 698 char *str = strdup(arg);
@@ -909,18 +909,6 @@ static const char *ends_with(const char *str, const char *suffix)
909 return NULL; 909 return NULL;
910} 910}
911 911
912static char *ltrim(char *str)
913{
914 int len = strlen(str);
915
916 while (len && isspace(*str)) {
917 len--;
918 str++;
919 }
920
921 return str;
922}
923
924static int read_script_info(struct script_desc *desc, const char *filename) 912static int read_script_info(struct script_desc *desc, const char *filename)
925{ 913{
926 char line[BUFSIZ], *p; 914 char line[BUFSIZ], *p;
@@ -1487,7 +1475,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1487 return -1; 1475 return -1;
1488 } 1476 }
1489 1477
1490 perf_session__fprintf_info(session, stdout, show_full_info); 1478 if (!script_name && !generate_script_lang)
1479 perf_session__fprintf_info(session, stdout, show_full_info);
1491 1480
1492 if (!no_callchain) 1481 if (!no_callchain)
1493 symbol_conf.use_callchain = true; 1482 symbol_conf.use_callchain = true;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c247faca7127..99848761f573 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -65,6 +65,11 @@
65#define CNTR_NOT_SUPPORTED "<not supported>" 65#define CNTR_NOT_SUPPORTED "<not supported>"
66#define CNTR_NOT_COUNTED "<not counted>" 66#define CNTR_NOT_COUNTED "<not counted>"
67 67
68static void print_stat(int argc, const char **argv);
69static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
70static void print_counter(struct perf_evsel *counter, char *prefix);
71static void print_aggr_socket(char *prefix);
72
68static struct perf_evlist *evsel_list; 73static struct perf_evlist *evsel_list;
69 74
70static struct perf_target target = { 75static struct perf_target target = {
@@ -75,6 +80,7 @@ static int run_count = 1;
75static bool no_inherit = false; 80static bool no_inherit = false;
76static bool scale = true; 81static bool scale = true;
77static bool no_aggr = false; 82static bool no_aggr = false;
83static bool aggr_socket = false;
78static pid_t child_pid = -1; 84static pid_t child_pid = -1;
79static bool null_run = false; 85static bool null_run = false;
80static int detailed_run = 0; 86static int detailed_run = 0;
@@ -87,6 +93,9 @@ static FILE *output = NULL;
87static const char *pre_cmd = NULL; 93static const char *pre_cmd = NULL;
88static const char *post_cmd = NULL; 94static const char *post_cmd = NULL;
89static bool sync_run = false; 95static bool sync_run = false;
96static unsigned int interval = 0;
97static struct timespec ref_time;
98static struct cpu_map *sock_map;
90 99
91static volatile int done = 0; 100static volatile int done = 0;
92 101
@@ -94,6 +103,28 @@ struct perf_stat {
94 struct stats res_stats[3]; 103 struct stats res_stats[3];
95}; 104};
96 105
106static inline void diff_timespec(struct timespec *r, struct timespec *a,
107 struct timespec *b)
108{
109 r->tv_sec = a->tv_sec - b->tv_sec;
110 if (a->tv_nsec < b->tv_nsec) {
111 r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
112 r->tv_sec--;
113 } else {
114 r->tv_nsec = a->tv_nsec - b->tv_nsec ;
115 }
116}
117
118static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
119{
120 return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
121}
122
123static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
124{
125 return perf_evsel__cpus(evsel)->nr;
126}
127
97static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) 128static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
98{ 129{
99 evsel->priv = zalloc(sizeof(struct perf_stat)); 130 evsel->priv = zalloc(sizeof(struct perf_stat));
@@ -106,14 +137,27 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
106 evsel->priv = NULL; 137 evsel->priv = NULL;
107} 138}
108 139
109static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) 140static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
110{ 141{
111 return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; 142 void *addr;
143 size_t sz;
144
145 sz = sizeof(*evsel->counts) +
146 (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
147
148 addr = zalloc(sz);
149 if (!addr)
150 return -ENOMEM;
151
152 evsel->prev_raw_counts = addr;
153
154 return 0;
112} 155}
113 156
114static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) 157static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
115{ 158{
116 return perf_evsel__cpus(evsel)->nr; 159 free(evsel->prev_raw_counts);
160 evsel->prev_raw_counts = NULL;
117} 161}
118 162
119static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; 163static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
@@ -132,8 +176,6 @@ static struct stats walltime_nsecs_stats;
132static int create_perf_stat_counter(struct perf_evsel *evsel) 176static int create_perf_stat_counter(struct perf_evsel *evsel)
133{ 177{
134 struct perf_event_attr *attr = &evsel->attr; 178 struct perf_event_attr *attr = &evsel->attr;
135 bool exclude_guest_missing = false;
136 int ret;
137 179
138 if (scale) 180 if (scale)
139 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 181 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -141,38 +183,16 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
141 183
142 attr->inherit = !no_inherit; 184 attr->inherit = !no_inherit;
143 185
144retry: 186 if (perf_target__has_cpu(&target))
145 if (exclude_guest_missing) 187 return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
146 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
147
148 if (perf_target__has_cpu(&target)) {
149 ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
150 if (ret)
151 goto check_ret;
152 return 0;
153 }
154 188
155 if (!perf_target__has_task(&target) && 189 if (!perf_target__has_task(&target) &&
156 !perf_evsel__is_group_member(evsel)) { 190 perf_evsel__is_group_leader(evsel)) {
157 attr->disabled = 1; 191 attr->disabled = 1;
158 attr->enable_on_exec = 1; 192 attr->enable_on_exec = 1;
159 } 193 }
160 194
161 ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); 195 return perf_evsel__open_per_thread(evsel, evsel_list->threads);
162 if (!ret)
163 return 0;
164 /* fall through */
165check_ret:
166 if (ret && errno == EINVAL) {
167 if (!exclude_guest_missing &&
168 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
169 pr_debug("Old kernel, cannot exclude "
170 "guest or host samples.\n");
171 exclude_guest_missing = true;
172 goto retry;
173 }
174 }
175 return ret;
176} 196}
177 197
178/* 198/*
@@ -269,15 +289,79 @@ static int read_counter(struct perf_evsel *counter)
269 return 0; 289 return 0;
270} 290}
271 291
292static void print_interval(void)
293{
294 static int num_print_interval;
295 struct perf_evsel *counter;
296 struct perf_stat *ps;
297 struct timespec ts, rs;
298 char prefix[64];
299
300 if (no_aggr) {
301 list_for_each_entry(counter, &evsel_list->entries, node) {
302 ps = counter->priv;
303 memset(ps->res_stats, 0, sizeof(ps->res_stats));
304 read_counter(counter);
305 }
306 } else {
307 list_for_each_entry(counter, &evsel_list->entries, node) {
308 ps = counter->priv;
309 memset(ps->res_stats, 0, sizeof(ps->res_stats));
310 read_counter_aggr(counter);
311 }
312 }
313 clock_gettime(CLOCK_MONOTONIC, &ts);
314 diff_timespec(&rs, &ts, &ref_time);
315 sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);
316
317 if (num_print_interval == 0 && !csv_output) {
318 if (aggr_socket)
319 fprintf(output, "# time socket cpus counts events\n");
320 else if (no_aggr)
321 fprintf(output, "# time CPU counts events\n");
322 else
323 fprintf(output, "# time counts events\n");
324 }
325
326 if (++num_print_interval == 25)
327 num_print_interval = 0;
328
329 if (aggr_socket)
330 print_aggr_socket(prefix);
331 else if (no_aggr) {
332 list_for_each_entry(counter, &evsel_list->entries, node)
333 print_counter(counter, prefix);
334 } else {
335 list_for_each_entry(counter, &evsel_list->entries, node)
336 print_counter_aggr(counter, prefix);
337 }
338}
339
272static int __run_perf_stat(int argc __maybe_unused, const char **argv) 340static int __run_perf_stat(int argc __maybe_unused, const char **argv)
273{ 341{
342 char msg[512];
274 unsigned long long t0, t1; 343 unsigned long long t0, t1;
275 struct perf_evsel *counter; 344 struct perf_evsel *counter;
345 struct timespec ts;
276 int status = 0; 346 int status = 0;
277 int child_ready_pipe[2], go_pipe[2]; 347 int child_ready_pipe[2], go_pipe[2];
278 const bool forks = (argc > 0); 348 const bool forks = (argc > 0);
279 char buf; 349 char buf;
280 350
351 if (interval) {
352 ts.tv_sec = interval / 1000;
353 ts.tv_nsec = (interval % 1000) * 1000000;
354 } else {
355 ts.tv_sec = 1;
356 ts.tv_nsec = 0;
357 }
358
359 if (aggr_socket
360 && cpu_map__build_socket_map(evsel_list->cpus, &sock_map)) {
361 perror("cannot build socket map");
362 return -1;
363 }
364
281 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { 365 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
282 perror("failed to create pipes"); 366 perror("failed to create pipes");
283 return -1; 367 return -1;
@@ -348,20 +432,13 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
348 continue; 432 continue;
349 } 433 }
350 434
351 if (errno == EPERM || errno == EACCES) { 435 perf_evsel__open_strerror(counter, &target,
352 error("You may not have permission to collect %sstats.\n" 436 errno, msg, sizeof(msg));
353 "\t Consider tweaking" 437 ui__error("%s\n", msg);
354 " /proc/sys/kernel/perf_event_paranoid or running as root.", 438
355 target.system_wide ? "system-wide " : "");
356 } else {
357 error("open_counter returned with %d (%s). "
358 "/bin/dmesg may provide additional information.\n",
359 errno, strerror(errno));
360 }
361 if (child_pid != -1) 439 if (child_pid != -1)
362 kill(child_pid, SIGTERM); 440 kill(child_pid, SIGTERM);
363 441
364 pr_err("Not all events could be opened.\n");
365 return -1; 442 return -1;
366 } 443 }
367 counter->supported = true; 444 counter->supported = true;
@@ -377,14 +454,25 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
377 * Enable counters and exec the command: 454 * Enable counters and exec the command:
378 */ 455 */
379 t0 = rdclock(); 456 t0 = rdclock();
457 clock_gettime(CLOCK_MONOTONIC, &ref_time);
380 458
381 if (forks) { 459 if (forks) {
382 close(go_pipe[1]); 460 close(go_pipe[1]);
461 if (interval) {
462 while (!waitpid(child_pid, &status, WNOHANG)) {
463 nanosleep(&ts, NULL);
464 print_interval();
465 }
466 }
383 wait(&status); 467 wait(&status);
384 if (WIFSIGNALED(status)) 468 if (WIFSIGNALED(status))
385 psignal(WTERMSIG(status), argv[0]); 469 psignal(WTERMSIG(status), argv[0]);
386 } else { 470 } else {
387 while(!done) sleep(1); 471 while (!done) {
472 nanosleep(&ts, NULL);
473 if (interval)
474 print_interval();
475 }
388 } 476 }
389 477
390 t1 = rdclock(); 478 t1 = rdclock();
@@ -454,13 +542,21 @@ static void print_noise(struct perf_evsel *evsel, double avg)
454 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); 542 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
455} 543}
456 544
457static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) 545static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
458{ 546{
459 double msecs = avg / 1e6; 547 double msecs = avg / 1e6;
460 char cpustr[16] = { '\0', }; 548 char cpustr[16] = { '\0', };
461 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; 549 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
462 550
463 if (no_aggr) 551 if (aggr_socket)
552 sprintf(cpustr, "S%*d%s%*d%s",
553 csv_output ? 0 : -5,
554 cpu,
555 csv_sep,
556 csv_output ? 0 : 4,
557 nr,
558 csv_sep);
559 else if (no_aggr)
464 sprintf(cpustr, "CPU%*d%s", 560 sprintf(cpustr, "CPU%*d%s",
465 csv_output ? 0 : -4, 561 csv_output ? 0 : -4,
466 perf_evsel__cpus(evsel)->map[cpu], csv_sep); 562 perf_evsel__cpus(evsel)->map[cpu], csv_sep);
@@ -470,7 +566,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
470 if (evsel->cgrp) 566 if (evsel->cgrp)
471 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 567 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
472 568
473 if (csv_output) 569 if (csv_output || interval)
474 return; 570 return;
475 571
476 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) 572 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
@@ -659,7 +755,7 @@ static void print_ll_cache_misses(int cpu,
659 fprintf(output, " of all LL-cache hits "); 755 fprintf(output, " of all LL-cache hits ");
660} 756}
661 757
662static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) 758static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
663{ 759{
664 double total, ratio = 0.0; 760 double total, ratio = 0.0;
665 char cpustr[16] = { '\0', }; 761 char cpustr[16] = { '\0', };
@@ -672,7 +768,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
672 else 768 else
673 fmt = "%s%18.0f%s%-25s"; 769 fmt = "%s%18.0f%s%-25s";
674 770
675 if (no_aggr) 771 if (aggr_socket)
772 sprintf(cpustr, "S%*d%s%*d%s",
773 csv_output ? 0 : -5,
774 cpu,
775 csv_sep,
776 csv_output ? 0 : 4,
777 nr,
778 csv_sep);
779 else if (no_aggr)
676 sprintf(cpustr, "CPU%*d%s", 780 sprintf(cpustr, "CPU%*d%s",
677 csv_output ? 0 : -4, 781 csv_output ? 0 : -4,
678 perf_evsel__cpus(evsel)->map[cpu], csv_sep); 782 perf_evsel__cpus(evsel)->map[cpu], csv_sep);
@@ -684,12 +788,11 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
684 if (evsel->cgrp) 788 if (evsel->cgrp)
685 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 789 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
686 790
687 if (csv_output) 791 if (csv_output || interval)
688 return; 792 return;
689 793
690 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { 794 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
691 total = avg_stats(&runtime_cycles_stats[cpu]); 795 total = avg_stats(&runtime_cycles_stats[cpu]);
692
693 if (total) 796 if (total)
694 ratio = avg / total; 797 ratio = avg / total;
695 798
@@ -779,16 +882,83 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
779 } 882 }
780} 883}
781 884
885static void print_aggr_socket(char *prefix)
886{
887 struct perf_evsel *counter;
888 u64 ena, run, val;
889 int cpu, s, s2, sock, nr;
890
891 if (!sock_map)
892 return;
893
894 for (s = 0; s < sock_map->nr; s++) {
895 sock = cpu_map__socket(sock_map, s);
896 list_for_each_entry(counter, &evsel_list->entries, node) {
897 val = ena = run = 0;
898 nr = 0;
899 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
900 s2 = cpu_map__get_socket(evsel_list->cpus, cpu);
901 if (s2 != sock)
902 continue;
903 val += counter->counts->cpu[cpu].val;
904 ena += counter->counts->cpu[cpu].ena;
905 run += counter->counts->cpu[cpu].run;
906 nr++;
907 }
908 if (prefix)
909 fprintf(output, "%s", prefix);
910
911 if (run == 0 || ena == 0) {
912 fprintf(output, "S%*d%s%*d%s%*s%s%*s",
913 csv_output ? 0 : -5,
914 s,
915 csv_sep,
916 csv_output ? 0 : 4,
917 nr,
918 csv_sep,
919 csv_output ? 0 : 18,
920 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
921 csv_sep,
922 csv_output ? 0 : -24,
923 perf_evsel__name(counter));
924 if (counter->cgrp)
925 fprintf(output, "%s%s",
926 csv_sep, counter->cgrp->name);
927
928 fputc('\n', output);
929 continue;
930 }
931
932 if (nsec_counter(counter))
933 nsec_printout(sock, nr, counter, val);
934 else
935 abs_printout(sock, nr, counter, val);
936
937 if (!csv_output) {
938 print_noise(counter, 1.0);
939
940 if (run != ena)
941 fprintf(output, " (%.2f%%)",
942 100.0 * run / ena);
943 }
944 fputc('\n', output);
945 }
946 }
947}
948
782/* 949/*
783 * Print out the results of a single counter: 950 * Print out the results of a single counter:
784 * aggregated counts in system-wide mode 951 * aggregated counts in system-wide mode
785 */ 952 */
786static void print_counter_aggr(struct perf_evsel *counter) 953static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
787{ 954{
788 struct perf_stat *ps = counter->priv; 955 struct perf_stat *ps = counter->priv;
789 double avg = avg_stats(&ps->res_stats[0]); 956 double avg = avg_stats(&ps->res_stats[0]);
790 int scaled = counter->counts->scaled; 957 int scaled = counter->counts->scaled;
791 958
959 if (prefix)
960 fprintf(output, "%s", prefix);
961
792 if (scaled == -1) { 962 if (scaled == -1) {
793 fprintf(output, "%*s%s%*s", 963 fprintf(output, "%*s%s%*s",
794 csv_output ? 0 : 18, 964 csv_output ? 0 : 18,
@@ -805,9 +975,9 @@ static void print_counter_aggr(struct perf_evsel *counter)
805 } 975 }
806 976
807 if (nsec_counter(counter)) 977 if (nsec_counter(counter))
808 nsec_printout(-1, counter, avg); 978 nsec_printout(-1, 0, counter, avg);
809 else 979 else
810 abs_printout(-1, counter, avg); 980 abs_printout(-1, 0, counter, avg);
811 981
812 print_noise(counter, avg); 982 print_noise(counter, avg);
813 983
@@ -831,7 +1001,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
831 * Print out the results of a single counter: 1001 * Print out the results of a single counter:
832 * does not use aggregated count in system-wide 1002 * does not use aggregated count in system-wide
833 */ 1003 */
834static void print_counter(struct perf_evsel *counter) 1004static void print_counter(struct perf_evsel *counter, char *prefix)
835{ 1005{
836 u64 ena, run, val; 1006 u64 ena, run, val;
837 int cpu; 1007 int cpu;
@@ -840,6 +1010,10 @@ static void print_counter(struct perf_evsel *counter)
840 val = counter->counts->cpu[cpu].val; 1010 val = counter->counts->cpu[cpu].val;
841 ena = counter->counts->cpu[cpu].ena; 1011 ena = counter->counts->cpu[cpu].ena;
842 run = counter->counts->cpu[cpu].run; 1012 run = counter->counts->cpu[cpu].run;
1013
1014 if (prefix)
1015 fprintf(output, "%s", prefix);
1016
843 if (run == 0 || ena == 0) { 1017 if (run == 0 || ena == 0) {
844 fprintf(output, "CPU%*d%s%*s%s%*s", 1018 fprintf(output, "CPU%*d%s%*s%s%*s",
845 csv_output ? 0 : -4, 1019 csv_output ? 0 : -4,
@@ -859,9 +1033,9 @@ static void print_counter(struct perf_evsel *counter)
859 } 1033 }
860 1034
861 if (nsec_counter(counter)) 1035 if (nsec_counter(counter))
862 nsec_printout(cpu, counter, val); 1036 nsec_printout(cpu, 0, counter, val);
863 else 1037 else
864 abs_printout(cpu, counter, val); 1038 abs_printout(cpu, 0, counter, val);
865 1039
866 if (!csv_output) { 1040 if (!csv_output) {
867 print_noise(counter, 1.0); 1041 print_noise(counter, 1.0);
@@ -899,12 +1073,14 @@ static void print_stat(int argc, const char **argv)
899 fprintf(output, ":\n\n"); 1073 fprintf(output, ":\n\n");
900 } 1074 }
901 1075
902 if (no_aggr) { 1076 if (aggr_socket)
1077 print_aggr_socket(NULL);
1078 else if (no_aggr) {
903 list_for_each_entry(counter, &evsel_list->entries, node) 1079 list_for_each_entry(counter, &evsel_list->entries, node)
904 print_counter(counter); 1080 print_counter(counter, NULL);
905 } else { 1081 } else {
906 list_for_each_entry(counter, &evsel_list->entries, node) 1082 list_for_each_entry(counter, &evsel_list->entries, node)
907 print_counter_aggr(counter); 1083 print_counter_aggr(counter, NULL);
908 } 1084 }
909 1085
910 if (!csv_output) { 1086 if (!csv_output) {
@@ -925,7 +1101,7 @@ static volatile int signr = -1;
925 1101
926static void skip_signal(int signo) 1102static void skip_signal(int signo)
927{ 1103{
928 if(child_pid == -1) 1104 if ((child_pid == -1) || interval)
929 done = 1; 1105 done = 1;
930 1106
931 signr = signo; 1107 signr = signo;
@@ -1145,6 +1321,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1145 "command to run prior to the measured command"), 1321 "command to run prior to the measured command"),
1146 OPT_STRING(0, "post", &post_cmd, "command", 1322 OPT_STRING(0, "post", &post_cmd, "command",
1147 "command to run after to the measured command"), 1323 "command to run after to the measured command"),
1324 OPT_UINTEGER('I', "interval-print", &interval,
1325 "print counts at regular interval in ms (>= 100)"),
1326 OPT_BOOLEAN(0, "aggr-socket", &aggr_socket, "aggregate counts per processor socket"),
1148 OPT_END() 1327 OPT_END()
1149 }; 1328 };
1150 const char * const stat_usage[] = { 1329 const char * const stat_usage[] = {
@@ -1231,6 +1410,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1231 usage_with_options(stat_usage, options); 1410 usage_with_options(stat_usage, options);
1232 } 1411 }
1233 1412
1413 if (aggr_socket) {
1414 if (!perf_target__has_cpu(&target)) {
1415 fprintf(stderr, "--aggr-socket only available in system-wide mode (-a)\n");
1416 usage_with_options(stat_usage, options);
1417 }
1418 no_aggr = true;
1419 }
1420
1234 if (add_default_attributes()) 1421 if (add_default_attributes())
1235 goto out; 1422 goto out;
1236 1423
@@ -1245,12 +1432,23 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1245 usage_with_options(stat_usage, options); 1432 usage_with_options(stat_usage, options);
1246 return -1; 1433 return -1;
1247 } 1434 }
1435 if (interval && interval < 100) {
1436 pr_err("print interval must be >= 100ms\n");
1437 usage_with_options(stat_usage, options);
1438 return -1;
1439 }
1248 1440
1249 list_for_each_entry(pos, &evsel_list->entries, node) { 1441 list_for_each_entry(pos, &evsel_list->entries, node) {
1250 if (perf_evsel__alloc_stat_priv(pos) < 0 || 1442 if (perf_evsel__alloc_stat_priv(pos) < 0 ||
1251 perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0) 1443 perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
1252 goto out_free_fd; 1444 goto out_free_fd;
1253 } 1445 }
1446 if (interval) {
1447 list_for_each_entry(pos, &evsel_list->entries, node) {
1448 if (perf_evsel__alloc_prev_raw_counts(pos) < 0)
1449 goto out_free_fd;
1450 }
1451 }
1254 1452
1255 /* 1453 /*
1256 * We dont want to block the signals - that would cause 1454 * We dont want to block the signals - that would cause
@@ -1260,6 +1458,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1260 */ 1458 */
1261 atexit(sig_atexit); 1459 atexit(sig_atexit);
1262 signal(SIGINT, skip_signal); 1460 signal(SIGINT, skip_signal);
1461 signal(SIGCHLD, skip_signal);
1263 signal(SIGALRM, skip_signal); 1462 signal(SIGALRM, skip_signal);
1264 signal(SIGABRT, skip_signal); 1463 signal(SIGABRT, skip_signal);
1265 1464
@@ -1272,11 +1471,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1272 status = run_perf_stat(argc, argv); 1471 status = run_perf_stat(argc, argv);
1273 } 1472 }
1274 1473
1275 if (status != -1) 1474 if (status != -1 && !interval)
1276 print_stat(argc, argv); 1475 print_stat(argc, argv);
1277out_free_fd: 1476out_free_fd:
1278 list_for_each_entry(pos, &evsel_list->entries, node) 1477 list_for_each_entry(pos, &evsel_list->entries, node) {
1279 perf_evsel__free_stat_priv(pos); 1478 perf_evsel__free_stat_priv(pos);
1479 perf_evsel__free_counts(pos);
1480 perf_evsel__free_prev_raw_counts(pos);
1481 }
1280 perf_evlist__delete_maps(evsel_list); 1482 perf_evlist__delete_maps(evsel_list);
1281out: 1483out:
1282 perf_evlist__delete(evsel_list); 1484 perf_evlist__delete(evsel_list);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c9ff3950cd4b..72f6eb7b4173 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -68,27 +68,7 @@
68#include <linux/unistd.h> 68#include <linux/unistd.h>
69#include <linux/types.h> 69#include <linux/types.h>
70 70
71void get_term_dimensions(struct winsize *ws) 71static volatile int done;
72{
73 char *s = getenv("LINES");
74
75 if (s != NULL) {
76 ws->ws_row = atoi(s);
77 s = getenv("COLUMNS");
78 if (s != NULL) {
79 ws->ws_col = atoi(s);
80 if (ws->ws_row && ws->ws_col)
81 return;
82 }
83 }
84#ifdef TIOCGWINSZ
85 if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
86 ws->ws_row && ws->ws_col)
87 return;
88#endif
89 ws->ws_row = 25;
90 ws->ws_col = 80;
91}
92 72
93static void perf_top__update_print_entries(struct perf_top *top) 73static void perf_top__update_print_entries(struct perf_top *top)
94{ 74{
@@ -453,8 +433,10 @@ static int perf_top__key_mapped(struct perf_top *top, int c)
453 return 0; 433 return 0;
454} 434}
455 435
456static void perf_top__handle_keypress(struct perf_top *top, int c) 436static bool perf_top__handle_keypress(struct perf_top *top, int c)
457{ 437{
438 bool ret = true;
439
458 if (!perf_top__key_mapped(top, c)) { 440 if (!perf_top__key_mapped(top, c)) {
459 struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; 441 struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
460 struct termios tc, save; 442 struct termios tc, save;
@@ -475,7 +457,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
475 457
476 tcsetattr(0, TCSAFLUSH, &save); 458 tcsetattr(0, TCSAFLUSH, &save);
477 if (!perf_top__key_mapped(top, c)) 459 if (!perf_top__key_mapped(top, c))
478 return; 460 return ret;
479 } 461 }
480 462
481 switch (c) { 463 switch (c) {
@@ -537,7 +519,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
537 printf("exiting.\n"); 519 printf("exiting.\n");
538 if (top->dump_symtab) 520 if (top->dump_symtab)
539 perf_session__fprintf_dsos(top->session, stderr); 521 perf_session__fprintf_dsos(top->session, stderr);
540 exit(0); 522 ret = false;
523 break;
541 case 's': 524 case 's':
542 perf_top__prompt_symbol(top, "Enter details symbol"); 525 perf_top__prompt_symbol(top, "Enter details symbol");
543 break; 526 break;
@@ -560,6 +543,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
560 default: 543 default:
561 break; 544 break;
562 } 545 }
546
547 return ret;
563} 548}
564 549
565static void perf_top__sort_new_samples(void *arg) 550static void perf_top__sort_new_samples(void *arg)
@@ -596,13 +581,12 @@ static void *display_thread_tui(void *arg)
596 * via --uid. 581 * via --uid.
597 */ 582 */
598 list_for_each_entry(pos, &top->evlist->entries, node) 583 list_for_each_entry(pos, &top->evlist->entries, node)
599 pos->hists.uid_filter_str = top->target.uid_str; 584 pos->hists.uid_filter_str = top->record_opts.target.uid_str;
600 585
601 perf_evlist__tui_browse_hists(top->evlist, help, &hbt, 586 perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
602 &top->session->header.env); 587 &top->session->header.env);
603 588
604 exit_browser(0); 589 done = 1;
605 exit(0);
606 return NULL; 590 return NULL;
607} 591}
608 592
@@ -626,7 +610,7 @@ repeat:
626 /* trash return*/ 610 /* trash return*/
627 getc(stdin); 611 getc(stdin);
628 612
629 while (1) { 613 while (!done) {
630 perf_top__print_sym_table(top); 614 perf_top__print_sym_table(top);
631 /* 615 /*
632 * Either timeout expired or we got an EINTR due to SIGWINCH, 616 * Either timeout expired or we got an EINTR due to SIGWINCH,
@@ -640,15 +624,14 @@ repeat:
640 continue; 624 continue;
641 /* Fall trhu */ 625 /* Fall trhu */
642 default: 626 default:
643 goto process_hotkey; 627 c = getc(stdin);
628 tcsetattr(0, TCSAFLUSH, &save);
629
630 if (perf_top__handle_keypress(top, c))
631 goto repeat;
632 done = 1;
644 } 633 }
645 } 634 }
646process_hotkey:
647 c = getc(stdin);
648 tcsetattr(0, TCSAFLUSH, &save);
649
650 perf_top__handle_keypress(top, c);
651 goto repeat;
652 635
653 return NULL; 636 return NULL;
654} 637}
@@ -716,7 +699,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
716 static struct intlist *seen; 699 static struct intlist *seen;
717 700
718 if (!seen) 701 if (!seen)
719 seen = intlist__new(); 702 seen = intlist__new(NULL);
720 703
721 if (!intlist__has_entry(seen, event->ip.pid)) { 704 if (!intlist__has_entry(seen, event->ip.pid)) {
722 pr_err("Can't find guest [%d]'s kernel information\n", 705 pr_err("Can't find guest [%d]'s kernel information\n",
@@ -727,8 +710,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
727 } 710 }
728 711
729 if (!machine) { 712 if (!machine) {
730 pr_err("%u unprocessable samples recorded.", 713 pr_err("%u unprocessable samples recorded.\r",
731 top->session->hists.stats.nr_unprocessable_samples++); 714 top->session->stats.nr_unprocessable_samples++);
732 return; 715 return;
733 } 716 }
734 717
@@ -847,13 +830,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
847 ++top->us_samples; 830 ++top->us_samples;
848 if (top->hide_user_symbols) 831 if (top->hide_user_symbols)
849 continue; 832 continue;
850 machine = perf_session__find_host_machine(session); 833 machine = &session->machines.host;
851 break; 834 break;
852 case PERF_RECORD_MISC_KERNEL: 835 case PERF_RECORD_MISC_KERNEL:
853 ++top->kernel_samples; 836 ++top->kernel_samples;
854 if (top->hide_kernel_symbols) 837 if (top->hide_kernel_symbols)
855 continue; 838 continue;
856 machine = perf_session__find_host_machine(session); 839 machine = &session->machines.host;
857 break; 840 break;
858 case PERF_RECORD_MISC_GUEST_KERNEL: 841 case PERF_RECORD_MISC_GUEST_KERNEL:
859 ++top->guest_kernel_samples; 842 ++top->guest_kernel_samples;
@@ -878,7 +861,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
878 hists__inc_nr_events(&evsel->hists, event->header.type); 861 hists__inc_nr_events(&evsel->hists, event->header.type);
879 machine__process_event(machine, event); 862 machine__process_event(machine, event);
880 } else 863 } else
881 ++session->hists.stats.nr_unknown_events; 864 ++session->stats.nr_unknown_events;
882 } 865 }
883} 866}
884 867
@@ -890,123 +873,42 @@ static void perf_top__mmap_read(struct perf_top *top)
890 perf_top__mmap_read_idx(top, i); 873 perf_top__mmap_read_idx(top, i);
891} 874}
892 875
893static void perf_top__start_counters(struct perf_top *top) 876static int perf_top__start_counters(struct perf_top *top)
894{ 877{
878 char msg[512];
895 struct perf_evsel *counter; 879 struct perf_evsel *counter;
896 struct perf_evlist *evlist = top->evlist; 880 struct perf_evlist *evlist = top->evlist;
881 struct perf_record_opts *opts = &top->record_opts;
897 882
898 if (top->group) 883 perf_evlist__config(evlist, opts);
899 perf_evlist__set_leader(evlist);
900 884
901 list_for_each_entry(counter, &evlist->entries, node) { 885 list_for_each_entry(counter, &evlist->entries, node) {
902 struct perf_event_attr *attr = &counter->attr;
903
904 attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
905
906 if (top->freq) {
907 attr->sample_type |= PERF_SAMPLE_PERIOD;
908 attr->freq = 1;
909 attr->sample_freq = top->freq;
910 }
911
912 if (evlist->nr_entries > 1) {
913 attr->sample_type |= PERF_SAMPLE_ID;
914 attr->read_format |= PERF_FORMAT_ID;
915 }
916
917 if (perf_target__has_cpu(&top->target))
918 attr->sample_type |= PERF_SAMPLE_CPU;
919
920 if (symbol_conf.use_callchain)
921 attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
922
923 attr->mmap = 1;
924 attr->comm = 1;
925 attr->inherit = top->inherit;
926fallback_missing_features:
927 if (top->exclude_guest_missing)
928 attr->exclude_guest = attr->exclude_host = 0;
929retry_sample_id:
930 attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
931try_again: 886try_again:
932 if (perf_evsel__open(counter, top->evlist->cpus, 887 if (perf_evsel__open(counter, top->evlist->cpus,
933 top->evlist->threads) < 0) { 888 top->evlist->threads) < 0) {
934 int err = errno; 889 if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
935
936 if (err == EPERM || err == EACCES) {
937 ui__error_paranoid();
938 goto out_err;
939 } else if (err == EINVAL) {
940 if (!top->exclude_guest_missing &&
941 (attr->exclude_guest || attr->exclude_host)) {
942 pr_debug("Old kernel, cannot exclude "
943 "guest or host samples.\n");
944 top->exclude_guest_missing = true;
945 goto fallback_missing_features;
946 } else if (!top->sample_id_all_missing) {
947 /*
948 * Old kernel, no attr->sample_id_type_all field
949 */
950 top->sample_id_all_missing = true;
951 goto retry_sample_id;
952 }
953 }
954 /*
955 * If it's cycles then fall back to hrtimer
956 * based cpu-clock-tick sw counter, which
957 * is always available even if no PMU support:
958 */
959 if ((err == ENOENT || err == ENXIO) &&
960 (attr->type == PERF_TYPE_HARDWARE) &&
961 (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
962
963 if (verbose) 890 if (verbose)
964 ui__warning("Cycles event not supported,\n" 891 ui__warning("%s\n", msg);
965 "trying to fall back to cpu-clock-ticks\n");
966
967 attr->type = PERF_TYPE_SOFTWARE;
968 attr->config = PERF_COUNT_SW_CPU_CLOCK;
969 if (counter->name) {
970 free(counter->name);
971 counter->name = NULL;
972 }
973 goto try_again; 892 goto try_again;
974 } 893 }
975 894
976 if (err == ENOENT) { 895 perf_evsel__open_strerror(counter, &opts->target,
977 ui__error("The %s event is not supported.\n", 896 errno, msg, sizeof(msg));
978 perf_evsel__name(counter)); 897 ui__error("%s\n", msg);
979 goto out_err;
980 } else if (err == EMFILE) {
981 ui__error("Too many events are opened.\n"
982 "Try again after reducing the number of events\n");
983 goto out_err;
984 } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
985 ui__error("\'precise\' request may not be supported. "
986 "Try removing 'p' modifier\n");
987 goto out_err;
988 }
989
990 ui__error("The sys_perf_event_open() syscall "
991 "returned with %d (%s). /bin/dmesg "
992 "may provide additional information.\n"
993 "No CONFIG_PERF_EVENTS=y kernel support "
994 "configured?\n", err, strerror(err));
995 goto out_err; 898 goto out_err;
996 } 899 }
997 } 900 }
998 901
999 if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { 902 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
1000 ui__error("Failed to mmap with %d (%s)\n", 903 ui__error("Failed to mmap with %d (%s)\n",
1001 errno, strerror(errno)); 904 errno, strerror(errno));
1002 goto out_err; 905 goto out_err;
1003 } 906 }
1004 907
1005 return; 908 return 0;
1006 909
1007out_err: 910out_err:
1008 exit_browser(0); 911 return -1;
1009 exit(0);
1010} 912}
1011 913
1012static int perf_top__setup_sample_type(struct perf_top *top) 914static int perf_top__setup_sample_type(struct perf_top *top)
@@ -1016,7 +918,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
1016 ui__error("Selected -g but \"sym\" not present in --sort/-s."); 918 ui__error("Selected -g but \"sym\" not present in --sort/-s.");
1017 return -EINVAL; 919 return -EINVAL;
1018 } 920 }
1019 } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { 921 } else if (callchain_param.mode != CHAIN_NONE) {
1020 if (callchain_register_param(&callchain_param) < 0) { 922 if (callchain_register_param(&callchain_param) < 0) {
1021 ui__error("Can't register callchain params.\n"); 923 ui__error("Can't register callchain params.\n");
1022 return -EINVAL; 924 return -EINVAL;
@@ -1028,6 +930,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
1028 930
1029static int __cmd_top(struct perf_top *top) 931static int __cmd_top(struct perf_top *top)
1030{ 932{
933 struct perf_record_opts *opts = &top->record_opts;
1031 pthread_t thread; 934 pthread_t thread;
1032 int ret; 935 int ret;
1033 /* 936 /*
@@ -1042,26 +945,42 @@ static int __cmd_top(struct perf_top *top)
1042 if (ret) 945 if (ret)
1043 goto out_delete; 946 goto out_delete;
1044 947
1045 if (perf_target__has_task(&top->target)) 948 if (perf_target__has_task(&opts->target))
1046 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, 949 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
1047 perf_event__process, 950 perf_event__process,
1048 &top->session->host_machine); 951 &top->session->machines.host);
1049 else 952 else
1050 perf_event__synthesize_threads(&top->tool, perf_event__process, 953 perf_event__synthesize_threads(&top->tool, perf_event__process,
1051 &top->session->host_machine); 954 &top->session->machines.host);
1052 perf_top__start_counters(top); 955
956 ret = perf_top__start_counters(top);
957 if (ret)
958 goto out_delete;
959
1053 top->session->evlist = top->evlist; 960 top->session->evlist = top->evlist;
1054 perf_session__set_id_hdr_size(top->session); 961 perf_session__set_id_hdr_size(top->session);
1055 962
963 /*
964 * When perf is starting the traced process, all the events (apart from
965 * group members) have enable_on_exec=1 set, so don't spoil it by
966 * prematurely enabling them.
967 *
968 * XXX 'top' still doesn't start workloads like record, trace, but should,
969 * so leave the check here.
970 */
971 if (!perf_target__none(&opts->target))
972 perf_evlist__enable(top->evlist);
973
1056 /* Wait for a minimal set of events before starting the snapshot */ 974 /* Wait for a minimal set of events before starting the snapshot */
1057 poll(top->evlist->pollfd, top->evlist->nr_fds, 100); 975 poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
1058 976
1059 perf_top__mmap_read(top); 977 perf_top__mmap_read(top);
1060 978
979 ret = -1;
1061 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : 980 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1062 display_thread), top)) { 981 display_thread), top)) {
1063 ui__error("Could not create display thread.\n"); 982 ui__error("Could not create display thread.\n");
1064 exit(-1); 983 goto out_delete;
1065 } 984 }
1066 985
1067 if (top->realtime_prio) { 986 if (top->realtime_prio) {
@@ -1070,11 +989,11 @@ static int __cmd_top(struct perf_top *top)
1070 param.sched_priority = top->realtime_prio; 989 param.sched_priority = top->realtime_prio;
1071 if (sched_setscheduler(0, SCHED_FIFO, &param)) { 990 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1072 ui__error("Could not set realtime priority.\n"); 991 ui__error("Could not set realtime priority.\n");
1073 exit(-1); 992 goto out_delete;
1074 } 993 }
1075 } 994 }
1076 995
1077 while (1) { 996 while (!done) {
1078 u64 hits = top->samples; 997 u64 hits = top->samples;
1079 998
1080 perf_top__mmap_read(top); 999 perf_top__mmap_read(top);
@@ -1083,126 +1002,67 @@ static int __cmd_top(struct perf_top *top)
1083 ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100); 1002 ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
1084 } 1003 }
1085 1004
1005 ret = 0;
1086out_delete: 1006out_delete:
1087 perf_session__delete(top->session); 1007 perf_session__delete(top->session);
1088 top->session = NULL; 1008 top->session = NULL;
1089 1009
1090 return 0; 1010 return ret;
1091} 1011}
1092 1012
1093static int 1013static int
1094parse_callchain_opt(const struct option *opt, const char *arg, int unset) 1014parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1095{ 1015{
1096 struct perf_top *top = (struct perf_top *)opt->value;
1097 char *tok, *tok2;
1098 char *endptr;
1099
1100 /* 1016 /*
1101 * --no-call-graph 1017 * --no-call-graph
1102 */ 1018 */
1103 if (unset) { 1019 if (unset)
1104 top->dont_use_callchains = true;
1105 return 0; 1020 return 0;
1106 }
1107 1021
1108 symbol_conf.use_callchain = true; 1022 symbol_conf.use_callchain = true;
1109 1023
1110 if (!arg) 1024 return record_parse_callchain_opt(opt, arg, unset);
1111 return 0;
1112
1113 tok = strtok((char *)arg, ",");
1114 if (!tok)
1115 return -1;
1116
1117 /* get the output mode */
1118 if (!strncmp(tok, "graph", strlen(arg)))
1119 callchain_param.mode = CHAIN_GRAPH_ABS;
1120
1121 else if (!strncmp(tok, "flat", strlen(arg)))
1122 callchain_param.mode = CHAIN_FLAT;
1123
1124 else if (!strncmp(tok, "fractal", strlen(arg)))
1125 callchain_param.mode = CHAIN_GRAPH_REL;
1126
1127 else if (!strncmp(tok, "none", strlen(arg))) {
1128 callchain_param.mode = CHAIN_NONE;
1129 symbol_conf.use_callchain = false;
1130
1131 return 0;
1132 } else
1133 return -1;
1134
1135 /* get the min percentage */
1136 tok = strtok(NULL, ",");
1137 if (!tok)
1138 goto setup;
1139
1140 callchain_param.min_percent = strtod(tok, &endptr);
1141 if (tok == endptr)
1142 return -1;
1143
1144 /* get the print limit */
1145 tok2 = strtok(NULL, ",");
1146 if (!tok2)
1147 goto setup;
1148
1149 if (tok2[0] != 'c') {
1150 callchain_param.print_limit = strtod(tok2, &endptr);
1151 tok2 = strtok(NULL, ",");
1152 if (!tok2)
1153 goto setup;
1154 }
1155
1156 /* get the call chain order */
1157 if (!strcmp(tok2, "caller"))
1158 callchain_param.order = ORDER_CALLER;
1159 else if (!strcmp(tok2, "callee"))
1160 callchain_param.order = ORDER_CALLEE;
1161 else
1162 return -1;
1163setup:
1164 if (callchain_register_param(&callchain_param) < 0) {
1165 fprintf(stderr, "Can't register callchain params\n");
1166 return -1;
1167 }
1168 return 0;
1169} 1025}
1170 1026
1171int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) 1027int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1172{ 1028{
1173 struct perf_evsel *pos;
1174 int status; 1029 int status;
1175 char errbuf[BUFSIZ]; 1030 char errbuf[BUFSIZ];
1176 struct perf_top top = { 1031 struct perf_top top = {
1177 .count_filter = 5, 1032 .count_filter = 5,
1178 .delay_secs = 2, 1033 .delay_secs = 2,
1179 .freq = 4000, /* 4 KHz */ 1034 .record_opts = {
1180 .mmap_pages = 128, 1035 .mmap_pages = UINT_MAX,
1181 .sym_pcnt_filter = 5, 1036 .user_freq = UINT_MAX,
1182 .target = { 1037 .user_interval = ULLONG_MAX,
1183 .uses_mmap = true, 1038 .freq = 4000, /* 4 KHz */
1039 .target = {
1040 .uses_mmap = true,
1041 },
1184 }, 1042 },
1043 .sym_pcnt_filter = 5,
1185 }; 1044 };
1186 char callchain_default_opt[] = "fractal,0.5,callee"; 1045 struct perf_record_opts *opts = &top.record_opts;
1046 struct perf_target *target = &opts->target;
1187 const struct option options[] = { 1047 const struct option options[] = {
1188 OPT_CALLBACK('e', "event", &top.evlist, "event", 1048 OPT_CALLBACK('e', "event", &top.evlist, "event",
1189 "event selector. use 'perf list' to list available events", 1049 "event selector. use 'perf list' to list available events",
1190 parse_events_option), 1050 parse_events_option),
1191 OPT_INTEGER('c', "count", &top.default_interval, 1051 OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
1192 "event period to sample"), 1052 OPT_STRING('p', "pid", &target->pid, "pid",
1193 OPT_STRING('p', "pid", &top.target.pid, "pid",
1194 "profile events on existing process id"), 1053 "profile events on existing process id"),
1195 OPT_STRING('t', "tid", &top.target.tid, "tid", 1054 OPT_STRING('t', "tid", &target->tid, "tid",
1196 "profile events on existing thread id"), 1055 "profile events on existing thread id"),
1197 OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide, 1056 OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1198 "system-wide collection from all CPUs"), 1057 "system-wide collection from all CPUs"),
1199 OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu", 1058 OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1200 "list of cpus to monitor"), 1059 "list of cpus to monitor"),
1201 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 1060 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1202 "file", "vmlinux pathname"), 1061 "file", "vmlinux pathname"),
1203 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, 1062 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1204 "hide kernel symbols"), 1063 "hide kernel symbols"),
1205 OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), 1064 OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages,
1065 "number of mmap data pages"),
1206 OPT_INTEGER('r', "realtime", &top.realtime_prio, 1066 OPT_INTEGER('r', "realtime", &top.realtime_prio,
1207 "collect data with this RT SCHED_FIFO priority"), 1067 "collect data with this RT SCHED_FIFO priority"),
1208 OPT_INTEGER('d', "delay", &top.delay_secs, 1068 OPT_INTEGER('d', "delay", &top.delay_secs,
@@ -1211,16 +1071,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1211 "dump the symbol table used for profiling"), 1071 "dump the symbol table used for profiling"),
1212 OPT_INTEGER('f', "count-filter", &top.count_filter, 1072 OPT_INTEGER('f', "count-filter", &top.count_filter,
1213 "only display functions with more events than this"), 1073 "only display functions with more events than this"),
1214 OPT_BOOLEAN('g', "group", &top.group, 1074 OPT_BOOLEAN('g', "group", &opts->group,
1215 "put the counters into a counter group"), 1075 "put the counters into a counter group"),
1216 OPT_BOOLEAN('i', "inherit", &top.inherit, 1076 OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
1217 "child tasks inherit counters"), 1077 "child tasks do not inherit counters"),
1218 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", 1078 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1219 "symbol to annotate"), 1079 "symbol to annotate"),
1220 OPT_BOOLEAN('z', "zero", &top.zero, 1080 OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
1221 "zero history across updates"), 1081 OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
1222 OPT_INTEGER('F', "freq", &top.freq,
1223 "profile at this frequency"),
1224 OPT_INTEGER('E', "entries", &top.print_entries, 1082 OPT_INTEGER('E', "entries", &top.print_entries,
1225 "display this many functions"), 1083 "display this many functions"),
1226 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, 1084 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
@@ -1233,10 +1091,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1233 "sort by key(s): pid, comm, dso, symbol, parent"), 1091 "sort by key(s): pid, comm, dso, symbol, parent"),
1234 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 1092 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1235 "Show a column with the number of samples"), 1093 "Show a column with the number of samples"),
1236 OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", 1094 OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
1237 "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " 1095 "mode[,dump_size]", record_callchain_help,
1238 "Default: fractal,0.5,callee", &parse_callchain_opt, 1096 &parse_callchain_opt, "fp"),
1239 callchain_default_opt),
1240 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 1097 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1241 "Show a column with the sum of periods"), 1098 "Show a column with the sum of periods"),
1242 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 1099 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
@@ -1251,7 +1108,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1251 "Display raw encoding of assembly instructions (default)"), 1108 "Display raw encoding of assembly instructions (default)"),
1252 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1109 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
1253 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1110 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1254 OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"), 1111 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1255 OPT_END() 1112 OPT_END()
1256 }; 1113 };
1257 const char * const top_usage[] = { 1114 const char * const top_usage[] = {
@@ -1272,7 +1129,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1272 if (sort_order == default_sort_order) 1129 if (sort_order == default_sort_order)
1273 sort_order = "dso,symbol"; 1130 sort_order = "dso,symbol";
1274 1131
1275 setup_sorting(top_usage, options); 1132 if (setup_sorting() < 0)
1133 usage_with_options(top_usage, options);
1276 1134
1277 if (top.use_stdio) 1135 if (top.use_stdio)
1278 use_browser = 0; 1136 use_browser = 0;
@@ -1281,33 +1139,33 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1281 1139
1282 setup_browser(false); 1140 setup_browser(false);
1283 1141
1284 status = perf_target__validate(&top.target); 1142 status = perf_target__validate(target);
1285 if (status) { 1143 if (status) {
1286 perf_target__strerror(&top.target, status, errbuf, BUFSIZ); 1144 perf_target__strerror(target, status, errbuf, BUFSIZ);
1287 ui__warning("%s", errbuf); 1145 ui__warning("%s", errbuf);
1288 } 1146 }
1289 1147
1290 status = perf_target__parse_uid(&top.target); 1148 status = perf_target__parse_uid(target);
1291 if (status) { 1149 if (status) {
1292 int saved_errno = errno; 1150 int saved_errno = errno;
1293 1151
1294 perf_target__strerror(&top.target, status, errbuf, BUFSIZ); 1152 perf_target__strerror(target, status, errbuf, BUFSIZ);
1295 ui__error("%s", errbuf); 1153 ui__error("%s", errbuf);
1296 1154
1297 status = -saved_errno; 1155 status = -saved_errno;
1298 goto out_delete_evlist; 1156 goto out_delete_evlist;
1299 } 1157 }
1300 1158
1301 if (perf_target__none(&top.target)) 1159 if (perf_target__none(target))
1302 top.target.system_wide = true; 1160 target->system_wide = true;
1303 1161
1304 if (perf_evlist__create_maps(top.evlist, &top.target) < 0) 1162 if (perf_evlist__create_maps(top.evlist, target) < 0)
1305 usage_with_options(top_usage, options); 1163 usage_with_options(top_usage, options);
1306 1164
1307 if (!top.evlist->nr_entries && 1165 if (!top.evlist->nr_entries &&
1308 perf_evlist__add_default(top.evlist) < 0) { 1166 perf_evlist__add_default(top.evlist) < 0) {
1309 ui__error("Not enough memory for event selector list\n"); 1167 ui__error("Not enough memory for event selector list\n");
1310 return -ENOMEM; 1168 goto out_delete_maps;
1311 } 1169 }
1312 1170
1313 symbol_conf.nr_events = top.evlist->nr_entries; 1171 symbol_conf.nr_events = top.evlist->nr_entries;
@@ -1315,24 +1173,22 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1315 if (top.delay_secs < 1) 1173 if (top.delay_secs < 1)
1316 top.delay_secs = 1; 1174 top.delay_secs = 1;
1317 1175
1176 if (opts->user_interval != ULLONG_MAX)
1177 opts->default_interval = opts->user_interval;
1178 if (opts->user_freq != UINT_MAX)
1179 opts->freq = opts->user_freq;
1180
1318 /* 1181 /*
1319 * User specified count overrides default frequency. 1182 * User specified count overrides default frequency.
1320 */ 1183 */
1321 if (top.default_interval) 1184 if (opts->default_interval)
1322 top.freq = 0; 1185 opts->freq = 0;
1323 else if (top.freq) { 1186 else if (opts->freq) {
1324 top.default_interval = top.freq; 1187 opts->default_interval = opts->freq;
1325 } else { 1188 } else {
1326 ui__error("frequency and count are zero, aborting\n"); 1189 ui__error("frequency and count are zero, aborting\n");
1327 exit(EXIT_FAILURE); 1190 status = -EINVAL;
1328 } 1191 goto out_delete_maps;
1329
1330 list_for_each_entry(pos, &top.evlist->entries, node) {
1331 /*
1332 * Fill in the ones not specifically initialized via -c:
1333 */
1334 if (!pos->attr.sample_period)
1335 pos->attr.sample_period = top.default_interval;
1336 } 1192 }
1337 1193
1338 top.sym_evsel = perf_evlist__first(top.evlist); 1194 top.sym_evsel = perf_evlist__first(top.evlist);
@@ -1365,6 +1221,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1365 1221
1366 status = __cmd_top(&top); 1222 status = __cmd_top(&top);
1367 1223
1224out_delete_maps:
1225 perf_evlist__delete_maps(top.evlist);
1368out_delete_evlist: 1226out_delete_evlist:
1369 perf_evlist__delete(top.evlist); 1227 perf_evlist__delete(top.evlist);
1370 1228
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 7932ffa29889..d222d7fc7e96 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -455,7 +455,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
455 goto out_delete_evlist; 455 goto out_delete_evlist;
456 } 456 }
457 457
458 perf_evlist__config_attrs(evlist, &trace->opts); 458 perf_evlist__config(evlist, &trace->opts);
459 459
460 signal(SIGCHLD, sig_handler); 460 signal(SIGCHLD, sig_handler);
461 signal(SIGINT, sig_handler); 461 signal(SIGINT, sig_handler);
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index f5ac77485a4f..b4eabb44e381 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -225,3 +225,14 @@ int main(void)
225 return on_exit(NULL, NULL); 225 return on_exit(NULL, NULL);
226} 226}
227endef 227endef
228
229define SOURCE_LIBNUMA
230#include <numa.h>
231#include <numaif.h>
232
233int main(void)
234{
235 numa_available();
236 return 0;
237}
238endef \ No newline at end of file
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index e5413125e6bb..8ef3bd30a549 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -13,7 +13,7 @@ newline := $(newline)
13# what should replace a newline when escaping 13# what should replace a newline when escaping
14# newlines; the default is a bizarre string. 14# newlines; the default is a bizarre string.
15# 15#
16nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) 16nl-escape = $(if $(1),$(1),m822df3020w6a44id34bt574ctac44eb9f4n)
17 17
18# escape-nl 18# escape-nl
19# 19#
@@ -173,9 +173,9 @@ _ge-abspath = $(if $(is-executable),$(1))
173# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) 173# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
174# 174#
175define get-executable-or-default 175define get-executable-or-default
176$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) 176$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
177endef 177endef
178_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) 178_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
179_gea_warn = $(warning The path '$(1)' is not executable.) 179_gea_warn = $(warning The path '$(1)' is not executable.)
180_gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) 180_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
181 181
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 0f661fbce6a8..095b88207cd3 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -328,14 +328,23 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
328 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) 328 if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
329 return 0; 329 return 0;
330 330
331 status = 1;
331 /* Check for ENOSPC and EIO errors.. */ 332 /* Check for ENOSPC and EIO errors.. */
332 if (fflush(stdout)) 333 if (fflush(stdout)) {
333 die("write failure on standard output: %s", strerror(errno)); 334 fprintf(stderr, "write failure on standard output: %s", strerror(errno));
334 if (ferror(stdout)) 335 goto out;
335 die("unknown write failure on standard output"); 336 }
336 if (fclose(stdout)) 337 if (ferror(stdout)) {
337 die("close failed on standard output: %s", strerror(errno)); 338 fprintf(stderr, "unknown write failure on standard output");
338 return 0; 339 goto out;
340 }
341 if (fclose(stdout)) {
342 fprintf(stderr, "close failed on standard output: %s", strerror(errno));
343 goto out;
344 }
345 status = 0;
346out:
347 return status;
339} 348}
340 349
341static void handle_internal_command(int argc, const char **argv) 350static void handle_internal_command(int argc, const char **argv)
@@ -467,7 +476,8 @@ int main(int argc, const char **argv)
467 cmd += 5; 476 cmd += 5;
468 argv[0] = cmd; 477 argv[0] = cmd;
469 handle_internal_command(argc, argv); 478 handle_internal_command(argc, argv);
470 die("cannot handle %s internally", cmd); 479 fprintf(stderr, "cannot handle %s internally", cmd);
480 goto out;
471 } 481 }
472 482
473 /* Look for flags.. */ 483 /* Look for flags.. */
@@ -485,7 +495,7 @@ int main(int argc, const char **argv)
485 printf("\n usage: %s\n\n", perf_usage_string); 495 printf("\n usage: %s\n\n", perf_usage_string);
486 list_common_cmds_help(); 496 list_common_cmds_help();
487 printf("\n %s\n\n", perf_more_info_string); 497 printf("\n %s\n\n", perf_more_info_string);
488 exit(1); 498 goto out;
489 } 499 }
490 cmd = argv[0]; 500 cmd = argv[0];
491 501
@@ -517,7 +527,7 @@ int main(int argc, const char **argv)
517 fprintf(stderr, "Expansion of alias '%s' failed; " 527 fprintf(stderr, "Expansion of alias '%s' failed; "
518 "'%s' is not a perf-command\n", 528 "'%s' is not a perf-command\n",
519 cmd, argv[0]); 529 cmd, argv[0]);
520 exit(1); 530 goto out;
521 } 531 }
522 if (!done_help) { 532 if (!done_help) {
523 cmd = argv[0] = help_unknown_cmd(cmd); 533 cmd = argv[0] = help_unknown_cmd(cmd);
@@ -528,6 +538,6 @@ int main(int argc, const char **argv)
528 538
529 fprintf(stderr, "Failed to run command '%s': %s\n", 539 fprintf(stderr, "Failed to run command '%s': %s\n",
530 cmd, strerror(errno)); 540 cmd, strerror(errno));
531 541out:
532 return 1; 542 return 1;
533} 543}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2c340e7da458..c2206c87fc9f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -1,10 +1,6 @@
1#ifndef _PERF_PERF_H 1#ifndef _PERF_PERF_H
2#define _PERF_PERF_H 2#define _PERF_PERF_H
3 3
4struct winsize;
5
6void get_term_dimensions(struct winsize *ws);
7
8#include <asm/unistd.h> 4#include <asm/unistd.h>
9 5
10#if defined(__i386__) 6#if defined(__i386__)
@@ -107,32 +103,6 @@ void get_term_dimensions(struct winsize *ws);
107#include "util/types.h" 103#include "util/types.h"
108#include <stdbool.h> 104#include <stdbool.h>
109 105
110struct perf_mmap {
111 void *base;
112 int mask;
113 unsigned int prev;
114};
115
116static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
117{
118 struct perf_event_mmap_page *pc = mm->base;
119 int head = pc->data_head;
120 rmb();
121 return head;
122}
123
124static inline void perf_mmap__write_tail(struct perf_mmap *md,
125 unsigned long tail)
126{
127 struct perf_event_mmap_page *pc = md->base;
128
129 /*
130 * ensure all reads are done before we write the tail out.
131 */
132 /* mb(); */
133 pc->data_tail = tail;
134}
135
136/* 106/*
137 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all 107 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
138 * counters in the current task. 108 * counters in the current task.
@@ -237,8 +207,6 @@ struct perf_record_opts {
237 bool raw_samples; 207 bool raw_samples;
238 bool sample_address; 208 bool sample_address;
239 bool sample_time; 209 bool sample_time;
240 bool sample_id_all_missing;
241 bool exclude_guest_missing;
242 bool period; 210 bool period;
243 unsigned int freq; 211 unsigned int freq;
244 unsigned int mmap_pages; 212 unsigned int mmap_pages;
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
deleted file mode 100644
index 8edda9078d5d..000000000000
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ /dev/null
@@ -1,2 +0,0 @@
1#!/bin/bash
2perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
deleted file mode 100644
index 6d91411d248c..000000000000
--- a/tools/perf/scripts/perl/bin/workqueue-stats-report
+++ /dev/null
@@ -1,3 +0,0 @@
1#!/bin/bash
2# description: workqueue stats (ins/exe/create/destroy)
3perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl
index 4bb3ecd33472..8b20787021c1 100644
--- a/tools/perf/scripts/perl/rwtop.pl
+++ b/tools/perf/scripts/perl/rwtop.pl
@@ -17,6 +17,7 @@ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
17use lib "./Perf-Trace-Util/lib"; 17use lib "./Perf-Trace-Util/lib";
18use Perf::Trace::Core; 18use Perf::Trace::Core;
19use Perf::Trace::Util; 19use Perf::Trace::Util;
20use POSIX qw/SIGALRM SA_RESTART/;
20 21
21my $default_interval = 3; 22my $default_interval = 3;
22my $nlines = 20; 23my $nlines = 20;
@@ -90,7 +91,10 @@ sub syscalls::sys_enter_write
90 91
91sub trace_begin 92sub trace_begin
92{ 93{
93 $SIG{ALRM} = \&set_print_pending; 94 my $sa = POSIX::SigAction->new(\&set_print_pending);
95 $sa->flags(SA_RESTART);
96 $sa->safe(1);
97 POSIX::sigaction(SIGALRM, $sa) or die "Can't set SIGALRM handler: $!\n";
94 alarm 1; 98 alarm 1;
95} 99}
96 100
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl
deleted file mode 100644
index a8eaff5119e0..000000000000
--- a/tools/perf/scripts/perl/workqueue-stats.pl
+++ /dev/null
@@ -1,129 +0,0 @@
1#!/usr/bin/perl -w
2# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4
5# Displays workqueue stats
6#
7# Usage:
8#
9# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e
10# workqueue:workqueue_destruction -e workqueue:workqueue_execution
11# -e workqueue:workqueue_insertion
12#
13# perf script -p -s tools/perf/scripts/perl/workqueue-stats.pl
14
15use 5.010000;
16use strict;
17use warnings;
18
19use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
20use lib "./Perf-Trace-Util/lib";
21use Perf::Trace::Core;
22use Perf::Trace::Util;
23
24my @cpus;
25
26sub workqueue::workqueue_destruction
27{
28 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
29 $common_pid, $common_comm,
30 $thread_comm, $thread_pid) = @_;
31
32 $cpus[$common_cpu]{$thread_pid}{destroyed}++;
33 $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
34}
35
36sub workqueue::workqueue_creation
37{
38 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
39 $common_pid, $common_comm,
40 $thread_comm, $thread_pid, $cpu) = @_;
41
42 $cpus[$common_cpu]{$thread_pid}{created}++;
43 $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
44}
45
46sub workqueue::workqueue_execution
47{
48 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
49 $common_pid, $common_comm,
50 $thread_comm, $thread_pid, $func) = @_;
51
52 $cpus[$common_cpu]{$thread_pid}{executed}++;
53 $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
54}
55
56sub workqueue::workqueue_insertion
57{
58 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
59 $common_pid, $common_comm,
60 $thread_comm, $thread_pid, $func) = @_;
61
62 $cpus[$common_cpu]{$thread_pid}{inserted}++;
63 $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
64}
65
66sub trace_end
67{
68 print "workqueue work stats:\n\n";
69 my $cpu = 0;
70 printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name");
71 printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----");
72 foreach my $pidhash (@cpus) {
73 while ((my $pid, my $wqhash) = each %$pidhash) {
74 my $ins = $$wqhash{'inserted'} || 0;
75 my $exe = $$wqhash{'executed'} || 0;
76 my $comm = $$wqhash{'comm'} || "";
77 if ($ins || $exe) {
78 printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm);
79 }
80 }
81 $cpu++;
82 }
83
84 $cpu = 0;
85 print "\nworkqueue lifecycle stats:\n\n";
86 printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name");
87 printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----");
88 foreach my $pidhash (@cpus) {
89 while ((my $pid, my $wqhash) = each %$pidhash) {
90 my $created = $$wqhash{'created'} || 0;
91 my $destroyed = $$wqhash{'destroyed'} || 0;
92 my $comm = $$wqhash{'comm'} || "";
93 if ($created || $destroyed) {
94 printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed,
95 $comm);
96 }
97 }
98 $cpu++;
99 }
100
101 print_unhandled();
102}
103
104my %unhandled;
105
106sub print_unhandled
107{
108 if ((scalar keys %unhandled) == 0) {
109 return;
110 }
111
112 print "\nunhandled events:\n\n";
113
114 printf("%-40s %10s\n", "event", "count");
115 printf("%-40s %10s\n", "----------------------------------------",
116 "-----------");
117
118 foreach my $event_name (keys %unhandled) {
119 printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
120 }
121}
122
123sub trace_unhandled
124{
125 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
126 $common_pid, $common_comm) = @_;
127
128 $unhandled{$event_name}++;
129}
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 25638a986257..bdcceb886f77 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -19,6 +19,11 @@
19 * permissions. All the event text files are stored there. 19 * permissions. All the event text files are stored there.
20 */ 20 */
21 21
22/*
23 * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
24 * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
25 */
26#define __SANE_USERSPACE_TYPES__
22#include <stdlib.h> 27#include <stdlib.h>
23#include <stdio.h> 28#include <stdio.h>
24#include <inttypes.h> 29#include <inttypes.h>
@@ -33,8 +38,6 @@
33 38
34extern int verbose; 39extern int verbose;
35 40
36bool test_attr__enabled;
37
38static char *dir; 41static char *dir;
39 42
40void test_attr__init(void) 43void test_attr__init(void)
@@ -146,7 +149,7 @@ static int run_dir(const char *d, const char *perf)
146{ 149{
147 char cmd[3*PATH_MAX]; 150 char cmd[3*PATH_MAX];
148 151
149 snprintf(cmd, 3*PATH_MAX, "python %s/attr.py -d %s/attr/ -p %s %s", 152 snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %s",
150 d, d, perf, verbose ? "-v" : ""); 153 d, d, perf, verbose ? "-v" : "");
151 154
152 return system(cmd); 155 return system(cmd);
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index e702b82dcb86..2f629ca485bc 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -68,7 +68,7 @@ class Event(dict):
68 self[key] = val 68 self[key] = val
69 69
70 def __init__(self, name, data, base): 70 def __init__(self, name, data, base):
71 log.info(" Event %s" % name); 71 log.debug(" Event %s" % name);
72 self.name = name; 72 self.name = name;
73 self.group = '' 73 self.group = ''
74 self.add(base) 74 self.add(base)
@@ -97,6 +97,14 @@ class Event(dict):
97 return False 97 return False
98 return True 98 return True
99 99
100 def diff(self, other):
101 for t in Event.terms:
102 if not self.has_key(t) or not other.has_key(t):
103 continue
104 if not self.compare_data(self[t], other[t]):
105 log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
106
107
100# Test file description needs to have following sections: 108# Test file description needs to have following sections:
101# [config] 109# [config]
102# - just single instance in file 110# - just single instance in file
@@ -113,7 +121,7 @@ class Test(object):
113 parser = ConfigParser.SafeConfigParser() 121 parser = ConfigParser.SafeConfigParser()
114 parser.read(path) 122 parser.read(path)
115 123
116 log.warning("running '%s'" % path) 124 log.debug("running '%s'" % path)
117 125
118 self.path = path 126 self.path = path
119 self.test_dir = options.test_dir 127 self.test_dir = options.test_dir
@@ -128,7 +136,7 @@ class Test(object):
128 136
129 self.expect = {} 137 self.expect = {}
130 self.result = {} 138 self.result = {}
131 log.info(" loading expected events"); 139 log.debug(" loading expected events");
132 self.load_events(path, self.expect) 140 self.load_events(path, self.expect)
133 141
134 def is_event(self, name): 142 def is_event(self, name):
@@ -164,7 +172,7 @@ class Test(object):
164 self.perf, self.command, tempdir, self.args) 172 self.perf, self.command, tempdir, self.args)
165 ret = os.WEXITSTATUS(os.system(cmd)) 173 ret = os.WEXITSTATUS(os.system(cmd))
166 174
167 log.info(" running '%s' ret %d " % (cmd, ret)) 175 log.warning(" running '%s' ret %d " % (cmd, ret))
168 176
169 if ret != int(self.ret): 177 if ret != int(self.ret):
170 raise Unsup(self) 178 raise Unsup(self)
@@ -172,7 +180,7 @@ class Test(object):
172 def compare(self, expect, result): 180 def compare(self, expect, result):
173 match = {} 181 match = {}
174 182
175 log.info(" compare"); 183 log.debug(" compare");
176 184
177 # For each expected event find all matching 185 # For each expected event find all matching
178 # events in result. Fail if there's not any. 186 # events in result. Fail if there's not any.
@@ -187,10 +195,11 @@ class Test(object):
187 else: 195 else:
188 log.debug(" ->FAIL"); 196 log.debug(" ->FAIL");
189 197
190 log.info(" match: [%s] matches %s" % (exp_name, str(exp_list))) 198 log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
191 199
192 # we did not any matching event - fail 200 # we did not any matching event - fail
193 if (not exp_list): 201 if (not exp_list):
202 exp_event.diff(res_event)
194 raise Fail(self, 'match failure'); 203 raise Fail(self, 'match failure');
195 204
196 match[exp_name] = exp_list 205 match[exp_name] = exp_list
@@ -208,10 +217,10 @@ class Test(object):
208 if res_group not in match[group]: 217 if res_group not in match[group]:
209 raise Fail(self, 'group failure') 218 raise Fail(self, 'group failure')
210 219
211 log.info(" group: [%s] matches group leader %s" % 220 log.debug(" group: [%s] matches group leader %s" %
212 (exp_name, str(match[group]))) 221 (exp_name, str(match[group])))
213 222
214 log.info(" matched") 223 log.debug(" matched")
215 224
216 def resolve_groups(self, events): 225 def resolve_groups(self, events):
217 for name, event in events.items(): 226 for name, event in events.items():
@@ -233,7 +242,7 @@ class Test(object):
233 self.run_cmd(tempdir); 242 self.run_cmd(tempdir);
234 243
235 # load events expectation for the test 244 # load events expectation for the test
236 log.info(" loading result events"); 245 log.debug(" loading result events");
237 for f in glob.glob(tempdir + '/event*'): 246 for f in glob.glob(tempdir + '/event*'):
238 self.load_events(f, self.result); 247 self.load_events(f, self.result);
239 248
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index f1485d8e6a0b..5bc3880f7be5 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -7,7 +7,7 @@ size=96
7config=0 7config=0
8sample_period=4000 8sample_period=4000
9sample_type=263 9sample_type=263
10read_format=7 10read_format=0
11disabled=1 11disabled=1
12inherit=1 12inherit=1
13pinned=0 13pinned=0
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index a6599e9a19d3..57739cacdb2a 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -6,12 +6,14 @@ args = --group -e cycles,instructions kill >/dev/null 2>&1
6fd=1 6fd=1
7group_fd=-1 7group_fd=-1
8sample_type=327 8sample_type=327
9read_format=4
9 10
10[event-2:base-record] 11[event-2:base-record]
11fd=2 12fd=2
12group_fd=1 13group_fd=1
13config=1 14config=1
14sample_type=327 15sample_type=327
16read_format=4
15mmap=0 17mmap=0
16comm=0 18comm=0
17enable_on_exec=0 19enable_on_exec=0
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 5a8359da38af..c5548d054aff 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -1,11 +1,12 @@
1[config] 1[config]
2command = record 2command = record
3args = -e '{cycles,instructions}' kill >/tmp/krava 2>&1 3args = -e '{cycles,instructions}' kill >/dev/null 2>&1
4 4
5[event-1:base-record] 5[event-1:base-record]
6fd=1 6fd=1
7group_fd=-1 7group_fd=-1
8sample_type=327 8sample_type=327
9read_format=4
9 10
10[event-2:base-record] 11[event-2:base-record]
11fd=2 12fd=2
@@ -13,6 +14,7 @@ group_fd=1
13type=0 14type=0
14config=1 15config=1
15sample_type=327 16sample_type=327
17read_format=4
16mmap=0 18mmap=0
17comm=0 19comm=0
18enable_on_exec=0 20enable_on_exec=0
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 186f67535494..acb98e0e39f2 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -4,6 +4,7 @@
4 * Builtin regression testing command: ever growing number of sanity tests 4 * Builtin regression testing command: ever growing number of sanity tests
5 */ 5 */
6#include "builtin.h" 6#include "builtin.h"
7#include "intlist.h"
7#include "tests.h" 8#include "tests.h"
8#include "debug.h" 9#include "debug.h"
9#include "color.h" 10#include "color.h"
@@ -69,6 +70,14 @@ static struct test {
69 .func = test__attr, 70 .func = test__attr,
70 }, 71 },
71 { 72 {
73 .desc = "Test matching and linking mutliple hists",
74 .func = test__hists_link,
75 },
76 {
77 .desc = "Try 'use perf' in python, checking link problems",
78 .func = test__python_use,
79 },
80 {
72 .func = NULL, 81 .func = NULL,
73 }, 82 },
74}; 83};
@@ -97,7 +106,7 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
97 return false; 106 return false;
98} 107}
99 108
100static int __cmd_test(int argc, const char *argv[]) 109static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
101{ 110{
102 int i = 0; 111 int i = 0;
103 int width = 0; 112 int width = 0;
@@ -118,13 +127,28 @@ static int __cmd_test(int argc, const char *argv[])
118 continue; 127 continue;
119 128
120 pr_info("%2d: %-*s:", i, width, tests[curr].desc); 129 pr_info("%2d: %-*s:", i, width, tests[curr].desc);
130
131 if (intlist__find(skiplist, i)) {
132 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
133 continue;
134 }
135
121 pr_debug("\n--- start ---\n"); 136 pr_debug("\n--- start ---\n");
122 err = tests[curr].func(); 137 err = tests[curr].func();
123 pr_debug("---- end ----\n%s:", tests[curr].desc); 138 pr_debug("---- end ----\n%s:", tests[curr].desc);
124 if (err) 139
125 color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); 140 switch (err) {
126 else 141 case TEST_OK:
127 pr_info(" Ok\n"); 142 pr_info(" Ok\n");
143 break;
144 case TEST_SKIP:
145 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
146 break;
147 case TEST_FAIL:
148 default:
149 color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
150 break;
151 }
128 } 152 }
129 153
130 return 0; 154 return 0;
@@ -152,11 +176,14 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
152 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", 176 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
153 NULL, 177 NULL,
154 }; 178 };
179 const char *skip = NULL;
155 const struct option test_options[] = { 180 const struct option test_options[] = {
181 OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
156 OPT_INCR('v', "verbose", &verbose, 182 OPT_INCR('v', "verbose", &verbose,
157 "be more verbose (show symbol address, etc)"), 183 "be more verbose (show symbol address, etc)"),
158 OPT_END() 184 OPT_END()
159 }; 185 };
186 struct intlist *skiplist = NULL;
160 187
161 argc = parse_options(argc, argv, test_options, test_usage, 0); 188 argc = parse_options(argc, argv, test_options, test_usage, 0);
162 if (argc >= 1 && !strcmp(argv[0], "list")) 189 if (argc >= 1 && !strcmp(argv[0], "list"))
@@ -169,5 +196,8 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
169 if (symbol__init() < 0) 196 if (symbol__init() < 0)
170 return -1; 197 return -1;
171 198
172 return __cmd_test(argc, argv); 199 if (skip != NULL)
200 skiplist = intlist__new(skip);
201
202 return __cmd_test(argc, argv, skiplist);
173} 203}
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index e61fc828a158..0fd99a9adb91 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -22,7 +22,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
22 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 22 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
23 __perf_evsel__hw_cache_type_op_res_name(type, op, i, 23 __perf_evsel__hw_cache_type_op_res_name(type, op, i,
24 name, sizeof(name)); 24 name, sizeof(name));
25 err = parse_events(evlist, name, 0); 25 err = parse_events(evlist, name);
26 if (err) 26 if (err)
27 ret = err; 27 ret = err;
28 } 28 }
@@ -70,7 +70,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
70 return -ENOMEM; 70 return -ENOMEM;
71 71
72 for (i = 0; i < nr_names; ++i) { 72 for (i = 0; i < nr_names; ++i) {
73 err = parse_events(evlist, names[i], 0); 73 err = parse_events(evlist, names[i]);
74 if (err) { 74 if (err) {
75 pr_debug("failed to parse event '%s', err %d\n", 75 pr_debug("failed to parse event '%s', err %d\n",
76 names[i], err); 76 names[i], err);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
new file mode 100644
index 000000000000..1be64a6c5daf
--- /dev/null
+++ b/tools/perf/tests/hists_link.c
@@ -0,0 +1,500 @@
1#include "perf.h"
2#include "tests.h"
3#include "debug.h"
4#include "symbol.h"
5#include "sort.h"
6#include "evsel.h"
7#include "evlist.h"
8#include "machine.h"
9#include "thread.h"
10#include "parse-events.h"
11
12static struct {
13 u32 pid;
14 const char *comm;
15} fake_threads[] = {
16 { 100, "perf" },
17 { 200, "perf" },
18 { 300, "bash" },
19};
20
21static struct {
22 u32 pid;
23 u64 start;
24 const char *filename;
25} fake_mmap_info[] = {
26 { 100, 0x40000, "perf" },
27 { 100, 0x50000, "libc" },
28 { 100, 0xf0000, "[kernel]" },
29 { 200, 0x40000, "perf" },
30 { 200, 0x50000, "libc" },
31 { 200, 0xf0000, "[kernel]" },
32 { 300, 0x40000, "bash" },
33 { 300, 0x50000, "libc" },
34 { 300, 0xf0000, "[kernel]" },
35};
36
37struct fake_sym {
38 u64 start;
39 u64 length;
40 const char *name;
41};
42
43static struct fake_sym perf_syms[] = {
44 { 700, 100, "main" },
45 { 800, 100, "run_command" },
46 { 900, 100, "cmd_record" },
47};
48
49static struct fake_sym bash_syms[] = {
50 { 700, 100, "main" },
51 { 800, 100, "xmalloc" },
52 { 900, 100, "xfree" },
53};
54
55static struct fake_sym libc_syms[] = {
56 { 700, 100, "malloc" },
57 { 800, 100, "free" },
58 { 900, 100, "realloc" },
59};
60
61static struct fake_sym kernel_syms[] = {
62 { 700, 100, "schedule" },
63 { 800, 100, "page_fault" },
64 { 900, 100, "sys_perf_event_open" },
65};
66
67static struct {
68 const char *dso_name;
69 struct fake_sym *syms;
70 size_t nr_syms;
71} fake_symbols[] = {
72 { "perf", perf_syms, ARRAY_SIZE(perf_syms) },
73 { "bash", bash_syms, ARRAY_SIZE(bash_syms) },
74 { "libc", libc_syms, ARRAY_SIZE(libc_syms) },
75 { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
76};
77
78static struct machine *setup_fake_machine(struct machines *machines)
79{
80 struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
81 size_t i;
82
83 if (machine == NULL) {
84 pr_debug("Not enough memory for machine setup\n");
85 return NULL;
86 }
87
88 for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
89 struct thread *thread;
90
91 thread = machine__findnew_thread(machine, fake_threads[i].pid);
92 if (thread == NULL)
93 goto out;
94
95 thread__set_comm(thread, fake_threads[i].comm);
96 }
97
98 for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
99 union perf_event fake_mmap_event = {
100 .mmap = {
101 .header = { .misc = PERF_RECORD_MISC_USER, },
102 .pid = fake_mmap_info[i].pid,
103 .start = fake_mmap_info[i].start,
104 .len = 0x1000ULL,
105 .pgoff = 0ULL,
106 },
107 };
108
109 strcpy(fake_mmap_event.mmap.filename,
110 fake_mmap_info[i].filename);
111
112 machine__process_mmap_event(machine, &fake_mmap_event);
113 }
114
115 for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
116 size_t k;
117 struct dso *dso;
118
119 dso = __dsos__findnew(&machine->user_dsos,
120 fake_symbols[i].dso_name);
121 if (dso == NULL)
122 goto out;
123
124 /* emulate dso__load() */
125 dso__set_loaded(dso, MAP__FUNCTION);
126
127 for (k = 0; k < fake_symbols[i].nr_syms; k++) {
128 struct symbol *sym;
129 struct fake_sym *fsym = &fake_symbols[i].syms[k];
130
131 sym = symbol__new(fsym->start, fsym->length,
132 STB_GLOBAL, fsym->name);
133 if (sym == NULL)
134 goto out;
135
136 symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
137 }
138 }
139
140 return machine;
141
142out:
143 pr_debug("Not enough memory for machine setup\n");
144 machine__delete_threads(machine);
145 machine__delete(machine);
146 return NULL;
147}
148
149struct sample {
150 u32 pid;
151 u64 ip;
152 struct thread *thread;
153 struct map *map;
154 struct symbol *sym;
155};
156
157static struct sample fake_common_samples[] = {
158 /* perf [kernel] schedule() */
159 { .pid = 100, .ip = 0xf0000 + 700, },
160 /* perf [perf] main() */
161 { .pid = 200, .ip = 0x40000 + 700, },
162 /* perf [perf] cmd_record() */
163 { .pid = 200, .ip = 0x40000 + 900, },
164 /* bash [bash] xmalloc() */
165 { .pid = 300, .ip = 0x40000 + 800, },
166 /* bash [libc] malloc() */
167 { .pid = 300, .ip = 0x50000 + 700, },
168};
169
170static struct sample fake_samples[][5] = {
171 {
172 /* perf [perf] run_command() */
173 { .pid = 100, .ip = 0x40000 + 800, },
174 /* perf [libc] malloc() */
175 { .pid = 100, .ip = 0x50000 + 700, },
176 /* perf [kernel] page_fault() */
177 { .pid = 100, .ip = 0xf0000 + 800, },
178 /* perf [kernel] sys_perf_event_open() */
179 { .pid = 200, .ip = 0xf0000 + 900, },
180 /* bash [libc] free() */
181 { .pid = 300, .ip = 0x50000 + 800, },
182 },
183 {
184 /* perf [libc] free() */
185 { .pid = 200, .ip = 0x50000 + 800, },
186 /* bash [libc] malloc() */
187 { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
188 /* bash [bash] xfee() */
189 { .pid = 300, .ip = 0x40000 + 900, },
190 /* bash [libc] realloc() */
191 { .pid = 300, .ip = 0x50000 + 900, },
192 /* bash [kernel] page_fault() */
193 { .pid = 300, .ip = 0xf0000 + 800, },
194 },
195};
196
197static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
198{
199 struct perf_evsel *evsel;
200 struct addr_location al;
201 struct hist_entry *he;
202 struct perf_sample sample = { .cpu = 0, };
203 size_t i = 0, k;
204
205 /*
206 * each evsel will have 10 samples - 5 common and 5 distinct.
207 * However the second evsel also has a collapsed entry for
208 * "bash [libc] malloc" so total 9 entries will be in the tree.
209 */
210 list_for_each_entry(evsel, &evlist->entries, node) {
211 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
212 const union perf_event event = {
213 .ip = {
214 .header = {
215 .misc = PERF_RECORD_MISC_USER,
216 },
217 .pid = fake_common_samples[k].pid,
218 .ip = fake_common_samples[k].ip,
219 },
220 };
221
222 if (perf_event__preprocess_sample(&event, machine, &al,
223 &sample, 0) < 0)
224 goto out;
225
226 he = __hists__add_entry(&evsel->hists, &al, NULL, 1);
227 if (he == NULL)
228 goto out;
229
230 fake_common_samples[k].thread = al.thread;
231 fake_common_samples[k].map = al.map;
232 fake_common_samples[k].sym = al.sym;
233 }
234
235 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
236 const union perf_event event = {
237 .ip = {
238 .header = {
239 .misc = PERF_RECORD_MISC_USER,
240 },
241 .pid = fake_samples[i][k].pid,
242 .ip = fake_samples[i][k].ip,
243 },
244 };
245
246 if (perf_event__preprocess_sample(&event, machine, &al,
247 &sample, 0) < 0)
248 goto out;
249
250 he = __hists__add_entry(&evsel->hists, &al, NULL, 1);
251 if (he == NULL)
252 goto out;
253
254 fake_samples[i][k].thread = al.thread;
255 fake_samples[i][k].map = al.map;
256 fake_samples[i][k].sym = al.sym;
257 }
258 i++;
259 }
260
261 return 0;
262
263out:
264 pr_debug("Not enough memory for adding a hist entry\n");
265 return -1;
266}
267
268static int find_sample(struct sample *samples, size_t nr_samples,
269 struct thread *t, struct map *m, struct symbol *s)
270{
271 while (nr_samples--) {
272 if (samples->thread == t && samples->map == m &&
273 samples->sym == s)
274 return 1;
275 samples++;
276 }
277 return 0;
278}
279
280static int __validate_match(struct hists *hists)
281{
282 size_t count = 0;
283 struct rb_root *root;
284 struct rb_node *node;
285
286 /*
287 * Only entries from fake_common_samples should have a pair.
288 */
289 if (sort__need_collapse)
290 root = &hists->entries_collapsed;
291 else
292 root = hists->entries_in;
293
294 node = rb_first(root);
295 while (node) {
296 struct hist_entry *he;
297
298 he = rb_entry(node, struct hist_entry, rb_node_in);
299
300 if (hist_entry__has_pairs(he)) {
301 if (find_sample(fake_common_samples,
302 ARRAY_SIZE(fake_common_samples),
303 he->thread, he->ms.map, he->ms.sym)) {
304 count++;
305 } else {
306 pr_debug("Can't find the matched entry\n");
307 return -1;
308 }
309 }
310
311 node = rb_next(node);
312 }
313
314 if (count != ARRAY_SIZE(fake_common_samples)) {
315 pr_debug("Invalid count for matched entries: %zd of %zd\n",
316 count, ARRAY_SIZE(fake_common_samples));
317 return -1;
318 }
319
320 return 0;
321}
322
323static int validate_match(struct hists *leader, struct hists *other)
324{
325 return __validate_match(leader) || __validate_match(other);
326}
327
328static int __validate_link(struct hists *hists, int idx)
329{
330 size_t count = 0;
331 size_t count_pair = 0;
332 size_t count_dummy = 0;
333 struct rb_root *root;
334 struct rb_node *node;
335
336 /*
337 * Leader hists (idx = 0) will have dummy entries from other,
338 * and some entries will have no pair. However every entry
339 * in other hists should have (dummy) pair.
340 */
341 if (sort__need_collapse)
342 root = &hists->entries_collapsed;
343 else
344 root = hists->entries_in;
345
346 node = rb_first(root);
347 while (node) {
348 struct hist_entry *he;
349
350 he = rb_entry(node, struct hist_entry, rb_node_in);
351
352 if (hist_entry__has_pairs(he)) {
353 if (!find_sample(fake_common_samples,
354 ARRAY_SIZE(fake_common_samples),
355 he->thread, he->ms.map, he->ms.sym) &&
356 !find_sample(fake_samples[idx],
357 ARRAY_SIZE(fake_samples[idx]),
358 he->thread, he->ms.map, he->ms.sym)) {
359 count_dummy++;
360 }
361 count_pair++;
362 } else if (idx) {
363 pr_debug("A entry from the other hists should have pair\n");
364 return -1;
365 }
366
367 count++;
368 node = rb_next(node);
369 }
370
371 /*
372 * Note that we have a entry collapsed in the other (idx = 1) hists.
373 */
374 if (idx == 0) {
375 if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
376 pr_debug("Invalid count of dummy entries: %zd of %zd\n",
377 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
378 return -1;
379 }
380 if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
381 pr_debug("Invalid count of total leader entries: %zd of %zd\n",
382 count, count_pair + ARRAY_SIZE(fake_samples[0]));
383 return -1;
384 }
385 } else {
386 if (count != count_pair) {
387 pr_debug("Invalid count of total other entries: %zd of %zd\n",
388 count, count_pair);
389 return -1;
390 }
391 if (count_dummy > 0) {
392 pr_debug("Other hists should not have dummy entries: %zd\n",
393 count_dummy);
394 return -1;
395 }
396 }
397
398 return 0;
399}
400
401static int validate_link(struct hists *leader, struct hists *other)
402{
403 return __validate_link(leader, 0) || __validate_link(other, 1);
404}
405
406static void print_hists(struct hists *hists)
407{
408 int i = 0;
409 struct rb_root *root;
410 struct rb_node *node;
411
412 if (sort__need_collapse)
413 root = &hists->entries_collapsed;
414 else
415 root = hists->entries_in;
416
417 pr_info("----- %s --------\n", __func__);
418 node = rb_first(root);
419 while (node) {
420 struct hist_entry *he;
421
422 he = rb_entry(node, struct hist_entry, rb_node_in);
423
424 pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
425 i, he->thread->comm, he->ms.map->dso->short_name,
426 he->ms.sym->name, he->stat.period);
427
428 i++;
429 node = rb_next(node);
430 }
431}
432
433int test__hists_link(void)
434{
435 int err = -1;
436 struct machines machines;
437 struct machine *machine = NULL;
438 struct perf_evsel *evsel, *first;
439 struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
440
441 if (evlist == NULL)
442 return -ENOMEM;
443
444 err = parse_events(evlist, "cpu-clock");
445 if (err)
446 goto out;
447 err = parse_events(evlist, "task-clock");
448 if (err)
449 goto out;
450
451 /* default sort order (comm,dso,sym) will be used */
452 if (setup_sorting() < 0)
453 goto out;
454
455 machines__init(&machines);
456
457 /* setup threads/dso/map/symbols also */
458 machine = setup_fake_machine(&machines);
459 if (!machine)
460 goto out;
461
462 if (verbose > 1)
463 machine__fprintf(machine, stderr);
464
465 /* process sample events */
466 err = add_hist_entries(evlist, machine);
467 if (err < 0)
468 goto out;
469
470 list_for_each_entry(evsel, &evlist->entries, node) {
471 hists__collapse_resort(&evsel->hists);
472
473 if (verbose > 2)
474 print_hists(&evsel->hists);
475 }
476
477 first = perf_evlist__first(evlist);
478 evsel = perf_evlist__last(evlist);
479
480 /* match common entries */
481 hists__match(&first->hists, &evsel->hists);
482 err = validate_match(&first->hists, &evsel->hists);
483 if (err)
484 goto out;
485
486 /* link common and/or dummy entries */
487 hists__link(&first->hists, &evsel->hists);
488 err = validate_link(&first->hists, &evsel->hists);
489 if (err)
490 goto out;
491
492 err = 0;
493
494out:
495 /* tear down everything */
496 perf_evlist__delete(evlist);
497 machines__exit(&machines);
498
499 return err;
500}
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index e1746811e14b..cdd50755af51 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -22,36 +22,16 @@ int test__basic_mmap(void)
22 struct thread_map *threads; 22 struct thread_map *threads;
23 struct cpu_map *cpus; 23 struct cpu_map *cpus;
24 struct perf_evlist *evlist; 24 struct perf_evlist *evlist;
25 struct perf_event_attr attr = {
26 .type = PERF_TYPE_TRACEPOINT,
27 .read_format = PERF_FORMAT_ID,
28 .sample_type = PERF_SAMPLE_ID,
29 .watermark = 0,
30 };
31 cpu_set_t cpu_set; 25 cpu_set_t cpu_set;
32 const char *syscall_names[] = { "getsid", "getppid", "getpgrp", 26 const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
33 "getpgid", }; 27 "getpgid", };
34 pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, 28 pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
35 (void*)getpgid }; 29 (void*)getpgid };
36#define nsyscalls ARRAY_SIZE(syscall_names) 30#define nsyscalls ARRAY_SIZE(syscall_names)
37 int ids[nsyscalls];
38 unsigned int nr_events[nsyscalls], 31 unsigned int nr_events[nsyscalls],
39 expected_nr_events[nsyscalls], i, j; 32 expected_nr_events[nsyscalls], i, j;
40 struct perf_evsel *evsels[nsyscalls], *evsel; 33 struct perf_evsel *evsels[nsyscalls], *evsel;
41 34
42 for (i = 0; i < nsyscalls; ++i) {
43 char name[64];
44
45 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
46 ids[i] = trace_event__id(name);
47 if (ids[i] < 0) {
48 pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
49 return -1;
50 }
51 nr_events[i] = 0;
52 expected_nr_events[i] = random() % 257;
53 }
54
55 threads = thread_map__new(-1, getpid(), UINT_MAX); 35 threads = thread_map__new(-1, getpid(), UINT_MAX);
56 if (threads == NULL) { 36 if (threads == NULL) {
57 pr_debug("thread_map__new\n"); 37 pr_debug("thread_map__new\n");
@@ -79,18 +59,19 @@ int test__basic_mmap(void)
79 goto out_free_cpus; 59 goto out_free_cpus;
80 } 60 }
81 61
82 /* anonymous union fields, can't be initialized above */
83 attr.wakeup_events = 1;
84 attr.sample_period = 1;
85
86 for (i = 0; i < nsyscalls; ++i) { 62 for (i = 0; i < nsyscalls; ++i) {
87 attr.config = ids[i]; 63 char name[64];
88 evsels[i] = perf_evsel__new(&attr, i); 64
65 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
66 evsels[i] = perf_evsel__newtp("syscalls", name, i);
89 if (evsels[i] == NULL) { 67 if (evsels[i] == NULL) {
90 pr_debug("perf_evsel__new\n"); 68 pr_debug("perf_evsel__new\n");
91 goto out_free_evlist; 69 goto out_free_evlist;
92 } 70 }
93 71
72 evsels[i]->attr.wakeup_events = 1;
73 perf_evsel__set_sample_id(evsels[i]);
74
94 perf_evlist__add(evlist, evsels[i]); 75 perf_evlist__add(evlist, evsels[i]);
95 76
96 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { 77 if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
@@ -99,6 +80,9 @@ int test__basic_mmap(void)
99 strerror(errno)); 80 strerror(errno));
100 goto out_close_fd; 81 goto out_close_fd;
101 } 82 }
83
84 nr_events[i] = 0;
85 expected_nr_events[i] = 1 + rand() % 127;
102 } 86 }
103 87
104 if (perf_evlist__mmap(evlist, 128, true) < 0) { 88 if (perf_evlist__mmap(evlist, 128, true) < 0) {
@@ -128,6 +112,7 @@ int test__basic_mmap(void)
128 goto out_munmap; 112 goto out_munmap;
129 } 113 }
130 114
115 err = -1;
131 evsel = perf_evlist__id2evsel(evlist, sample.id); 116 evsel = perf_evlist__id2evsel(evlist, sample.id);
132 if (evsel == NULL) { 117 if (evsel == NULL) {
133 pr_debug("event with id %" PRIu64 118 pr_debug("event with id %" PRIu64
@@ -137,16 +122,17 @@ int test__basic_mmap(void)
137 nr_events[evsel->idx]++; 122 nr_events[evsel->idx]++;
138 } 123 }
139 124
125 err = 0;
140 list_for_each_entry(evsel, &evlist->entries, node) { 126 list_for_each_entry(evsel, &evlist->entries, node) {
141 if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { 127 if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
142 pr_debug("expected %d %s events, got %d\n", 128 pr_debug("expected %d %s events, got %d\n",
143 expected_nr_events[evsel->idx], 129 expected_nr_events[evsel->idx],
144 perf_evsel__name(evsel), nr_events[evsel->idx]); 130 perf_evsel__name(evsel), nr_events[evsel->idx]);
131 err = -1;
145 goto out_munmap; 132 goto out_munmap;
146 } 133 }
147 } 134 }
148 135
149 err = 0;
150out_munmap: 136out_munmap:
151 perf_evlist__munmap(evlist); 137 perf_evlist__munmap(evlist);
152out_close_fd: 138out_close_fd:
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
index 31072aba0d54..b0657a9ccda6 100644
--- a/tools/perf/tests/open-syscall-all-cpus.c
+++ b/tools/perf/tests/open-syscall-all-cpus.c
@@ -7,20 +7,12 @@
7int test__open_syscall_event_on_all_cpus(void) 7int test__open_syscall_event_on_all_cpus(void)
8{ 8{
9 int err = -1, fd, cpu; 9 int err = -1, fd, cpu;
10 struct thread_map *threads;
11 struct cpu_map *cpus; 10 struct cpu_map *cpus;
12 struct perf_evsel *evsel; 11 struct perf_evsel *evsel;
13 struct perf_event_attr attr;
14 unsigned int nr_open_calls = 111, i; 12 unsigned int nr_open_calls = 111, i;
15 cpu_set_t cpu_set; 13 cpu_set_t cpu_set;
16 int id = trace_event__id("sys_enter_open"); 14 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
17 15
18 if (id < 0) {
19 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
20 return -1;
21 }
22
23 threads = thread_map__new(-1, getpid(), UINT_MAX);
24 if (threads == NULL) { 16 if (threads == NULL) {
25 pr_debug("thread_map__new\n"); 17 pr_debug("thread_map__new\n");
26 return -1; 18 return -1;
@@ -32,15 +24,11 @@ int test__open_syscall_event_on_all_cpus(void)
32 goto out_thread_map_delete; 24 goto out_thread_map_delete;
33 } 25 }
34 26
35
36 CPU_ZERO(&cpu_set); 27 CPU_ZERO(&cpu_set);
37 28
38 memset(&attr, 0, sizeof(attr)); 29 evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
39 attr.type = PERF_TYPE_TRACEPOINT;
40 attr.config = id;
41 evsel = perf_evsel__new(&attr, 0);
42 if (evsel == NULL) { 30 if (evsel == NULL) {
43 pr_debug("perf_evsel__new\n"); 31 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
44 goto out_thread_map_delete; 32 goto out_thread_map_delete;
45 } 33 }
46 34
@@ -110,6 +98,7 @@ int test__open_syscall_event_on_all_cpus(void)
110 } 98 }
111 } 99 }
112 100
101 perf_evsel__free_counts(evsel);
113out_close_fd: 102out_close_fd:
114 perf_evsel__close_fd(evsel, 1, threads->nr); 103 perf_evsel__close_fd(evsel, 1, threads->nr);
115out_evsel_delete: 104out_evsel_delete:
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
index 98be8b518b4f..befc0671f95d 100644
--- a/tools/perf/tests/open-syscall.c
+++ b/tools/perf/tests/open-syscall.c
@@ -6,29 +6,18 @@
6int test__open_syscall_event(void) 6int test__open_syscall_event(void)
7{ 7{
8 int err = -1, fd; 8 int err = -1, fd;
9 struct thread_map *threads;
10 struct perf_evsel *evsel; 9 struct perf_evsel *evsel;
11 struct perf_event_attr attr;
12 unsigned int nr_open_calls = 111, i; 10 unsigned int nr_open_calls = 111, i;
13 int id = trace_event__id("sys_enter_open"); 11 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
14 12
15 if (id < 0) {
16 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
17 return -1;
18 }
19
20 threads = thread_map__new(-1, getpid(), UINT_MAX);
21 if (threads == NULL) { 13 if (threads == NULL) {
22 pr_debug("thread_map__new\n"); 14 pr_debug("thread_map__new\n");
23 return -1; 15 return -1;
24 } 16 }
25 17
26 memset(&attr, 0, sizeof(attr)); 18 evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
27 attr.type = PERF_TYPE_TRACEPOINT;
28 attr.config = id;
29 evsel = perf_evsel__new(&attr, 0);
30 if (evsel == NULL) { 19 if (evsel == NULL) {
31 pr_debug("perf_evsel__new\n"); 20 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
32 goto out_thread_map_delete; 21 goto out_thread_map_delete;
33 } 22 }
34 23
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 32ee478905eb..c5636f36fe31 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -3,6 +3,7 @@
3#include "evsel.h" 3#include "evsel.h"
4#include "evlist.h" 4#include "evlist.h"
5#include "sysfs.h" 5#include "sysfs.h"
6#include "debugfs.h"
6#include "tests.h" 7#include "tests.h"
7#include <linux/hw_breakpoint.h> 8#include <linux/hw_breakpoint.h>
8 9
@@ -22,6 +23,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
22 struct perf_evsel *evsel = perf_evlist__first(evlist); 23 struct perf_evsel *evsel = perf_evlist__first(evlist);
23 24
24 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); 25 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
26 TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
25 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); 27 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
26 TEST_ASSERT_VAL("wrong sample_type", 28 TEST_ASSERT_VAL("wrong sample_type",
27 PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); 29 PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
@@ -34,6 +36,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
34 struct perf_evsel *evsel; 36 struct perf_evsel *evsel;
35 37
36 TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); 38 TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
39 TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
37 40
38 list_for_each_entry(evsel, &evlist->entries, node) { 41 list_for_each_entry(evsel, &evlist->entries, node) {
39 TEST_ASSERT_VAL("wrong type", 42 TEST_ASSERT_VAL("wrong type",
@@ -463,10 +466,10 @@ static int test__checkevent_pmu_events(struct perf_evlist *evlist)
463 466
464static int test__checkterms_simple(struct list_head *terms) 467static int test__checkterms_simple(struct list_head *terms)
465{ 468{
466 struct parse_events__term *term; 469 struct parse_events_term *term;
467 470
468 /* config=10 */ 471 /* config=10 */
469 term = list_entry(terms->next, struct parse_events__term, list); 472 term = list_entry(terms->next, struct parse_events_term, list);
470 TEST_ASSERT_VAL("wrong type term", 473 TEST_ASSERT_VAL("wrong type term",
471 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); 474 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
472 TEST_ASSERT_VAL("wrong type val", 475 TEST_ASSERT_VAL("wrong type val",
@@ -475,7 +478,7 @@ static int test__checkterms_simple(struct list_head *terms)
475 TEST_ASSERT_VAL("wrong config", !term->config); 478 TEST_ASSERT_VAL("wrong config", !term->config);
476 479
477 /* config1 */ 480 /* config1 */
478 term = list_entry(term->list.next, struct parse_events__term, list); 481 term = list_entry(term->list.next, struct parse_events_term, list);
479 TEST_ASSERT_VAL("wrong type term", 482 TEST_ASSERT_VAL("wrong type term",
480 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); 483 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
481 TEST_ASSERT_VAL("wrong type val", 484 TEST_ASSERT_VAL("wrong type val",
@@ -484,7 +487,7 @@ static int test__checkterms_simple(struct list_head *terms)
484 TEST_ASSERT_VAL("wrong config", !term->config); 487 TEST_ASSERT_VAL("wrong config", !term->config);
485 488
486 /* config2=3 */ 489 /* config2=3 */
487 term = list_entry(term->list.next, struct parse_events__term, list); 490 term = list_entry(term->list.next, struct parse_events_term, list);
488 TEST_ASSERT_VAL("wrong type term", 491 TEST_ASSERT_VAL("wrong type term",
489 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); 492 term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
490 TEST_ASSERT_VAL("wrong type val", 493 TEST_ASSERT_VAL("wrong type val",
@@ -493,7 +496,7 @@ static int test__checkterms_simple(struct list_head *terms)
493 TEST_ASSERT_VAL("wrong config", !term->config); 496 TEST_ASSERT_VAL("wrong config", !term->config);
494 497
495 /* umask=1*/ 498 /* umask=1*/
496 term = list_entry(term->list.next, struct parse_events__term, list); 499 term = list_entry(term->list.next, struct parse_events_term, list);
497 TEST_ASSERT_VAL("wrong type term", 500 TEST_ASSERT_VAL("wrong type term",
498 term->type_term == PARSE_EVENTS__TERM_TYPE_USER); 501 term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
499 TEST_ASSERT_VAL("wrong type val", 502 TEST_ASSERT_VAL("wrong type val",
@@ -509,6 +512,7 @@ static int test__group1(struct perf_evlist *evlist)
509 struct perf_evsel *evsel, *leader; 512 struct perf_evsel *evsel, *leader;
510 513
511 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); 514 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
515 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
512 516
513 /* instructions:k */ 517 /* instructions:k */
514 evsel = leader = perf_evlist__first(evlist); 518 evsel = leader = perf_evlist__first(evlist);
@@ -521,7 +525,9 @@ static int test__group1(struct perf_evlist *evlist)
521 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 525 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
522 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 526 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
523 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 527 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
524 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 528 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
529 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
530 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
525 531
526 /* cycles:upp */ 532 /* cycles:upp */
527 evsel = perf_evsel__next(evsel); 533 evsel = perf_evsel__next(evsel);
@@ -536,6 +542,7 @@ static int test__group1(struct perf_evlist *evlist)
536 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 542 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
537 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); 543 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
538 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 544 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
545 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
539 546
540 return 0; 547 return 0;
541} 548}
@@ -545,6 +552,7 @@ static int test__group2(struct perf_evlist *evlist)
545 struct perf_evsel *evsel, *leader; 552 struct perf_evsel *evsel, *leader;
546 553
547 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); 554 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
555 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
548 556
549 /* faults + :ku modifier */ 557 /* faults + :ku modifier */
550 evsel = leader = perf_evlist__first(evlist); 558 evsel = leader = perf_evlist__first(evlist);
@@ -557,7 +565,9 @@ static int test__group2(struct perf_evlist *evlist)
557 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 565 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
558 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 566 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
559 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 567 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
560 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 568 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
569 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
570 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
561 571
562 /* cache-references + :u modifier */ 572 /* cache-references + :u modifier */
563 evsel = perf_evsel__next(evsel); 573 evsel = perf_evsel__next(evsel);
@@ -567,10 +577,11 @@ static int test__group2(struct perf_evlist *evlist)
567 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); 577 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
568 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); 578 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
569 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); 579 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
570 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 580 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
571 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 581 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
572 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 582 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
573 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 583 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
584 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
574 585
575 /* cycles:k */ 586 /* cycles:k */
576 evsel = perf_evsel__next(evsel); 587 evsel = perf_evsel__next(evsel);
@@ -583,7 +594,7 @@ static int test__group2(struct perf_evlist *evlist)
583 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 594 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
584 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 595 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
585 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 596 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
586 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 597 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
587 598
588 return 0; 599 return 0;
589} 600}
@@ -593,6 +604,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
593 struct perf_evsel *evsel, *leader; 604 struct perf_evsel *evsel, *leader;
594 605
595 TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); 606 TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
607 TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
596 608
597 /* group1 syscalls:sys_enter_open:H */ 609 /* group1 syscalls:sys_enter_open:H */
598 evsel = leader = perf_evlist__first(evlist); 610 evsel = leader = perf_evlist__first(evlist);
@@ -606,9 +618,11 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
606 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); 618 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
607 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 619 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
608 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 620 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
609 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 621 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
610 TEST_ASSERT_VAL("wrong group name", 622 TEST_ASSERT_VAL("wrong group name",
611 !strcmp(leader->group_name, "group1")); 623 !strcmp(leader->group_name, "group1"));
624 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
625 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
612 626
613 /* group1 cycles:kppp */ 627 /* group1 cycles:kppp */
614 evsel = perf_evsel__next(evsel); 628 evsel = perf_evsel__next(evsel);
@@ -624,6 +638,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
624 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); 638 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
625 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 639 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
626 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 640 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
641 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
627 642
628 /* group2 cycles + G modifier */ 643 /* group2 cycles + G modifier */
629 evsel = leader = perf_evsel__next(evsel); 644 evsel = leader = perf_evsel__next(evsel);
@@ -636,9 +651,11 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
636 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 651 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
637 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 652 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
638 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 653 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
639 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 654 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
640 TEST_ASSERT_VAL("wrong group name", 655 TEST_ASSERT_VAL("wrong group name",
641 !strcmp(leader->group_name, "group2")); 656 !strcmp(leader->group_name, "group2"));
657 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
658 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
642 659
643 /* group2 1:3 + G modifier */ 660 /* group2 1:3 + G modifier */
644 evsel = perf_evsel__next(evsel); 661 evsel = perf_evsel__next(evsel);
@@ -651,6 +668,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
651 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 668 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
652 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 669 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
653 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 670 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
671 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
654 672
655 /* instructions:u */ 673 /* instructions:u */
656 evsel = perf_evsel__next(evsel); 674 evsel = perf_evsel__next(evsel);
@@ -663,7 +681,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
663 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 681 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
664 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 682 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
665 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 683 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
666 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 684 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
667 685
668 return 0; 686 return 0;
669} 687}
@@ -673,6 +691,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
673 struct perf_evsel *evsel, *leader; 691 struct perf_evsel *evsel, *leader;
674 692
675 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); 693 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
694 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
676 695
677 /* cycles:u + p */ 696 /* cycles:u + p */
678 evsel = leader = perf_evlist__first(evlist); 697 evsel = leader = perf_evlist__first(evlist);
@@ -687,7 +706,9 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
687 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 706 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
688 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); 707 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
689 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 708 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
690 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 709 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
710 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
711 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
691 712
692 /* instructions:kp + p */ 713 /* instructions:kp + p */
693 evsel = perf_evsel__next(evsel); 714 evsel = perf_evsel__next(evsel);
@@ -702,6 +723,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
702 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 723 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
703 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); 724 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
704 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 725 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
726 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
705 727
706 return 0; 728 return 0;
707} 729}
@@ -711,6 +733,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
711 struct perf_evsel *evsel, *leader; 733 struct perf_evsel *evsel, *leader;
712 734
713 TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); 735 TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
736 TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
714 737
715 /* cycles + G */ 738 /* cycles + G */
716 evsel = leader = perf_evlist__first(evlist); 739 evsel = leader = perf_evlist__first(evlist);
@@ -724,7 +747,9 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
724 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 747 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
725 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 748 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
726 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 749 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
727 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 750 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
751 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
752 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
728 753
729 /* instructions + G */ 754 /* instructions + G */
730 evsel = perf_evsel__next(evsel); 755 evsel = perf_evsel__next(evsel);
@@ -738,6 +763,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
738 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 763 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
739 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 764 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
740 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 765 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
766 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
741 767
742 /* cycles:G */ 768 /* cycles:G */
743 evsel = leader = perf_evsel__next(evsel); 769 evsel = leader = perf_evsel__next(evsel);
@@ -751,7 +777,9 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
751 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 777 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
752 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 778 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
753 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 779 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
754 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 780 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
781 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
782 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
755 783
756 /* instructions:G */ 784 /* instructions:G */
757 evsel = perf_evsel__next(evsel); 785 evsel = perf_evsel__next(evsel);
@@ -765,6 +793,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
765 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); 793 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
766 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 794 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
767 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 795 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
796 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
768 797
769 /* cycles */ 798 /* cycles */
770 evsel = perf_evsel__next(evsel); 799 evsel = perf_evsel__next(evsel);
@@ -777,18 +806,235 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
777 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); 806 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
778 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 807 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
779 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); 808 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
780 TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel)); 809 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
810
811 return 0;
812}
813
814static int test__group_gh1(struct perf_evlist *evlist)
815{
816 struct perf_evsel *evsel, *leader;
817
818 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
819 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
820
821 /* cycles + :H group modifier */
822 evsel = leader = perf_evlist__first(evlist);
823 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
824 TEST_ASSERT_VAL("wrong config",
825 PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
826 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
827 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
828 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
829 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
830 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
831 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
832 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
833 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
834 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
835 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
836
837 /* cache-misses:G + :H group modifier */
838 evsel = perf_evsel__next(evsel);
839 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
840 TEST_ASSERT_VAL("wrong config",
841 PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
842 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
843 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
844 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
845 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
846 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
847 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
848 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
849 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
850
851 return 0;
852}
853
854static int test__group_gh2(struct perf_evlist *evlist)
855{
856 struct perf_evsel *evsel, *leader;
857
858 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
859 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
860
861 /* cycles + :G group modifier */
862 evsel = leader = perf_evlist__first(evlist);
863 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
864 TEST_ASSERT_VAL("wrong config",
865 PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
866 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
867 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
868 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
869 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
870 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
871 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
872 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
873 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
874 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
875 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
876
877 /* cache-misses:H + :G group modifier */
878 evsel = perf_evsel__next(evsel);
879 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
880 TEST_ASSERT_VAL("wrong config",
881 PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
882 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
883 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
884 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
885 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
886 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
887 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
888 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
889 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
890
891 return 0;
892}
893
894static int test__group_gh3(struct perf_evlist *evlist)
895{
896 struct perf_evsel *evsel, *leader;
897
898 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
899 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
900
901 /* cycles:G + :u group modifier */
902 evsel = leader = perf_evlist__first(evlist);
903 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
904 TEST_ASSERT_VAL("wrong config",
905 PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
906 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
907 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
908 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
909 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
910 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
911 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
912 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
913 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
914 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
915 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
916
917 /* cache-misses:H + :u group modifier */
918 evsel = perf_evsel__next(evsel);
919 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
920 TEST_ASSERT_VAL("wrong config",
921 PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
922 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
923 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
924 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
925 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
926 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
927 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
928 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
929 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
930
931 return 0;
932}
933
934static int test__group_gh4(struct perf_evlist *evlist)
935{
936 struct perf_evsel *evsel, *leader;
937
938 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
939 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
940
941 /* cycles:G + :uG group modifier */
942 evsel = leader = perf_evlist__first(evlist);
943 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
944 TEST_ASSERT_VAL("wrong config",
945 PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
946 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
947 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
948 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
949 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
950 TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
951 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
952 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
953 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
954 TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
955 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
956
957 /* cache-misses:H + :uG group modifier */
958 evsel = perf_evsel__next(evsel);
959 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
960 TEST_ASSERT_VAL("wrong config",
961 PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
962 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
963 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
964 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
965 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
966 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
967 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
968 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
969 TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
781 970
782 return 0; 971 return 0;
783} 972}
784 973
785struct test__event_st { 974static int count_tracepoints(void)
975{
976 char events_path[PATH_MAX];
977 struct dirent *events_ent;
978 DIR *events_dir;
979 int cnt = 0;
980
981 scnprintf(events_path, PATH_MAX, "%s/tracing/events",
982 debugfs_find_mountpoint());
983
984 events_dir = opendir(events_path);
985
986 TEST_ASSERT_VAL("Can't open events dir", events_dir);
987
988 while ((events_ent = readdir(events_dir))) {
989 char sys_path[PATH_MAX];
990 struct dirent *sys_ent;
991 DIR *sys_dir;
992
993 if (!strcmp(events_ent->d_name, ".")
994 || !strcmp(events_ent->d_name, "..")
995 || !strcmp(events_ent->d_name, "enable")
996 || !strcmp(events_ent->d_name, "header_event")
997 || !strcmp(events_ent->d_name, "header_page"))
998 continue;
999
1000 scnprintf(sys_path, PATH_MAX, "%s/%s",
1001 events_path, events_ent->d_name);
1002
1003 sys_dir = opendir(sys_path);
1004 TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
1005
1006 while ((sys_ent = readdir(sys_dir))) {
1007 if (!strcmp(sys_ent->d_name, ".")
1008 || !strcmp(sys_ent->d_name, "..")
1009 || !strcmp(sys_ent->d_name, "enable")
1010 || !strcmp(sys_ent->d_name, "filter"))
1011 continue;
1012
1013 cnt++;
1014 }
1015
1016 closedir(sys_dir);
1017 }
1018
1019 closedir(events_dir);
1020 return cnt;
1021}
1022
1023static int test__all_tracepoints(struct perf_evlist *evlist)
1024{
1025 TEST_ASSERT_VAL("wrong events count",
1026 count_tracepoints() == evlist->nr_entries);
1027
1028 return test__checkevent_tracepoint_multi(evlist);
1029}
1030
1031struct evlist_test {
786 const char *name; 1032 const char *name;
787 __u32 type; 1033 __u32 type;
788 int (*check)(struct perf_evlist *evlist); 1034 int (*check)(struct perf_evlist *evlist);
789}; 1035};
790 1036
791static struct test__event_st test__events[] = { 1037static struct evlist_test test__events[] = {
792 [0] = { 1038 [0] = {
793 .name = "syscalls:sys_enter_open", 1039 .name = "syscalls:sys_enter_open",
794 .check = test__checkevent_tracepoint, 1040 .check = test__checkevent_tracepoint,
@@ -921,9 +1167,29 @@ static struct test__event_st test__events[] = {
921 .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", 1167 .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
922 .check = test__group5, 1168 .check = test__group5,
923 }, 1169 },
1170 [33] = {
1171 .name = "*:*",
1172 .check = test__all_tracepoints,
1173 },
1174 [34] = {
1175 .name = "{cycles,cache-misses:G}:H",
1176 .check = test__group_gh1,
1177 },
1178 [35] = {
1179 .name = "{cycles,cache-misses:H}:G",
1180 .check = test__group_gh2,
1181 },
1182 [36] = {
1183 .name = "{cycles:G,cache-misses:H}:u",
1184 .check = test__group_gh3,
1185 },
1186 [37] = {
1187 .name = "{cycles:G,cache-misses:H}:uG",
1188 .check = test__group_gh4,
1189 },
924}; 1190};
925 1191
926static struct test__event_st test__events_pmu[] = { 1192static struct evlist_test test__events_pmu[] = {
927 [0] = { 1193 [0] = {
928 .name = "cpu/config=10,config1,config2=3,period=1000/u", 1194 .name = "cpu/config=10,config1,config2=3,period=1000/u",
929 .check = test__checkevent_pmu, 1195 .check = test__checkevent_pmu,
@@ -934,20 +1200,20 @@ static struct test__event_st test__events_pmu[] = {
934 }, 1200 },
935}; 1201};
936 1202
937struct test__term { 1203struct terms_test {
938 const char *str; 1204 const char *str;
939 __u32 type; 1205 __u32 type;
940 int (*check)(struct list_head *terms); 1206 int (*check)(struct list_head *terms);
941}; 1207};
942 1208
943static struct test__term test__terms[] = { 1209static struct terms_test test__terms[] = {
944 [0] = { 1210 [0] = {
945 .str = "config=10,config1,config2=3,umask=1", 1211 .str = "config=10,config1,config2=3,umask=1",
946 .check = test__checkterms_simple, 1212 .check = test__checkterms_simple,
947 }, 1213 },
948}; 1214};
949 1215
950static int test_event(struct test__event_st *e) 1216static int test_event(struct evlist_test *e)
951{ 1217{
952 struct perf_evlist *evlist; 1218 struct perf_evlist *evlist;
953 int ret; 1219 int ret;
@@ -956,7 +1222,7 @@ static int test_event(struct test__event_st *e)
956 if (evlist == NULL) 1222 if (evlist == NULL)
957 return -ENOMEM; 1223 return -ENOMEM;
958 1224
959 ret = parse_events(evlist, e->name, 0); 1225 ret = parse_events(evlist, e->name);
960 if (ret) { 1226 if (ret) {
961 pr_debug("failed to parse event '%s', err %d\n", 1227 pr_debug("failed to parse event '%s', err %d\n",
962 e->name, ret); 1228 e->name, ret);
@@ -969,13 +1235,13 @@ static int test_event(struct test__event_st *e)
969 return ret; 1235 return ret;
970} 1236}
971 1237
972static int test_events(struct test__event_st *events, unsigned cnt) 1238static int test_events(struct evlist_test *events, unsigned cnt)
973{ 1239{
974 int ret1, ret2 = 0; 1240 int ret1, ret2 = 0;
975 unsigned i; 1241 unsigned i;
976 1242
977 for (i = 0; i < cnt; i++) { 1243 for (i = 0; i < cnt; i++) {
978 struct test__event_st *e = &events[i]; 1244 struct evlist_test *e = &events[i];
979 1245
980 pr_debug("running test %d '%s'\n", i, e->name); 1246 pr_debug("running test %d '%s'\n", i, e->name);
981 ret1 = test_event(e); 1247 ret1 = test_event(e);
@@ -986,7 +1252,7 @@ static int test_events(struct test__event_st *events, unsigned cnt)
986 return ret2; 1252 return ret2;
987} 1253}
988 1254
989static int test_term(struct test__term *t) 1255static int test_term(struct terms_test *t)
990{ 1256{
991 struct list_head *terms; 1257 struct list_head *terms;
992 int ret; 1258 int ret;
@@ -1010,13 +1276,13 @@ static int test_term(struct test__term *t)
1010 return ret; 1276 return ret;
1011} 1277}
1012 1278
1013static int test_terms(struct test__term *terms, unsigned cnt) 1279static int test_terms(struct terms_test *terms, unsigned cnt)
1014{ 1280{
1015 int ret = 0; 1281 int ret = 0;
1016 unsigned i; 1282 unsigned i;
1017 1283
1018 for (i = 0; i < cnt; i++) { 1284 for (i = 0; i < cnt; i++) {
1019 struct test__term *t = &terms[i]; 1285 struct terms_test *t = &terms[i];
1020 1286
1021 pr_debug("running test %d '%s'\n", i, t->str); 1287 pr_debug("running test %d '%s'\n", i, t->str);
1022 ret = test_term(t); 1288 ret = test_term(t);
@@ -1067,7 +1333,7 @@ static int test_pmu_events(void)
1067 1333
1068 while (!ret && (ent = readdir(dir))) { 1334 while (!ret && (ent = readdir(dir))) {
1069#define MAX_NAME 100 1335#define MAX_NAME 100
1070 struct test__event_st e; 1336 struct evlist_test e;
1071 char name[MAX_NAME]; 1337 char name[MAX_NAME];
1072 1338
1073 if (!strcmp(ent->d_name, ".") || 1339 if (!strcmp(ent->d_name, ".") ||
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 70e0d4421df8..1e8e5128d0da 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -96,22 +96,22 @@ int test__PERF_RECORD(void)
96 err = perf_evlist__prepare_workload(evlist, &opts, argv); 96 err = perf_evlist__prepare_workload(evlist, &opts, argv);
97 if (err < 0) { 97 if (err < 0) {
98 pr_debug("Couldn't run the workload!\n"); 98 pr_debug("Couldn't run the workload!\n");
99 goto out_delete_evlist; 99 goto out_delete_maps;
100 } 100 }
101 101
102 /* 102 /*
103 * Config the evsels, setting attr->comm on the first one, etc. 103 * Config the evsels, setting attr->comm on the first one, etc.
104 */ 104 */
105 evsel = perf_evlist__first(evlist); 105 evsel = perf_evlist__first(evlist);
106 evsel->attr.sample_type |= PERF_SAMPLE_CPU; 106 perf_evsel__set_sample_bit(evsel, CPU);
107 evsel->attr.sample_type |= PERF_SAMPLE_TID; 107 perf_evsel__set_sample_bit(evsel, TID);
108 evsel->attr.sample_type |= PERF_SAMPLE_TIME; 108 perf_evsel__set_sample_bit(evsel, TIME);
109 perf_evlist__config_attrs(evlist, &opts); 109 perf_evlist__config(evlist, &opts);
110 110
111 err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); 111 err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
112 if (err < 0) { 112 if (err < 0) {
113 pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); 113 pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
114 goto out_delete_evlist; 114 goto out_delete_maps;
115 } 115 }
116 116
117 cpu = err; 117 cpu = err;
@@ -121,7 +121,7 @@ int test__PERF_RECORD(void)
121 */ 121 */
122 if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { 122 if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
123 pr_debug("sched_setaffinity: %s\n", strerror(errno)); 123 pr_debug("sched_setaffinity: %s\n", strerror(errno));
124 goto out_delete_evlist; 124 goto out_delete_maps;
125 } 125 }
126 126
127 /* 127 /*
@@ -131,7 +131,7 @@ int test__PERF_RECORD(void)
131 err = perf_evlist__open(evlist); 131 err = perf_evlist__open(evlist);
132 if (err < 0) { 132 if (err < 0) {
133 pr_debug("perf_evlist__open: %s\n", strerror(errno)); 133 pr_debug("perf_evlist__open: %s\n", strerror(errno));
134 goto out_delete_evlist; 134 goto out_delete_maps;
135 } 135 }
136 136
137 /* 137 /*
@@ -142,7 +142,7 @@ int test__PERF_RECORD(void)
142 err = perf_evlist__mmap(evlist, opts.mmap_pages, false); 142 err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
143 if (err < 0) { 143 if (err < 0) {
144 pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); 144 pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
145 goto out_delete_evlist; 145 goto out_delete_maps;
146 } 146 }
147 147
148 /* 148 /*
@@ -305,6 +305,8 @@ found_exit:
305 } 305 }
306out_err: 306out_err:
307 perf_evlist__munmap(evlist); 307 perf_evlist__munmap(evlist);
308out_delete_maps:
309 perf_evlist__delete_maps(evlist);
308out_delete_evlist: 310out_delete_evlist:
309 perf_evlist__delete(evlist); 311 perf_evlist__delete(evlist);
310out: 312out:
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index a5f379863b8f..12b322fa3475 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -19,10 +19,8 @@ static struct test_format {
19 { "krava23", "config2:28-29,38\n", }, 19 { "krava23", "config2:28-29,38\n", },
20}; 20};
21 21
22#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
23
24/* Simulated users input. */ 22/* Simulated users input. */
25static struct parse_events__term test_terms[] = { 23static struct parse_events_term test_terms[] = {
26 { 24 {
27 .config = (char *) "krava01", 25 .config = (char *) "krava01",
28 .val.num = 15, 26 .val.num = 15,
@@ -78,7 +76,6 @@ static struct parse_events__term test_terms[] = {
78 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 76 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
79 }, 77 },
80}; 78};
81#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
82 79
83/* 80/*
84 * Prepare format directory data, exported by kernel 81 * Prepare format directory data, exported by kernel
@@ -93,7 +90,7 @@ static char *test_format_dir_get(void)
93 if (!mkdtemp(dir)) 90 if (!mkdtemp(dir))
94 return NULL; 91 return NULL;
95 92
96 for (i = 0; i < TEST_FORMATS_CNT; i++) { 93 for (i = 0; i < ARRAY_SIZE(test_formats); i++) {
97 static char name[PATH_MAX]; 94 static char name[PATH_MAX];
98 struct test_format *format = &test_formats[i]; 95 struct test_format *format = &test_formats[i];
99 FILE *file; 96 FILE *file;
@@ -130,14 +127,12 @@ static struct list_head *test_terms_list(void)
130 static LIST_HEAD(terms); 127 static LIST_HEAD(terms);
131 unsigned int i; 128 unsigned int i;
132 129
133 for (i = 0; i < TERMS_CNT; i++) 130 for (i = 0; i < ARRAY_SIZE(test_terms); i++)
134 list_add_tail(&test_terms[i].list, &terms); 131 list_add_tail(&test_terms[i].list, &terms);
135 132
136 return &terms; 133 return &terms;
137} 134}
138 135
139#undef TERMS_CNT
140
141int test__pmu(void) 136int test__pmu(void)
142{ 137{
143 char *format = test_format_dir_get(); 138 char *format = test_format_dir_get();
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
new file mode 100644
index 000000000000..7760277c6def
--- /dev/null
+++ b/tools/perf/tests/python-use.c
@@ -0,0 +1,23 @@
1/*
2 * Just test if we can load the python binding.
3 */
4
5#include <stdio.h>
6#include <stdlib.h>
7#include "tests.h"
8
9extern int verbose;
10
11int test__python_use(void)
12{
13 char *cmd;
14 int ret;
15
16 if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s",
17 PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0)
18 return -1;
19
20 ret = system(cmd) ? -1 : 0;
21 free(cmd);
22 return ret;
23}
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index fc121edab016..5de0be1ff4b6 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -1,6 +1,12 @@
1#ifndef TESTS_H 1#ifndef TESTS_H
2#define TESTS_H 2#define TESTS_H
3 3
4enum {
5 TEST_OK = 0,
6 TEST_FAIL = -1,
7 TEST_SKIP = -2,
8};
9
4/* Tests */ 10/* Tests */
5int test__vmlinux_matches_kallsyms(void); 11int test__vmlinux_matches_kallsyms(void);
6int test__open_syscall_event(void); 12int test__open_syscall_event(void);
@@ -15,8 +21,7 @@ int test__pmu(void);
15int test__attr(void); 21int test__attr(void);
16int test__dso_data(void); 22int test__dso_data(void);
17int test__parse_events(void); 23int test__parse_events(void);
18 24int test__hists_link(void);
19/* Util */ 25int test__python_use(void);
20int trace_event__id(const char *evname);
21 26
22#endif /* TESTS_H */ 27#endif /* TESTS_H */
diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c
deleted file mode 100644
index 748f2e8f6961..000000000000
--- a/tools/perf/tests/util.c
+++ /dev/null
@@ -1,30 +0,0 @@
1#include <stdio.h>
2#include <unistd.h>
3#include <stdlib.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <fcntl.h>
7#include "tests.h"
8#include "debugfs.h"
9
10int trace_event__id(const char *evname)
11{
12 char *filename;
13 int err = -1, fd;
14
15 if (asprintf(&filename,
16 "%s/syscalls/%s/id",
17 tracing_events_path, evname) < 0)
18 return -1;
19
20 fd = open(filename, O_RDONLY);
21 if (fd >= 0) {
22 char id[16];
23 if (read(fd, id, sizeof(id)) > 0)
24 err = atoi(id);
25 close(fd);
26 }
27
28 free(filename);
29 return err;
30}
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 0d1cdbee2f59..7b4c4d26d1ba 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -44,7 +44,7 @@ int test__vmlinux_matches_kallsyms(void)
44 */ 44 */
45 if (machine__create_kernel_maps(&kallsyms) < 0) { 45 if (machine__create_kernel_maps(&kallsyms) < 0) {
46 pr_debug("machine__create_kernel_maps "); 46 pr_debug("machine__create_kernel_maps ");
47 return -1; 47 goto out;
48 } 48 }
49 49
50 /* 50 /*
@@ -101,7 +101,8 @@ int test__vmlinux_matches_kallsyms(void)
101 */ 101 */
102 if (machine__load_vmlinux_path(&vmlinux, type, 102 if (machine__load_vmlinux_path(&vmlinux, type,
103 vmlinux_matches_kallsyms_filter) <= 0) { 103 vmlinux_matches_kallsyms_filter) <= 0) {
104 pr_debug("machine__load_vmlinux_path "); 104 pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
105 err = TEST_SKIP;
105 goto out; 106 goto out;
106 } 107 }
107 108
@@ -226,5 +227,7 @@ detour:
226 map__fprintf(pos, stderr); 227 map__fprintf(pos, stderr);
227 } 228 }
228out: 229out:
230 machine__exit(&kallsyms);
231 machine__exit(&vmlinux);
229 return err; 232 return err;
230} 233}
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 4aeb7d5df939..809ea4632a34 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -273,6 +273,8 @@ void ui_browser__hide(struct ui_browser *browser __maybe_unused)
273{ 273{
274 pthread_mutex_lock(&ui__lock); 274 pthread_mutex_lock(&ui__lock);
275 ui_helpline__pop(); 275 ui_helpline__pop();
276 free(browser->helpline);
277 browser->helpline = NULL;
276 pthread_mutex_unlock(&ui__lock); 278 pthread_mutex_unlock(&ui__lock);
277} 279}
278 280
@@ -471,7 +473,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
471 return row; 473 return row;
472} 474}
473 475
474static struct ui_browser__colorset { 476static struct ui_browser_colorset {
475 const char *name, *fg, *bg; 477 const char *name, *fg, *bg;
476 int colorset; 478 int colorset;
477} ui_browser__colorsets[] = { 479} ui_browser__colorsets[] = {
@@ -706,7 +708,7 @@ void ui_browser__init(void)
706 perf_config(ui_browser__color_config, NULL); 708 perf_config(ui_browser__color_config, NULL);
707 709
708 while (ui_browser__colorsets[i].name) { 710 while (ui_browser__colorsets[i].name) {
709 struct ui_browser__colorset *c = &ui_browser__colorsets[i++]; 711 struct ui_browser_colorset *c = &ui_browser__colorsets[i++];
710 sltt_set_color(c->colorset, c->name, c->fg, c->bg); 712 sltt_set_color(c->colorset, c->name, c->fg, c->bg);
711 } 713 }
712 714
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 5dab3ca96980..7dca1555c610 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -182,6 +182,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
182 ab->selection = dl; 182 ab->selection = dl;
183} 183}
184 184
185static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
186{
187 if (!dl || !dl->ins || !ins__is_jump(dl->ins)
188 || !disasm_line__has_offset(dl)
189 || dl->ops.target.offset >= symbol__size(sym))
190 return false;
191
192 return true;
193}
194
185static void annotate_browser__draw_current_jump(struct ui_browser *browser) 195static void annotate_browser__draw_current_jump(struct ui_browser *browser)
186{ 196{
187 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); 197 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
@@ -195,8 +205,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
195 if (strstr(sym->name, "@plt")) 205 if (strstr(sym->name, "@plt"))
196 return; 206 return;
197 207
198 if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) || 208 if (!disasm_line__is_valid_jump(cursor, sym))
199 !disasm_line__has_offset(cursor))
200 return; 209 return;
201 210
202 target = ab->offsets[cursor->ops.target.offset]; 211 target = ab->offsets[cursor->ops.target.offset];
@@ -788,17 +797,9 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
788 struct disasm_line *dl = browser->offsets[offset], *dlt; 797 struct disasm_line *dl = browser->offsets[offset], *dlt;
789 struct browser_disasm_line *bdlt; 798 struct browser_disasm_line *bdlt;
790 799
791 if (!dl || !dl->ins || !ins__is_jump(dl->ins) || 800 if (!disasm_line__is_valid_jump(dl, sym))
792 !disasm_line__has_offset(dl))
793 continue; 801 continue;
794 802
795 if (dl->ops.target.offset >= size) {
796 ui__error("jump to after symbol!\n"
797 "size: %zx, jump target: %" PRIx64,
798 size, dl->ops.target.offset);
799 continue;
800 }
801
802 dlt = browser->offsets[dl->ops.target.offset]; 803 dlt = browser->offsets[dl->ops.target.offset];
803 /* 804 /*
804 * FIXME: Oops, no jump target? Buggy disassembler? Or do we 805 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
@@ -921,11 +922,11 @@ out_free_offsets:
921 922
922#define ANNOTATE_CFG(n) \ 923#define ANNOTATE_CFG(n) \
923 { .name = #n, .value = &annotate_browser__opts.n, } 924 { .name = #n, .value = &annotate_browser__opts.n, }
924 925
925/* 926/*
926 * Keep the entries sorted, they are bsearch'ed 927 * Keep the entries sorted, they are bsearch'ed
927 */ 928 */
928static struct annotate__config { 929static struct annotate_config {
929 const char *name; 930 const char *name;
930 bool *value; 931 bool *value;
931} annotate__configs[] = { 932} annotate__configs[] = {
@@ -939,7 +940,7 @@ static struct annotate__config {
939 940
940static int annotate_config__cmp(const void *name, const void *cfgp) 941static int annotate_config__cmp(const void *name, const void *cfgp)
941{ 942{
942 const struct annotate__config *cfg = cfgp; 943 const struct annotate_config *cfg = cfgp;
943 944
944 return strcmp(name, cfg->name); 945 return strcmp(name, cfg->name);
945} 946}
@@ -947,7 +948,7 @@ static int annotate_config__cmp(const void *name, const void *cfgp)
947static int annotate__config(const char *var, const char *value, 948static int annotate__config(const char *var, const char *value,
948 void *data __maybe_unused) 949 void *data __maybe_unused)
949{ 950{
950 struct annotate__config *cfg; 951 struct annotate_config *cfg;
951 const char *name; 952 const char *name;
952 953
953 if (prefixcmp(var, "annotate.") != 0) 954 if (prefixcmp(var, "annotate.") != 0)
@@ -955,7 +956,7 @@ static int annotate__config(const char *var, const char *value,
955 956
956 name = var + 9; 957 name = var + 9;
957 cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs), 958 cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
958 sizeof(struct annotate__config), annotate_config__cmp); 959 sizeof(struct annotate_config), annotate_config__cmp);
959 960
960 if (cfg == NULL) 961 if (cfg == NULL)
961 return -1; 962 return -1;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index ccc4bd161420..aa22704047d6 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -567,26 +567,128 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
567 return row - first_row; 567 return row - first_row;
568} 568}
569 569
570#define HPP__COLOR_FN(_name, _field) \ 570struct hpp_arg {
571static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \ 571 struct ui_browser *b;
572 struct hist_entry *he) \ 572 char folded_sign;
573 bool current_entry;
574};
575
576static int __hpp__color_callchain(struct hpp_arg *arg)
577{
578 if (!symbol_conf.use_callchain)
579 return 0;
580
581 slsmg_printf("%c ", arg->folded_sign);
582 return 2;
583}
584
585static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
586 u64 (*get_field)(struct hist_entry *),
587 int (*callchain_cb)(struct hpp_arg *))
588{
589 int ret = 0;
590 double percent = 0.0;
591 struct hists *hists = he->hists;
592 struct hpp_arg *arg = hpp->ptr;
593
594 if (hists->stats.total_period)
595 percent = 100.0 * get_field(he) / hists->stats.total_period;
596
597 ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
598
599 if (callchain_cb)
600 ret += callchain_cb(arg);
601
602 ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
603 slsmg_printf("%s", hpp->buf);
604
605 if (symbol_conf.event_group) {
606 int prev_idx, idx_delta;
607 struct perf_evsel *evsel = hists_to_evsel(hists);
608 struct hist_entry *pair;
609 int nr_members = evsel->nr_members;
610
611 if (nr_members <= 1)
612 goto out;
613
614 prev_idx = perf_evsel__group_idx(evsel);
615
616 list_for_each_entry(pair, &he->pairs.head, pairs.node) {
617 u64 period = get_field(pair);
618 u64 total = pair->hists->stats.total_period;
619
620 if (!total)
621 continue;
622
623 evsel = hists_to_evsel(pair->hists);
624 idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
625
626 while (idx_delta--) {
627 /*
628 * zero-fill group members in the middle which
629 * have no sample
630 */
631 ui_browser__set_percent_color(arg->b, 0.0,
632 arg->current_entry);
633 ret += scnprintf(hpp->buf, hpp->size,
634 " %6.2f%%", 0.0);
635 slsmg_printf("%s", hpp->buf);
636 }
637
638 percent = 100.0 * period / total;
639 ui_browser__set_percent_color(arg->b, percent,
640 arg->current_entry);
641 ret += scnprintf(hpp->buf, hpp->size,
642 " %6.2f%%", percent);
643 slsmg_printf("%s", hpp->buf);
644
645 prev_idx = perf_evsel__group_idx(evsel);
646 }
647
648 idx_delta = nr_members - prev_idx - 1;
649
650 while (idx_delta--) {
651 /*
652 * zero-fill group members at last which have no sample
653 */
654 ui_browser__set_percent_color(arg->b, 0.0,
655 arg->current_entry);
656 ret += scnprintf(hpp->buf, hpp->size,
657 " %6.2f%%", 0.0);
658 slsmg_printf("%s", hpp->buf);
659 }
660 }
661out:
662 if (!arg->current_entry || !arg->b->navkeypressed)
663 ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
664
665 return ret;
666}
667
668#define __HPP_COLOR_PERCENT_FN(_type, _field, _cb) \
669static u64 __hpp_get_##_field(struct hist_entry *he) \
670{ \
671 return he->stat._field; \
672} \
673 \
674static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \
675 struct hist_entry *he) \
573{ \ 676{ \
574 struct hists *hists = he->hists; \ 677 return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \
575 double percent = 100.0 * he->stat._field / hists->stats.total_period; \
576 *(double *)hpp->ptr = percent; \
577 return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \
578} 678}
579 679
580HPP__COLOR_FN(overhead, period) 680__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
581HPP__COLOR_FN(overhead_sys, period_sys) 681__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
582HPP__COLOR_FN(overhead_us, period_us) 682__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
583HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) 683__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
584HPP__COLOR_FN(overhead_guest_us, period_guest_us) 684__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
585 685
586#undef HPP__COLOR_FN 686#undef __HPP_COLOR_PERCENT_FN
587 687
588void hist_browser__init_hpp(void) 688void hist_browser__init_hpp(void)
589{ 689{
690 perf_hpp__column_enable(PERF_HPP__OVERHEAD);
691
590 perf_hpp__init(); 692 perf_hpp__init();
591 693
592 perf_hpp__format[PERF_HPP__OVERHEAD].color = 694 perf_hpp__format[PERF_HPP__OVERHEAD].color =
@@ -606,13 +708,13 @@ static int hist_browser__show_entry(struct hist_browser *browser,
606 unsigned short row) 708 unsigned short row)
607{ 709{
608 char s[256]; 710 char s[256];
609 double percent; 711 int printed = 0;
610 int i, printed = 0;
611 int width = browser->b.width; 712 int width = browser->b.width;
612 char folded_sign = ' '; 713 char folded_sign = ' ';
613 bool current_entry = ui_browser__is_current_entry(&browser->b, row); 714 bool current_entry = ui_browser__is_current_entry(&browser->b, row);
614 off_t row_offset = entry->row_offset; 715 off_t row_offset = entry->row_offset;
615 bool first = true; 716 bool first = true;
717 struct perf_hpp_fmt *fmt;
616 718
617 if (current_entry) { 719 if (current_entry) {
618 browser->he_selection = entry; 720 browser->he_selection = entry;
@@ -625,41 +727,30 @@ static int hist_browser__show_entry(struct hist_browser *browser,
625 } 727 }
626 728
627 if (row_offset == 0) { 729 if (row_offset == 0) {
730 struct hpp_arg arg = {
731 .b = &browser->b,
732 .folded_sign = folded_sign,
733 .current_entry = current_entry,
734 };
628 struct perf_hpp hpp = { 735 struct perf_hpp hpp = {
629 .buf = s, 736 .buf = s,
630 .size = sizeof(s), 737 .size = sizeof(s),
738 .ptr = &arg,
631 }; 739 };
632 740
633 ui_browser__gotorc(&browser->b, row, 0); 741 ui_browser__gotorc(&browser->b, row, 0);
634 742
635 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 743 perf_hpp__for_each_format(fmt) {
636 if (!perf_hpp__format[i].cond)
637 continue;
638
639 if (!first) { 744 if (!first) {
640 slsmg_printf(" "); 745 slsmg_printf(" ");
641 width -= 2; 746 width -= 2;
642 } 747 }
643 first = false; 748 first = false;
644 749
645 if (perf_hpp__format[i].color) { 750 if (fmt->color) {
646 hpp.ptr = &percent; 751 width -= fmt->color(&hpp, entry);
647 /* It will set percent for us. See HPP__COLOR_FN above. */
648 width -= perf_hpp__format[i].color(&hpp, entry);
649
650 ui_browser__set_percent_color(&browser->b, percent, current_entry);
651
652 if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) {
653 slsmg_printf("%c ", folded_sign);
654 width -= 2;
655 }
656
657 slsmg_printf("%s", s);
658
659 if (!current_entry || !browser->b.navkeypressed)
660 ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
661 } else { 752 } else {
662 width -= perf_hpp__format[i].entry(&hpp, entry); 753 width -= fmt->entry(&hpp, entry);
663 slsmg_printf("%s", s); 754 slsmg_printf("%s", s);
664 } 755 }
665 } 756 }
@@ -1098,6 +1189,21 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
1098 const struct thread *thread = hists->thread_filter; 1189 const struct thread *thread = hists->thread_filter;
1099 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; 1190 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
1100 u64 nr_events = hists->stats.total_period; 1191 u64 nr_events = hists->stats.total_period;
1192 struct perf_evsel *evsel = hists_to_evsel(hists);
1193 char buf[512];
1194 size_t buflen = sizeof(buf);
1195
1196 if (symbol_conf.event_group && evsel->nr_members > 1) {
1197 struct perf_evsel *pos;
1198
1199 perf_evsel__group_desc(evsel, buf, buflen);
1200 ev_name = buf;
1201
1202 for_each_group_member(pos, evsel) {
1203 nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
1204 nr_events += pos->hists.stats.total_period;
1205 }
1206 }
1101 1207
1102 nr_samples = convert_unit(nr_samples, &unit); 1208 nr_samples = convert_unit(nr_samples, &unit);
1103 printed = scnprintf(bf, size, 1209 printed = scnprintf(bf, size,
@@ -1135,6 +1241,96 @@ static inline bool is_report_browser(void *timer)
1135 return timer == NULL; 1241 return timer == NULL;
1136} 1242}
1137 1243
1244/*
1245 * Only runtime switching of perf data file will make "input_name" point
1246 * to a malloced buffer. So add "is_input_name_malloced" flag to decide
1247 * whether we need to call free() for current "input_name" during the switch.
1248 */
1249static bool is_input_name_malloced = false;
1250
1251static int switch_data_file(void)
1252{
1253 char *pwd, *options[32], *abs_path[32], *tmp;
1254 DIR *pwd_dir;
1255 int nr_options = 0, choice = -1, ret = -1;
1256 struct dirent *dent;
1257
1258 pwd = getenv("PWD");
1259 if (!pwd)
1260 return ret;
1261
1262 pwd_dir = opendir(pwd);
1263 if (!pwd_dir)
1264 return ret;
1265
1266 memset(options, 0, sizeof(options));
1267 memset(options, 0, sizeof(abs_path));
1268
1269 while ((dent = readdir(pwd_dir))) {
1270 char path[PATH_MAX];
1271 u64 magic;
1272 char *name = dent->d_name;
1273 FILE *file;
1274
1275 if (!(dent->d_type == DT_REG))
1276 continue;
1277
1278 snprintf(path, sizeof(path), "%s/%s", pwd, name);
1279
1280 file = fopen(path, "r");
1281 if (!file)
1282 continue;
1283
1284 if (fread(&magic, 1, 8, file) < 8)
1285 goto close_file_and_continue;
1286
1287 if (is_perf_magic(magic)) {
1288 options[nr_options] = strdup(name);
1289 if (!options[nr_options])
1290 goto close_file_and_continue;
1291
1292 abs_path[nr_options] = strdup(path);
1293 if (!abs_path[nr_options]) {
1294 free(options[nr_options]);
1295 ui__warning("Can't search all data files due to memory shortage.\n");
1296 fclose(file);
1297 break;
1298 }
1299
1300 nr_options++;
1301 }
1302
1303close_file_and_continue:
1304 fclose(file);
1305 if (nr_options >= 32) {
1306 ui__warning("Too many perf data files in PWD!\n"
1307 "Only the first 32 files will be listed.\n");
1308 break;
1309 }
1310 }
1311 closedir(pwd_dir);
1312
1313 if (nr_options) {
1314 choice = ui__popup_menu(nr_options, options);
1315 if (choice < nr_options && choice >= 0) {
1316 tmp = strdup(abs_path[choice]);
1317 if (tmp) {
1318 if (is_input_name_malloced)
1319 free((void *)input_name);
1320 input_name = tmp;
1321 is_input_name_malloced = true;
1322 ret = 0;
1323 } else
1324 ui__warning("Data switch failed due to memory shortage!\n");
1325 }
1326 }
1327
1328 free_popup_options(options, nr_options);
1329 free_popup_options(abs_path, nr_options);
1330 return ret;
1331}
1332
1333
1138static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, 1334static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1139 const char *helpline, const char *ev_name, 1335 const char *helpline, const char *ev_name,
1140 bool left_exits, 1336 bool left_exits,
@@ -1169,7 +1365,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1169 int choice = 0, 1365 int choice = 0,
1170 annotate = -2, zoom_dso = -2, zoom_thread = -2, 1366 annotate = -2, zoom_dso = -2, zoom_thread = -2,
1171 annotate_f = -2, annotate_t = -2, browse_map = -2; 1367 annotate_f = -2, annotate_t = -2, browse_map = -2;
1172 int scripts_comm = -2, scripts_symbol = -2, scripts_all = -2; 1368 int scripts_comm = -2, scripts_symbol = -2,
1369 scripts_all = -2, switch_data = -2;
1173 1370
1174 nr_options = 0; 1371 nr_options = 0;
1175 1372
@@ -1226,6 +1423,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1226 if (is_report_browser(hbt)) 1423 if (is_report_browser(hbt))
1227 goto do_scripts; 1424 goto do_scripts;
1228 continue; 1425 continue;
1426 case 's':
1427 if (is_report_browser(hbt))
1428 goto do_data_switch;
1429 continue;
1229 case K_F1: 1430 case K_F1:
1230 case 'h': 1431 case 'h':
1231 case '?': 1432 case '?':
@@ -1245,6 +1446,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1245 "d Zoom into current DSO\n" 1446 "d Zoom into current DSO\n"
1246 "t Zoom into current Thread\n" 1447 "t Zoom into current Thread\n"
1247 "r Run available scripts('perf report' only)\n" 1448 "r Run available scripts('perf report' only)\n"
1449 "s Switch to another data file in PWD ('perf report' only)\n"
1248 "P Print histograms to perf.hist.N\n" 1450 "P Print histograms to perf.hist.N\n"
1249 "V Verbose (DSO names in callchains, etc)\n" 1451 "V Verbose (DSO names in callchains, etc)\n"
1250 "/ Filter symbol by name"); 1452 "/ Filter symbol by name");
@@ -1352,6 +1554,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1352 if (asprintf(&options[nr_options], "Run scripts for all samples") > 0) 1554 if (asprintf(&options[nr_options], "Run scripts for all samples") > 0)
1353 scripts_all = nr_options++; 1555 scripts_all = nr_options++;
1354 1556
1557 if (is_report_browser(hbt) && asprintf(&options[nr_options],
1558 "Switch to another data file in PWD") > 0)
1559 switch_data = nr_options++;
1355add_exit_option: 1560add_exit_option:
1356 options[nr_options++] = (char *)"Exit"; 1561 options[nr_options++] = (char *)"Exit";
1357retry_popup_menu: 1562retry_popup_menu:
@@ -1462,6 +1667,16 @@ do_scripts:
1462 1667
1463 script_browse(script_opt); 1668 script_browse(script_opt);
1464 } 1669 }
1670 /* Switch to another data file */
1671 else if (choice == switch_data) {
1672do_data_switch:
1673 if (!switch_data_file()) {
1674 key = K_SWITCH_INPUT_DATA;
1675 break;
1676 } else
1677 ui__warning("Won't switch the data files due to\n"
1678 "no valid data file get selected!\n");
1679 }
1465 } 1680 }
1466out_free_stack: 1681out_free_stack:
1467 pstack__delete(fstack); 1682 pstack__delete(fstack);
@@ -1494,6 +1709,16 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
1494 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : 1709 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
1495 HE_COLORSET_NORMAL); 1710 HE_COLORSET_NORMAL);
1496 1711
1712 if (symbol_conf.event_group && evsel->nr_members > 1) {
1713 struct perf_evsel *pos;
1714
1715 ev_name = perf_evsel__group_name(evsel);
1716
1717 for_each_group_member(pos, evsel) {
1718 nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
1719 }
1720 }
1721
1497 nr_events = convert_unit(nr_events, &unit); 1722 nr_events = convert_unit(nr_events, &unit);
1498 printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, 1723 printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
1499 unit, unit == ' ' ? "" : " ", ev_name); 1724 unit, unit == ' ' ? "" : " ", ev_name);
@@ -1578,6 +1803,7 @@ browse_hists:
1578 "Do you really want to exit?")) 1803 "Do you really want to exit?"))
1579 continue; 1804 continue;
1580 /* Fall thru */ 1805 /* Fall thru */
1806 case K_SWITCH_INPUT_DATA:
1581 case 'q': 1807 case 'q':
1582 case CTRL('c'): 1808 case CTRL('c'):
1583 goto out; 1809 goto out;
@@ -1604,8 +1830,19 @@ out:
1604 return key; 1830 return key;
1605} 1831}
1606 1832
1833static bool filter_group_entries(struct ui_browser *self __maybe_unused,
1834 void *entry)
1835{
1836 struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
1837
1838 if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel))
1839 return true;
1840
1841 return false;
1842}
1843
1607static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, 1844static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
1608 const char *help, 1845 int nr_entries, const char *help,
1609 struct hist_browser_timer *hbt, 1846 struct hist_browser_timer *hbt,
1610 struct perf_session_env *env) 1847 struct perf_session_env *env)
1611{ 1848{
@@ -1616,7 +1853,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
1616 .refresh = ui_browser__list_head_refresh, 1853 .refresh = ui_browser__list_head_refresh,
1617 .seek = ui_browser__list_head_seek, 1854 .seek = ui_browser__list_head_seek,
1618 .write = perf_evsel_menu__write, 1855 .write = perf_evsel_menu__write,
1619 .nr_entries = evlist->nr_entries, 1856 .filter = filter_group_entries,
1857 .nr_entries = nr_entries,
1620 .priv = evlist, 1858 .priv = evlist,
1621 }, 1859 },
1622 .env = env, 1860 .env = env,
@@ -1632,20 +1870,37 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
1632 menu.b.width = line_len; 1870 menu.b.width = line_len;
1633 } 1871 }
1634 1872
1635 return perf_evsel_menu__run(&menu, evlist->nr_entries, help, hbt); 1873 return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
1636} 1874}
1637 1875
1638int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 1876int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
1639 struct hist_browser_timer *hbt, 1877 struct hist_browser_timer *hbt,
1640 struct perf_session_env *env) 1878 struct perf_session_env *env)
1641{ 1879{
1642 if (evlist->nr_entries == 1) { 1880 int nr_entries = evlist->nr_entries;
1881
1882single_entry:
1883 if (nr_entries == 1) {
1643 struct perf_evsel *first = list_entry(evlist->entries.next, 1884 struct perf_evsel *first = list_entry(evlist->entries.next,
1644 struct perf_evsel, node); 1885 struct perf_evsel, node);
1645 const char *ev_name = perf_evsel__name(first); 1886 const char *ev_name = perf_evsel__name(first);
1646 return perf_evsel__hists_browse(first, evlist->nr_entries, help, 1887
1888 return perf_evsel__hists_browse(first, nr_entries, help,
1647 ev_name, false, hbt, env); 1889 ev_name, false, hbt, env);
1648 } 1890 }
1649 1891
1650 return __perf_evlist__tui_browse_hists(evlist, help, hbt, env); 1892 if (symbol_conf.event_group) {
1893 struct perf_evsel *pos;
1894
1895 nr_entries = 0;
1896 list_for_each_entry(pos, &evlist->entries, node)
1897 if (perf_evsel__is_group_leader(pos))
1898 nr_entries++;
1899
1900 if (nr_entries == 1)
1901 goto single_entry;
1902 }
1903
1904 return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
1905 hbt, env);
1651} 1906}
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
new file mode 100644
index 000000000000..7d8dc581a545
--- /dev/null
+++ b/tools/perf/ui/gtk/annotate.c
@@ -0,0 +1,229 @@
1#include "gtk.h"
2#include "util/debug.h"
3#include "util/annotate.h"
4#include "ui/helpline.h"
5
6
7enum {
8 ANN_COL__PERCENT,
9 ANN_COL__OFFSET,
10 ANN_COL__LINE,
11
12 MAX_ANN_COLS
13};
14
15static const char *const col_names[] = {
16 "Overhead",
17 "Offset",
18 "Line"
19};
20
21static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
22 struct disasm_line *dl, int evidx)
23{
24 struct sym_hist *symhist;
25 double percent = 0.0;
26 const char *markup;
27 int ret = 0;
28
29 strcpy(buf, "");
30
31 if (dl->offset == (s64) -1)
32 return 0;
33
34 symhist = annotation__histogram(symbol__annotation(sym), evidx);
35 if (!symhist->addr[dl->offset])
36 return 0;
37
38 percent = 100.0 * symhist->addr[dl->offset] / symhist->sum;
39
40 markup = perf_gtk__get_percent_color(percent);
41 if (markup)
42 ret += scnprintf(buf, size, "%s", markup);
43 ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent);
44 if (markup)
45 ret += scnprintf(buf + ret, size - ret, "</span>");
46
47 return ret;
48}
49
50static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
51 struct map *map, struct disasm_line *dl)
52{
53 u64 start = map__rip_2objdump(map, sym->start);
54
55 strcpy(buf, "");
56
57 if (dl->offset == (s64) -1)
58 return 0;
59
60 return scnprintf(buf, size, "%"PRIx64, start + dl->offset);
61}
62
63static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
64{
65 int ret = 0;
66 char *line = g_markup_escape_text(dl->line, -1);
67 const char *markup = "<span fgcolor='gray'>";
68
69 strcpy(buf, "");
70
71 if (!line)
72 return 0;
73
74 if (dl->offset != (s64) -1)
75 markup = NULL;
76
77 if (markup)
78 ret += scnprintf(buf, size, "%s", markup);
79 ret += scnprintf(buf + ret, size - ret, "%s", line);
80 if (markup)
81 ret += scnprintf(buf + ret, size - ret, "</span>");
82
83 g_free(line);
84 return ret;
85}
86
/*
 * Build a GtkTreeView with one row per disassembled line of @sym and
 * attach it to @window.  Columns are percent / offset / line (see
 * col_names above); all of them carry pre-formatted markup strings.
 * Always returns 0.
 */
static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
				     struct map *map, int evidx,
				     struct hist_browser_timer *hbt __maybe_unused)
{
	struct disasm_line *pos, *n;
	struct annotation *notes;
	GType col_types[MAX_ANN_COLS];
	GtkCellRenderer *renderer;
	GtkListStore *store;
	GtkWidget *view;
	int i;
	char s[512];

	notes = symbol__annotation(sym);

	/* Every column is a string column (Pango markup text). */
	for (i = 0; i < MAX_ANN_COLS; i++) {
		col_types[i] = G_TYPE_STRING;
	}
	store = gtk_list_store_newv(MAX_ANN_COLS, col_types);

	view = gtk_tree_view_new();
	renderer = gtk_cell_renderer_text_new();

	for (i = 0; i < MAX_ANN_COLS; i++) {
		gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
					-1, col_names[i], renderer, "markup",
					i, NULL);
	}

	gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
	/* Drop our reference; the view now owns the store. */
	g_object_unref(GTK_TREE_MODEL(store));

	list_for_each_entry(pos, &notes->src->source, node) {
		GtkTreeIter iter;

		gtk_list_store_append(store, &iter);

		/* Each helper returns 0 when its column is empty for this row. */
		if (perf_gtk__get_percent(s, sizeof(s), sym, pos, evidx))
			gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
		if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos))
			gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
		if (perf_gtk__get_line(s, sizeof(s), pos))
			gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
	}

	gtk_container_add(GTK_CONTAINER(window), view);

	/* The store copied the strings; the disasm lines can be freed now. */
	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
		list_del(&pos->node);
		disasm_line__free(pos);
	}

	return 0;
}
141
/*
 * Annotate @sym in a GTK window.  If a perf GTK context is already
 * active its main window and notebook are reused; otherwise a new
 * top-level window (with notebook, optional info bar and status bar)
 * is created and a fresh context is activated.  The annotation itself
 * is added as a new notebook tab named after the symbol.
 * Returns 0 on success, -1 on error.
 */
int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
			 struct hist_browser_timer *hbt)
{
	GtkWidget *window;
	GtkWidget *notebook;
	GtkWidget *scrolled_window;
	GtkWidget *tab_label;

	/* An objdump failure for this DSO was already reported once. */
	if (map->dso->annotate_warned)
		return -1;

	if (symbol__annotate(sym, map, 0) < 0) {
		ui__error("%s", ui_helpline__current);
		return -1;
	}

	if (perf_gtk__is_active_context(pgctx)) {
		window = pgctx->main_window;
		notebook = pgctx->notebook;
	} else {
		GtkWidget *vbox;
		GtkWidget *infobar;
		GtkWidget *statbar;

		/* Tear GTK down cleanly if we die on a fatal signal. */
		signal(SIGSEGV, perf_gtk__signal);
		signal(SIGFPE, perf_gtk__signal);
		signal(SIGINT, perf_gtk__signal);
		signal(SIGQUIT, perf_gtk__signal);
		signal(SIGTERM, perf_gtk__signal);

		window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
		gtk_window_set_title(GTK_WINDOW(window), "perf annotate");

		g_signal_connect(window, "delete_event", gtk_main_quit, NULL);

		pgctx = perf_gtk__activate_context(window);
		if (!pgctx)
			return -1;

		vbox = gtk_vbox_new(FALSE, 0);
		notebook = gtk_notebook_new();
		pgctx->notebook = notebook;

		gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);

		/* NULL unless built with HAVE_GTK_INFO_BAR. */
		infobar = perf_gtk__setup_info_bar();
		if (infobar) {
			gtk_box_pack_start(GTK_BOX(vbox), infobar,
					   FALSE, FALSE, 0);
		}

		statbar = perf_gtk__setup_statusbar();
		gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);

		gtk_container_add(GTK_CONTAINER(window), vbox);
	}

	scrolled_window = gtk_scrolled_window_new(NULL, NULL);
	tab_label = gtk_label_new(sym->name);

	gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
				       GTK_POLICY_AUTOMATIC,
				       GTK_POLICY_AUTOMATIC);

	/* One notebook tab per annotated symbol. */
	gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
				 tab_label);

	perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt);
	return 0;
}
212
/*
 * Display the window populated by symbol__gtk_annotate() and run the
 * GTK main loop; returns after the user quits, deactivating the
 * context on the way out.  No-op when no context is active.
 */
void perf_gtk__show_annotations(void)
{
	GtkWidget *window;

	if (!perf_gtk__is_active_context(pgctx))
		return;

	window = pgctx->main_window;
	gtk_widget_show_all(window);

	perf_gtk__resize_window(window);
	gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);

	gtk_main();

	perf_gtk__deactivate_context(&pgctx);
}
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index 253b6219a39e..c95012cdb438 100644
--- a/tools/perf/ui/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -8,15 +8,13 @@
8 8
9#include <signal.h> 9#include <signal.h>
10 10
11#define MAX_COLUMNS 32 11void perf_gtk__signal(int sig)
12
13static void perf_gtk__signal(int sig)
14{ 12{
15 perf_gtk__exit(false); 13 perf_gtk__exit(false);
16 psignal(sig, "perf"); 14 psignal(sig, "perf");
17} 15}
18 16
19static void perf_gtk__resize_window(GtkWidget *window) 17void perf_gtk__resize_window(GtkWidget *window)
20{ 18{
21 GdkRectangle rect; 19 GdkRectangle rect;
22 GdkScreen *screen; 20 GdkScreen *screen;
@@ -36,7 +34,7 @@ static void perf_gtk__resize_window(GtkWidget *window)
36 gtk_window_resize(GTK_WINDOW(window), width, height); 34 gtk_window_resize(GTK_WINDOW(window), width, height);
37} 35}
38 36
39static const char *perf_gtk__get_percent_color(double percent) 37const char *perf_gtk__get_percent_color(double percent)
40{ 38{
41 if (percent >= MIN_RED) 39 if (percent >= MIN_RED)
42 return "<span fgcolor='red'>"; 40 return "<span fgcolor='red'>";
@@ -45,155 +43,8 @@ static const char *perf_gtk__get_percent_color(double percent)
45 return NULL; 43 return NULL;
46} 44}
47 45
48#define HPP__COLOR_FN(_name, _field) \
49static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \
50 struct hist_entry *he) \
51{ \
52 struct hists *hists = he->hists; \
53 double percent = 100.0 * he->stat._field / hists->stats.total_period; \
54 const char *markup; \
55 int ret = 0; \
56 \
57 markup = perf_gtk__get_percent_color(percent); \
58 if (markup) \
59 ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \
60 ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \
61 if (markup) \
62 ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \
63 \
64 return ret; \
65}
66
67HPP__COLOR_FN(overhead, period)
68HPP__COLOR_FN(overhead_sys, period_sys)
69HPP__COLOR_FN(overhead_us, period_us)
70HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
71HPP__COLOR_FN(overhead_guest_us, period_guest_us)
72
73#undef HPP__COLOR_FN
74
75void perf_gtk__init_hpp(void)
76{
77 perf_hpp__init();
78
79 perf_hpp__format[PERF_HPP__OVERHEAD].color =
80 perf_gtk__hpp_color_overhead;
81 perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
82 perf_gtk__hpp_color_overhead_sys;
83 perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
84 perf_gtk__hpp_color_overhead_us;
85 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
86 perf_gtk__hpp_color_overhead_guest_sys;
87 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
88 perf_gtk__hpp_color_overhead_guest_us;
89}
90
91static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
92{
93 GType col_types[MAX_COLUMNS];
94 GtkCellRenderer *renderer;
95 struct sort_entry *se;
96 GtkListStore *store;
97 struct rb_node *nd;
98 GtkWidget *view;
99 int i, col_idx;
100 int nr_cols;
101 char s[512];
102
103 struct perf_hpp hpp = {
104 .buf = s,
105 .size = sizeof(s),
106 };
107
108 nr_cols = 0;
109
110 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
111 if (!perf_hpp__format[i].cond)
112 continue;
113
114 col_types[nr_cols++] = G_TYPE_STRING;
115 }
116
117 list_for_each_entry(se, &hist_entry__sort_list, list) {
118 if (se->elide)
119 continue;
120
121 col_types[nr_cols++] = G_TYPE_STRING;
122 }
123
124 store = gtk_list_store_newv(nr_cols, col_types);
125
126 view = gtk_tree_view_new();
127
128 renderer = gtk_cell_renderer_text_new();
129
130 col_idx = 0;
131
132 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
133 if (!perf_hpp__format[i].cond)
134 continue;
135
136 perf_hpp__format[i].header(&hpp);
137
138 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
139 -1, s,
140 renderer, "markup",
141 col_idx++, NULL);
142 }
143
144 list_for_each_entry(se, &hist_entry__sort_list, list) {
145 if (se->elide)
146 continue;
147
148 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
149 -1, se->se_header,
150 renderer, "text",
151 col_idx++, NULL);
152 }
153
154 gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
155
156 g_object_unref(GTK_TREE_MODEL(store));
157
158 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
159 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
160 GtkTreeIter iter;
161
162 if (h->filtered)
163 continue;
164
165 gtk_list_store_append(store, &iter);
166
167 col_idx = 0;
168
169 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
170 if (!perf_hpp__format[i].cond)
171 continue;
172
173 if (perf_hpp__format[i].color)
174 perf_hpp__format[i].color(&hpp, h);
175 else
176 perf_hpp__format[i].entry(&hpp, h);
177
178 gtk_list_store_set(store, &iter, col_idx++, s, -1);
179 }
180
181 list_for_each_entry(se, &hist_entry__sort_list, list) {
182 if (se->elide)
183 continue;
184
185 se->se_snprintf(h, s, ARRAY_SIZE(s),
186 hists__col_len(hists, se->se_width_idx));
187
188 gtk_list_store_set(store, &iter, col_idx++, s, -1);
189 }
190 }
191
192 gtk_container_add(GTK_CONTAINER(window), view);
193}
194
195#ifdef HAVE_GTK_INFO_BAR 46#ifdef HAVE_GTK_INFO_BAR
196static GtkWidget *perf_gtk__setup_info_bar(void) 47GtkWidget *perf_gtk__setup_info_bar(void)
197{ 48{
198 GtkWidget *info_bar; 49 GtkWidget *info_bar;
199 GtkWidget *label; 50 GtkWidget *label;
@@ -220,7 +71,7 @@ static GtkWidget *perf_gtk__setup_info_bar(void)
220} 71}
221#endif 72#endif
222 73
223static GtkWidget *perf_gtk__setup_statusbar(void) 74GtkWidget *perf_gtk__setup_statusbar(void)
224{ 75{
225 GtkWidget *stbar; 76 GtkWidget *stbar;
226 unsigned ctxid; 77 unsigned ctxid;
@@ -234,79 +85,3 @@ static GtkWidget *perf_gtk__setup_statusbar(void)
234 85
235 return stbar; 86 return stbar;
236} 87}
237
238int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
239 const char *help,
240 struct hist_browser_timer *hbt __maybe_unused)
241{
242 struct perf_evsel *pos;
243 GtkWidget *vbox;
244 GtkWidget *notebook;
245 GtkWidget *info_bar;
246 GtkWidget *statbar;
247 GtkWidget *window;
248
249 signal(SIGSEGV, perf_gtk__signal);
250 signal(SIGFPE, perf_gtk__signal);
251 signal(SIGINT, perf_gtk__signal);
252 signal(SIGQUIT, perf_gtk__signal);
253 signal(SIGTERM, perf_gtk__signal);
254
255 window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
256
257 gtk_window_set_title(GTK_WINDOW(window), "perf report");
258
259 g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
260
261 pgctx = perf_gtk__activate_context(window);
262 if (!pgctx)
263 return -1;
264
265 vbox = gtk_vbox_new(FALSE, 0);
266
267 notebook = gtk_notebook_new();
268
269 list_for_each_entry(pos, &evlist->entries, node) {
270 struct hists *hists = &pos->hists;
271 const char *evname = perf_evsel__name(pos);
272 GtkWidget *scrolled_window;
273 GtkWidget *tab_label;
274
275 scrolled_window = gtk_scrolled_window_new(NULL, NULL);
276
277 gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
278 GTK_POLICY_AUTOMATIC,
279 GTK_POLICY_AUTOMATIC);
280
281 perf_gtk__show_hists(scrolled_window, hists);
282
283 tab_label = gtk_label_new(evname);
284
285 gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
286 }
287
288 gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
289
290 info_bar = perf_gtk__setup_info_bar();
291 if (info_bar)
292 gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
293
294 statbar = perf_gtk__setup_statusbar();
295 gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
296
297 gtk_container_add(GTK_CONTAINER(window), vbox);
298
299 gtk_widget_show_all(window);
300
301 perf_gtk__resize_window(window);
302
303 gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
304
305 ui_helpline__push(help);
306
307 gtk_main();
308
309 perf_gtk__deactivate_context(&pgctx);
310
311 return 0;
312}
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index 856320e2cc05..3d96785ef155 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -10,6 +10,7 @@
10 10
11struct perf_gtk_context { 11struct perf_gtk_context {
12 GtkWidget *main_window; 12 GtkWidget *main_window;
13 GtkWidget *notebook;
13 14
14#ifdef HAVE_GTK_INFO_BAR 15#ifdef HAVE_GTK_INFO_BAR
15 GtkWidget *info_bar; 16 GtkWidget *info_bar;
@@ -33,7 +34,14 @@ void perf_gtk__init_helpline(void);
33void perf_gtk__init_progress(void); 34void perf_gtk__init_progress(void);
34void perf_gtk__init_hpp(void); 35void perf_gtk__init_hpp(void);
35 36
36#ifndef HAVE_GTK_INFO_BAR 37void perf_gtk__signal(int sig);
38void perf_gtk__resize_window(GtkWidget *window);
39const char *perf_gtk__get_percent_color(double percent);
40GtkWidget *perf_gtk__setup_statusbar(void);
41
42#ifdef HAVE_GTK_INFO_BAR
43GtkWidget *perf_gtk__setup_info_bar(void);
44#else
37static inline GtkWidget *perf_gtk__setup_info_bar(void) 45static inline GtkWidget *perf_gtk__setup_info_bar(void)
38{ 46{
39 return NULL; 47 return NULL;
diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c
index 5db4432ff12a..3388cbd12186 100644
--- a/tools/perf/ui/gtk/helpline.c
+++ b/tools/perf/ui/gtk/helpline.c
@@ -24,17 +24,7 @@ static void gtk_helpline_push(const char *msg)
24 pgctx->statbar_ctx_id, msg); 24 pgctx->statbar_ctx_id, msg);
25} 25}
26 26
27static struct ui_helpline gtk_helpline_fns = { 27static int gtk_helpline_show(const char *fmt, va_list ap)
28 .pop = gtk_helpline_pop,
29 .push = gtk_helpline_push,
30};
31
32void perf_gtk__init_helpline(void)
33{
34 helpline_fns = &gtk_helpline_fns;
35}
36
37int perf_gtk__show_helpline(const char *fmt, va_list ap)
38{ 28{
39 int ret; 29 int ret;
40 char *ptr; 30 char *ptr;
@@ -54,3 +44,14 @@ int perf_gtk__show_helpline(const char *fmt, va_list ap)
54 44
55 return ret; 45 return ret;
56} 46}
47
48static struct ui_helpline gtk_helpline_fns = {
49 .pop = gtk_helpline_pop,
50 .push = gtk_helpline_push,
51 .show = gtk_helpline_show,
52};
53
54void perf_gtk__init_helpline(void)
55{
56 helpline_fns = &gtk_helpline_fns;
57}
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
new file mode 100644
index 000000000000..1e764a8ad259
--- /dev/null
+++ b/tools/perf/ui/gtk/hists.c
@@ -0,0 +1,312 @@
1#include "../evlist.h"
2#include "../cache.h"
3#include "../evsel.h"
4#include "../sort.h"
5#include "../hist.h"
6#include "../helpline.h"
7#include "gtk.h"
8
9#define MAX_COLUMNS 32
10
11static int __percent_color_snprintf(char *buf, size_t size, double percent)
12{
13 int ret = 0;
14 const char *markup;
15
16 markup = perf_gtk__get_percent_color(percent);
17 if (markup)
18 ret += scnprintf(buf, size, markup);
19
20 ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
21
22 if (markup)
23 ret += scnprintf(buf + ret, size - ret, "</span>");
24
25 return ret;
26}
27
28
29static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
30 u64 (*get_field)(struct hist_entry *))
31{
32 int ret;
33 double percent = 0.0;
34 struct hists *hists = he->hists;
35
36 if (hists->stats.total_period)
37 percent = 100.0 * get_field(he) / hists->stats.total_period;
38
39 ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
40
41 if (symbol_conf.event_group) {
42 int prev_idx, idx_delta;
43 struct perf_evsel *evsel = hists_to_evsel(hists);
44 struct hist_entry *pair;
45 int nr_members = evsel->nr_members;
46
47 if (nr_members <= 1)
48 return ret;
49
50 prev_idx = perf_evsel__group_idx(evsel);
51
52 list_for_each_entry(pair, &he->pairs.head, pairs.node) {
53 u64 period = get_field(pair);
54 u64 total = pair->hists->stats.total_period;
55
56 evsel = hists_to_evsel(pair->hists);
57 idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
58
59 while (idx_delta--) {
60 /*
61 * zero-fill group members in the middle which
62 * have no sample
63 */
64 ret += __percent_color_snprintf(hpp->buf + ret,
65 hpp->size - ret,
66 0.0);
67 }
68
69 percent = 100.0 * period / total;
70 ret += __percent_color_snprintf(hpp->buf + ret,
71 hpp->size - ret,
72 percent);
73
74 prev_idx = perf_evsel__group_idx(evsel);
75 }
76
77 idx_delta = nr_members - prev_idx - 1;
78
79 while (idx_delta--) {
80 /*
81 * zero-fill group members at last which have no sample
82 */
83 ret += __percent_color_snprintf(hpp->buf + ret,
84 hpp->size - ret,
85 0.0);
86 }
87 }
88 return ret;
89}
90
91#define __HPP_COLOR_PERCENT_FN(_type, _field) \
92static u64 he_get_##_field(struct hist_entry *he) \
93{ \
94 return he->stat._field; \
95} \
96 \
97static int perf_gtk__hpp_color_##_type(struct perf_hpp *hpp, \
98 struct hist_entry *he) \
99{ \
100 return __hpp__color_fmt(hpp, he, he_get_##_field); \
101}
102
103__HPP_COLOR_PERCENT_FN(overhead, period)
104__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys)
105__HPP_COLOR_PERCENT_FN(overhead_us, period_us)
106__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys)
107__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
108
109#undef __HPP_COLOR_PERCENT_FN
110
111
112void perf_gtk__init_hpp(void)
113{
114 perf_hpp__column_enable(PERF_HPP__OVERHEAD);
115
116 perf_hpp__init();
117
118 perf_hpp__format[PERF_HPP__OVERHEAD].color =
119 perf_gtk__hpp_color_overhead;
120 perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
121 perf_gtk__hpp_color_overhead_sys;
122 perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
123 perf_gtk__hpp_color_overhead_us;
124 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
125 perf_gtk__hpp_color_overhead_guest_sys;
126 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
127 perf_gtk__hpp_color_overhead_guest_us;
128}
129
130static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
131{
132 struct perf_hpp_fmt *fmt;
133 GType col_types[MAX_COLUMNS];
134 GtkCellRenderer *renderer;
135 struct sort_entry *se;
136 GtkListStore *store;
137 struct rb_node *nd;
138 GtkWidget *view;
139 int col_idx;
140 int nr_cols;
141 char s[512];
142
143 struct perf_hpp hpp = {
144 .buf = s,
145 .size = sizeof(s),
146 .ptr = hists_to_evsel(hists),
147 };
148
149 nr_cols = 0;
150
151 perf_hpp__for_each_format(fmt)
152 col_types[nr_cols++] = G_TYPE_STRING;
153
154 list_for_each_entry(se, &hist_entry__sort_list, list) {
155 if (se->elide)
156 continue;
157
158 col_types[nr_cols++] = G_TYPE_STRING;
159 }
160
161 store = gtk_list_store_newv(nr_cols, col_types);
162
163 view = gtk_tree_view_new();
164
165 renderer = gtk_cell_renderer_text_new();
166
167 col_idx = 0;
168
169 perf_hpp__for_each_format(fmt) {
170 fmt->header(&hpp);
171
172 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
173 -1, ltrim(s),
174 renderer, "markup",
175 col_idx++, NULL);
176 }
177
178 list_for_each_entry(se, &hist_entry__sort_list, list) {
179 if (se->elide)
180 continue;
181
182 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
183 -1, se->se_header,
184 renderer, "text",
185 col_idx++, NULL);
186 }
187
188 gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
189
190 g_object_unref(GTK_TREE_MODEL(store));
191
192 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
193 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
194 GtkTreeIter iter;
195
196 if (h->filtered)
197 continue;
198
199 gtk_list_store_append(store, &iter);
200
201 col_idx = 0;
202
203 perf_hpp__for_each_format(fmt) {
204 if (fmt->color)
205 fmt->color(&hpp, h);
206 else
207 fmt->entry(&hpp, h);
208
209 gtk_list_store_set(store, &iter, col_idx++, s, -1);
210 }
211
212 list_for_each_entry(se, &hist_entry__sort_list, list) {
213 if (se->elide)
214 continue;
215
216 se->se_snprintf(h, s, ARRAY_SIZE(s),
217 hists__col_len(hists, se->se_width_idx));
218
219 gtk_list_store_set(store, &iter, col_idx++, s, -1);
220 }
221 }
222
223 gtk_container_add(GTK_CONTAINER(window), view);
224}
225
226int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
227 const char *help,
228 struct hist_browser_timer *hbt __maybe_unused)
229{
230 struct perf_evsel *pos;
231 GtkWidget *vbox;
232 GtkWidget *notebook;
233 GtkWidget *info_bar;
234 GtkWidget *statbar;
235 GtkWidget *window;
236
237 signal(SIGSEGV, perf_gtk__signal);
238 signal(SIGFPE, perf_gtk__signal);
239 signal(SIGINT, perf_gtk__signal);
240 signal(SIGQUIT, perf_gtk__signal);
241 signal(SIGTERM, perf_gtk__signal);
242
243 window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
244
245 gtk_window_set_title(GTK_WINDOW(window), "perf report");
246
247 g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
248
249 pgctx = perf_gtk__activate_context(window);
250 if (!pgctx)
251 return -1;
252
253 vbox = gtk_vbox_new(FALSE, 0);
254
255 notebook = gtk_notebook_new();
256
257 gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
258
259 info_bar = perf_gtk__setup_info_bar();
260 if (info_bar)
261 gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
262
263 statbar = perf_gtk__setup_statusbar();
264 gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
265
266 gtk_container_add(GTK_CONTAINER(window), vbox);
267
268 list_for_each_entry(pos, &evlist->entries, node) {
269 struct hists *hists = &pos->hists;
270 const char *evname = perf_evsel__name(pos);
271 GtkWidget *scrolled_window;
272 GtkWidget *tab_label;
273 char buf[512];
274 size_t size = sizeof(buf);
275
276 if (symbol_conf.event_group) {
277 if (!perf_evsel__is_group_leader(pos))
278 continue;
279
280 if (pos->nr_members > 1) {
281 perf_evsel__group_desc(pos, buf, size);
282 evname = buf;
283 }
284 }
285
286 scrolled_window = gtk_scrolled_window_new(NULL, NULL);
287
288 gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
289 GTK_POLICY_AUTOMATIC,
290 GTK_POLICY_AUTOMATIC);
291
292 perf_gtk__show_hists(scrolled_window, hists);
293
294 tab_label = gtk_label_new(evname);
295
296 gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
297 }
298
299 gtk_widget_show_all(window);
300
301 perf_gtk__resize_window(window);
302
303 gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
304
305 ui_helpline__push(help);
306
307 gtk_main();
308
309 perf_gtk__deactivate_context(&pgctx);
310
311 return 0;
312}
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index a49bcf3c190b..700fb3cfa1c7 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -16,9 +16,16 @@ static void nop_helpline__push(const char *msg __maybe_unused)
16{ 16{
17} 17}
18 18
19static int nop_helpline__show(const char *fmt __maybe_unused,
20 va_list ap __maybe_unused)
21{
22 return 0;
23}
24
19static struct ui_helpline default_helpline_fns = { 25static struct ui_helpline default_helpline_fns = {
20 .pop = nop_helpline__pop, 26 .pop = nop_helpline__pop,
21 .push = nop_helpline__push, 27 .push = nop_helpline__push,
28 .show = nop_helpline__show,
22}; 29};
23 30
24struct ui_helpline *helpline_fns = &default_helpline_fns; 31struct ui_helpline *helpline_fns = &default_helpline_fns;
@@ -59,3 +66,8 @@ void ui_helpline__puts(const char *msg)
59 ui_helpline__pop(); 66 ui_helpline__pop();
60 ui_helpline__push(msg); 67 ui_helpline__push(msg);
61} 68}
69
70int ui_helpline__vshow(const char *fmt, va_list ap)
71{
72 return helpline_fns->show(fmt, ap);
73}
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h
index baa28a4d16b9..46181f4fc07e 100644
--- a/tools/perf/ui/helpline.h
+++ b/tools/perf/ui/helpline.h
@@ -9,6 +9,7 @@
9struct ui_helpline { 9struct ui_helpline {
10 void (*pop)(void); 10 void (*pop)(void);
11 void (*push)(const char *msg); 11 void (*push)(const char *msg);
12 int (*show)(const char *fmt, va_list ap);
12}; 13};
13 14
14extern struct ui_helpline *helpline_fns; 15extern struct ui_helpline *helpline_fns;
@@ -20,28 +21,9 @@ void ui_helpline__push(const char *msg);
20void ui_helpline__vpush(const char *fmt, va_list ap); 21void ui_helpline__vpush(const char *fmt, va_list ap);
21void ui_helpline__fpush(const char *fmt, ...); 22void ui_helpline__fpush(const char *fmt, ...);
22void ui_helpline__puts(const char *msg); 23void ui_helpline__puts(const char *msg);
24int ui_helpline__vshow(const char *fmt, va_list ap);
23 25
24extern char ui_helpline__current[512]; 26extern char ui_helpline__current[512];
25
26#ifdef NEWT_SUPPORT
27extern char ui_helpline__last_msg[]; 27extern char ui_helpline__last_msg[];
28int ui_helpline__show_help(const char *format, va_list ap);
29#else
30static inline int ui_helpline__show_help(const char *format __maybe_unused,
31 va_list ap __maybe_unused)
32{
33 return 0;
34}
35#endif /* NEWT_SUPPORT */
36
37#ifdef GTK2_SUPPORT
38int perf_gtk__show_helpline(const char *format, va_list ap);
39#else
40static inline int perf_gtk__show_helpline(const char *format __maybe_unused,
41 va_list ap __maybe_unused)
42{
43 return 0;
44}
45#endif /* GTK2_SUPPORT */
46 28
47#endif /* _PERF_UI_HELPLINE_H_ */ 29#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index aa84130024d5..d671e63aa351 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -3,151 +3,163 @@
3#include "../util/hist.h" 3#include "../util/hist.h"
4#include "../util/util.h" 4#include "../util/util.h"
5#include "../util/sort.h" 5#include "../util/sort.h"
6 6#include "../util/evsel.h"
7 7
8/* hist period print (hpp) functions */ 8/* hist period print (hpp) functions */
9static int hpp__header_overhead(struct perf_hpp *hpp)
10{
11 return scnprintf(hpp->buf, hpp->size, "Overhead");
12}
13
14static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused)
15{
16 return 8;
17}
18
19static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he)
20{
21 struct hists *hists = he->hists;
22 double percent = 100.0 * he->stat.period / hists->stats.total_period;
23 9
24 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); 10typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
25}
26 11
27static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he) 12static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
13 u64 (*get_field)(struct hist_entry *),
14 const char *fmt, hpp_snprint_fn print_fn,
15 bool fmt_percent)
28{ 16{
17 int ret;
29 struct hists *hists = he->hists; 18 struct hists *hists = he->hists;
30 double percent = 100.0 * he->stat.period / hists->stats.total_period;
31 const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
32
33 return scnprintf(hpp->buf, hpp->size, fmt, percent);
34}
35 19
36static int hpp__header_overhead_sys(struct perf_hpp *hpp) 20 if (fmt_percent) {
37{ 21 double percent = 0.0;
38 const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
39
40 return scnprintf(hpp->buf, hpp->size, fmt, "sys");
41}
42 22
43static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) 23 if (hists->stats.total_period)
44{ 24 percent = 100.0 * get_field(he) /
45 return 7; 25 hists->stats.total_period;
46}
47 26
48static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) 27 ret = print_fn(hpp->buf, hpp->size, fmt, percent);
49{ 28 } else
50 struct hists *hists = he->hists; 29 ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
51 double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
52 30
53 return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); 31 if (symbol_conf.event_group) {
54} 32 int prev_idx, idx_delta;
33 struct perf_evsel *evsel = hists_to_evsel(hists);
34 struct hist_entry *pair;
35 int nr_members = evsel->nr_members;
55 36
56static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) 37 if (nr_members <= 1)
57{ 38 return ret;
58 struct hists *hists = he->hists;
59 double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
60 const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
61 39
62 return scnprintf(hpp->buf, hpp->size, fmt, percent); 40 prev_idx = perf_evsel__group_idx(evsel);
63}
64 41
65static int hpp__header_overhead_us(struct perf_hpp *hpp) 42 list_for_each_entry(pair, &he->pairs.head, pairs.node) {
66{ 43 u64 period = get_field(pair);
67 const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; 44 u64 total = pair->hists->stats.total_period;
68 45
69 return scnprintf(hpp->buf, hpp->size, fmt, "user"); 46 if (!total)
70} 47 continue;
71 48
72static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) 49 evsel = hists_to_evsel(pair->hists);
73{ 50 idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
74 return 7;
75}
76 51
77static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) 52 while (idx_delta--) {
78{ 53 /*
79 struct hists *hists = he->hists; 54 * zero-fill group members in the middle which
80 double percent = 100.0 * he->stat.period_us / hists->stats.total_period; 55 * have no sample
56 */
57 ret += print_fn(hpp->buf + ret, hpp->size - ret,
58 fmt, 0);
59 }
81 60
82 return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); 61 if (fmt_percent)
83} 62 ret += print_fn(hpp->buf + ret, hpp->size - ret,
63 fmt, 100.0 * period / total);
64 else
65 ret += print_fn(hpp->buf + ret, hpp->size - ret,
66 fmt, period);
84 67
85static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) 68 prev_idx = perf_evsel__group_idx(evsel);
86{ 69 }
87 struct hists *hists = he->hists;
88 double percent = 100.0 * he->stat.period_us / hists->stats.total_period;
89 const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
90
91 return scnprintf(hpp->buf, hpp->size, fmt, percent);
92}
93
94static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp)
95{
96 return scnprintf(hpp->buf, hpp->size, "guest sys");
97}
98
99static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused)
100{
101 return 9;
102}
103
104static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp,
105 struct hist_entry *he)
106{
107 struct hists *hists = he->hists;
108 double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
109
110 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
111}
112
113static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp,
114 struct hist_entry *he)
115{
116 struct hists *hists = he->hists;
117 double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
118 const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
119
120 return scnprintf(hpp->buf, hpp->size, fmt, percent);
121}
122
123static int hpp__header_overhead_guest_us(struct perf_hpp *hpp)
124{
125 return scnprintf(hpp->buf, hpp->size, "guest usr");
126}
127 70
128static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) 71 idx_delta = nr_members - prev_idx - 1;
129{
130 return 9;
131}
132 72
133static int hpp__color_overhead_guest_us(struct perf_hpp *hpp, 73 while (idx_delta--) {
134 struct hist_entry *he) 74 /*
135{ 75 * zero-fill group members at last which have no sample
136 struct hists *hists = he->hists; 76 */
137 double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; 77 ret += print_fn(hpp->buf + ret, hpp->size - ret,
138 78 fmt, 0);
139 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); 79 }
80 }
81 return ret;
140} 82}
141 83
142static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp, 84#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
143 struct hist_entry *he) 85static int hpp__header_##_type(struct perf_hpp *hpp) \
144{ 86{ \
145 struct hists *hists = he->hists; 87 int len = _min_width; \
146 double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; 88 \
147 const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; 89 if (symbol_conf.event_group) { \
90 struct perf_evsel *evsel = hpp->ptr; \
91 \
92 len = max(len, evsel->nr_members * _unit_width); \
93 } \
94 return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
95}
96
97#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
98static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused) \
99{ \
100 int len = _min_width; \
101 \
102 if (symbol_conf.event_group) { \
103 struct perf_evsel *evsel = hpp->ptr; \
104 \
105 len = max(len, evsel->nr_members * _unit_width); \
106 } \
107 return len; \
108}
109
110#define __HPP_COLOR_PERCENT_FN(_type, _field) \
111static u64 he_get_##_field(struct hist_entry *he) \
112{ \
113 return he->stat._field; \
114} \
115 \
116static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
117{ \
118 return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
119 (hpp_snprint_fn)percent_color_snprintf, true); \
120}
121
122#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
123static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
124{ \
125 const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
126 return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
127 scnprintf, true); \
128}
129
130#define __HPP_ENTRY_RAW_FN(_type, _field) \
131static u64 he_get_raw_##_field(struct hist_entry *he) \
132{ \
133 return he->stat._field; \
134} \
135 \
136static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
137{ \
138 const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
139 return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
140}
141
142#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
143__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
144__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
145__HPP_COLOR_PERCENT_FN(_type, _field) \
146__HPP_ENTRY_PERCENT_FN(_type, _field)
147
148#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \
149__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
150__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
151__HPP_ENTRY_RAW_FN(_type, _field)
152
153
154HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
155HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
156HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
157HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
158HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
159
160HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
161HPP_RAW_FNS(period, "Period", period, 12, 12)
148 162
149 return scnprintf(hpp->buf, hpp->size, fmt, percent);
150}
151 163
152static int hpp__header_baseline(struct perf_hpp *hpp) 164static int hpp__header_baseline(struct perf_hpp *hpp)
153{ 165{
@@ -179,7 +191,7 @@ static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he)
179{ 191{
180 double percent = baseline_percent(he); 192 double percent = baseline_percent(he);
181 193
182 if (hist_entry__has_pairs(he)) 194 if (hist_entry__has_pairs(he) || symbol_conf.field_sep)
183 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); 195 return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
184 else 196 else
185 return scnprintf(hpp->buf, hpp->size, " "); 197 return scnprintf(hpp->buf, hpp->size, " ");
@@ -196,44 +208,6 @@ static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he)
196 return scnprintf(hpp->buf, hpp->size, " "); 208 return scnprintf(hpp->buf, hpp->size, " ");
197} 209}
198 210
199static int hpp__header_samples(struct perf_hpp *hpp)
200{
201 const char *fmt = symbol_conf.field_sep ? "%s" : "%11s";
202
203 return scnprintf(hpp->buf, hpp->size, fmt, "Samples");
204}
205
206static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused)
207{
208 return 11;
209}
210
211static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he)
212{
213 const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64;
214
215 return scnprintf(hpp->buf, hpp->size, fmt, he->stat.nr_events);
216}
217
218static int hpp__header_period(struct perf_hpp *hpp)
219{
220 const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
221
222 return scnprintf(hpp->buf, hpp->size, fmt, "Period");
223}
224
225static int hpp__width_period(struct perf_hpp *hpp __maybe_unused)
226{
227 return 12;
228}
229
230static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he)
231{
232 const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
233
234 return scnprintf(hpp->buf, hpp->size, fmt, he->stat.period);
235}
236
237static int hpp__header_period_baseline(struct perf_hpp *hpp) 211static int hpp__header_period_baseline(struct perf_hpp *hpp)
238{ 212{
239 const char *fmt = symbol_conf.field_sep ? "%s" : "%12s"; 213 const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
@@ -254,6 +228,7 @@ static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *h
254 228
255 return scnprintf(hpp->buf, hpp->size, fmt, period); 229 return scnprintf(hpp->buf, hpp->size, fmt, period);
256} 230}
231
257static int hpp__header_delta(struct perf_hpp *hpp) 232static int hpp__header_delta(struct perf_hpp *hpp)
258{ 233{
259 const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; 234 const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
@@ -268,14 +243,18 @@ static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
268 243
269static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) 244static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
270{ 245{
246 struct hist_entry *pair = hist_entry__next_pair(he);
271 const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s"; 247 const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
272 char buf[32] = " "; 248 char buf[32] = " ";
273 double diff; 249 double diff = 0.0;
274 250
275 if (he->diff.computed) 251 if (pair) {
276 diff = he->diff.period_ratio_delta; 252 if (he->diff.computed)
277 else 253 diff = he->diff.period_ratio_delta;
278 diff = perf_diff__compute_delta(he); 254 else
255 diff = perf_diff__compute_delta(he, pair);
256 } else
257 diff = perf_diff__period_percent(he, he->stat.period);
279 258
280 if (fabs(diff) >= 0.01) 259 if (fabs(diff) >= 0.01)
281 scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); 260 scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
@@ -297,14 +276,17 @@ static int hpp__width_ratio(struct perf_hpp *hpp __maybe_unused)
297 276
298static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he) 277static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he)
299{ 278{
279 struct hist_entry *pair = hist_entry__next_pair(he);
300 const char *fmt = symbol_conf.field_sep ? "%s" : "%14s"; 280 const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
301 char buf[32] = " "; 281 char buf[32] = " ";
302 double ratio; 282 double ratio = 0.0;
303 283
304 if (he->diff.computed) 284 if (pair) {
305 ratio = he->diff.period_ratio; 285 if (he->diff.computed)
306 else 286 ratio = he->diff.period_ratio;
307 ratio = perf_diff__compute_ratio(he); 287 else
288 ratio = perf_diff__compute_ratio(he, pair);
289 }
308 290
309 if (ratio > 0.0) 291 if (ratio > 0.0)
310 scnprintf(buf, sizeof(buf), "%+14.6F", ratio); 292 scnprintf(buf, sizeof(buf), "%+14.6F", ratio);
@@ -326,14 +308,17 @@ static int hpp__width_wdiff(struct perf_hpp *hpp __maybe_unused)
326 308
327static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he) 309static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
328{ 310{
311 struct hist_entry *pair = hist_entry__next_pair(he);
329 const char *fmt = symbol_conf.field_sep ? "%s" : "%14s"; 312 const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
330 char buf[32] = " "; 313 char buf[32] = " ";
331 s64 wdiff; 314 s64 wdiff = 0;
332 315
333 if (he->diff.computed) 316 if (pair) {
334 wdiff = he->diff.wdiff; 317 if (he->diff.computed)
335 else 318 wdiff = he->diff.wdiff;
336 wdiff = perf_diff__compute_wdiff(he); 319 else
320 wdiff = perf_diff__compute_wdiff(he, pair);
321 }
337 322
338 if (wdiff != 0) 323 if (wdiff != 0)
339 scnprintf(buf, sizeof(buf), "%14ld", wdiff); 324 scnprintf(buf, sizeof(buf), "%14ld", wdiff);
@@ -341,30 +326,6 @@ static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
341 return scnprintf(hpp->buf, hpp->size, fmt, buf); 326 return scnprintf(hpp->buf, hpp->size, fmt, buf);
342} 327}
343 328
344static int hpp__header_displ(struct perf_hpp *hpp)
345{
346 return scnprintf(hpp->buf, hpp->size, "Displ.");
347}
348
349static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused)
350{
351 return 6;
352}
353
354static int hpp__entry_displ(struct perf_hpp *hpp,
355 struct hist_entry *he)
356{
357 struct hist_entry *pair = hist_entry__next_pair(he);
358 long displacement = pair ? pair->position - he->position : 0;
359 const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s";
360 char buf[32] = " ";
361
362 if (displacement)
363 scnprintf(buf, sizeof(buf), "%+4ld", displacement);
364
365 return scnprintf(hpp->buf, hpp->size, fmt, buf);
366}
367
368static int hpp__header_formula(struct perf_hpp *hpp) 329static int hpp__header_formula(struct perf_hpp *hpp)
369{ 330{
370 const char *fmt = symbol_conf.field_sep ? "%s" : "%70s"; 331 const char *fmt = symbol_conf.field_sep ? "%s" : "%70s";
@@ -379,67 +340,91 @@ static int hpp__width_formula(struct perf_hpp *hpp __maybe_unused)
379 340
380static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he) 341static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he)
381{ 342{
343 struct hist_entry *pair = hist_entry__next_pair(he);
382 const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s"; 344 const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s";
383 char buf[96] = " "; 345 char buf[96] = " ";
384 346
385 perf_diff__formula(buf, sizeof(buf), he); 347 if (pair)
348 perf_diff__formula(he, pair, buf, sizeof(buf));
349
386 return scnprintf(hpp->buf, hpp->size, fmt, buf); 350 return scnprintf(hpp->buf, hpp->size, fmt, buf);
387} 351}
388 352
389#define HPP__COLOR_PRINT_FNS(_name) \ 353#define HPP__COLOR_PRINT_FNS(_name) \
390 .header = hpp__header_ ## _name, \ 354 { \
391 .width = hpp__width_ ## _name, \ 355 .header = hpp__header_ ## _name, \
392 .color = hpp__color_ ## _name, \ 356 .width = hpp__width_ ## _name, \
393 .entry = hpp__entry_ ## _name 357 .color = hpp__color_ ## _name, \
358 .entry = hpp__entry_ ## _name \
359 }
394 360
395#define HPP__PRINT_FNS(_name) \ 361#define HPP__PRINT_FNS(_name) \
396 .header = hpp__header_ ## _name, \ 362 { \
397 .width = hpp__width_ ## _name, \ 363 .header = hpp__header_ ## _name, \
398 .entry = hpp__entry_ ## _name 364 .width = hpp__width_ ## _name, \
365 .entry = hpp__entry_ ## _name \
366 }
399 367
400struct perf_hpp_fmt perf_hpp__format[] = { 368struct perf_hpp_fmt perf_hpp__format[] = {
401 { .cond = false, HPP__COLOR_PRINT_FNS(baseline) }, 369 HPP__COLOR_PRINT_FNS(baseline),
402 { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, 370 HPP__COLOR_PRINT_FNS(overhead),
403 { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, 371 HPP__COLOR_PRINT_FNS(overhead_sys),
404 { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, 372 HPP__COLOR_PRINT_FNS(overhead_us),
405 { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, 373 HPP__COLOR_PRINT_FNS(overhead_guest_sys),
406 { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, 374 HPP__COLOR_PRINT_FNS(overhead_guest_us),
407 { .cond = false, HPP__PRINT_FNS(samples) }, 375 HPP__PRINT_FNS(samples),
408 { .cond = false, HPP__PRINT_FNS(period) }, 376 HPP__PRINT_FNS(period),
409 { .cond = false, HPP__PRINT_FNS(period_baseline) }, 377 HPP__PRINT_FNS(period_baseline),
410 { .cond = false, HPP__PRINT_FNS(delta) }, 378 HPP__PRINT_FNS(delta),
411 { .cond = false, HPP__PRINT_FNS(ratio) }, 379 HPP__PRINT_FNS(ratio),
412 { .cond = false, HPP__PRINT_FNS(wdiff) }, 380 HPP__PRINT_FNS(wdiff),
413 { .cond = false, HPP__PRINT_FNS(displ) }, 381 HPP__PRINT_FNS(formula)
414 { .cond = false, HPP__PRINT_FNS(formula) }
415}; 382};
416 383
384LIST_HEAD(perf_hpp__list);
385
386
417#undef HPP__COLOR_PRINT_FNS 387#undef HPP__COLOR_PRINT_FNS
418#undef HPP__PRINT_FNS 388#undef HPP__PRINT_FNS
419 389
390#undef HPP_PERCENT_FNS
391#undef HPP_RAW_FNS
392
393#undef __HPP_HEADER_FN
394#undef __HPP_WIDTH_FN
395#undef __HPP_COLOR_PERCENT_FN
396#undef __HPP_ENTRY_PERCENT_FN
397#undef __HPP_ENTRY_RAW_FN
398
399
420void perf_hpp__init(void) 400void perf_hpp__init(void)
421{ 401{
422 if (symbol_conf.show_cpu_utilization) { 402 if (symbol_conf.show_cpu_utilization) {
423 perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; 403 perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
424 perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; 404 perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);
425 405
426 if (perf_guest) { 406 if (perf_guest) {
427 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; 407 perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
428 perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; 408 perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
429 } 409 }
430 } 410 }
431 411
432 if (symbol_conf.show_nr_samples) 412 if (symbol_conf.show_nr_samples)
433 perf_hpp__format[PERF_HPP__SAMPLES].cond = true; 413 perf_hpp__column_enable(PERF_HPP__SAMPLES);
434 414
435 if (symbol_conf.show_total_period) 415 if (symbol_conf.show_total_period)
436 perf_hpp__format[PERF_HPP__PERIOD].cond = true; 416 perf_hpp__column_enable(PERF_HPP__PERIOD);
417}
418
419void perf_hpp__column_register(struct perf_hpp_fmt *format)
420{
421 list_add_tail(&format->list, &perf_hpp__list);
437} 422}
438 423
439void perf_hpp__column_enable(unsigned col, bool enable) 424void perf_hpp__column_enable(unsigned col)
440{ 425{
441 BUG_ON(col >= PERF_HPP__MAX_INDEX); 426 BUG_ON(col >= PERF_HPP__MAX_INDEX);
442 perf_hpp__format[col].cond = enable; 427 perf_hpp__column_register(&perf_hpp__format[col]);
443} 428}
444 429
445static inline void advance_hpp(struct perf_hpp *hpp, int inc) 430static inline void advance_hpp(struct perf_hpp *hpp, int inc)
@@ -452,27 +437,29 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
452 bool color) 437 bool color)
453{ 438{
454 const char *sep = symbol_conf.field_sep; 439 const char *sep = symbol_conf.field_sep;
440 struct perf_hpp_fmt *fmt;
455 char *start = hpp->buf; 441 char *start = hpp->buf;
456 int i, ret; 442 int ret;
457 bool first = true; 443 bool first = true;
458 444
459 if (symbol_conf.exclude_other && !he->parent) 445 if (symbol_conf.exclude_other && !he->parent)
460 return 0; 446 return 0;
461 447
462 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 448 perf_hpp__for_each_format(fmt) {
463 if (!perf_hpp__format[i].cond) 449 /*
464 continue; 450 * If there's no field_sep, we still need
465 451 * to display initial ' '.
452 */
466 if (!sep || !first) { 453 if (!sep || !first) {
467 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); 454 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
468 advance_hpp(hpp, ret); 455 advance_hpp(hpp, ret);
456 } else
469 first = false; 457 first = false;
470 }
471 458
472 if (color && perf_hpp__format[i].color) 459 if (color && fmt->color)
473 ret = perf_hpp__format[i].color(hpp, he); 460 ret = fmt->color(hpp, he);
474 else 461 else
475 ret = perf_hpp__format[i].entry(hpp, he); 462 ret = fmt->entry(hpp, he);
476 463
477 advance_hpp(hpp, ret); 464 advance_hpp(hpp, ret);
478 } 465 }
@@ -504,16 +491,18 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
504 */ 491 */
505unsigned int hists__sort_list_width(struct hists *hists) 492unsigned int hists__sort_list_width(struct hists *hists)
506{ 493{
494 struct perf_hpp_fmt *fmt;
507 struct sort_entry *se; 495 struct sort_entry *se;
508 int i, ret = 0; 496 int i = 0, ret = 0;
497 struct perf_hpp dummy_hpp = {
498 .ptr = hists_to_evsel(hists),
499 };
509 500
510 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 501 perf_hpp__for_each_format(fmt) {
511 if (!perf_hpp__format[i].cond)
512 continue;
513 if (i) 502 if (i)
514 ret += 2; 503 ret += 2;
515 504
516 ret += perf_hpp__format[i].width(NULL); 505 ret += fmt->width(&dummy_hpp);
517 } 506 }
518 507
519 list_for_each_entry(se, &hist_entry__sort_list, list) 508 list_for_each_entry(se, &hist_entry__sort_list, list)
diff --git a/tools/perf/ui/keysyms.h b/tools/perf/ui/keysyms.h
index 809eca5707fa..65092d576b4e 100644
--- a/tools/perf/ui/keysyms.h
+++ b/tools/perf/ui/keysyms.h
@@ -23,5 +23,6 @@
23#define K_TIMER -1 23#define K_TIMER -1
24#define K_ERROR -2 24#define K_ERROR -2
25#define K_RESIZE -3 25#define K_RESIZE -3
26#define K_SWITCH_INPUT_DATA -4
26 27
27#endif /* _PERF_KEYSYMS_H_ */ 28#endif /* _PERF_KEYSYMS_H_ */
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index ebb4cc107876..ae6a789cb0f6 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -8,7 +8,7 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
8 8
9void setup_browser(bool fallback_to_pager) 9void setup_browser(bool fallback_to_pager)
10{ 10{
11 if (!isatty(1) || dump_trace) 11 if (use_browser < 2 && (!isatty(1) || dump_trace))
12 use_browser = 0; 12 use_browser = 0;
13 13
14 /* default to TUI */ 14 /* default to TUI */
@@ -30,6 +30,7 @@ void setup_browser(bool fallback_to_pager)
30 if (fallback_to_pager) 30 if (fallback_to_pager)
31 setup_pager(); 31 setup_pager();
32 32
33 perf_hpp__column_enable(PERF_HPP__OVERHEAD);
33 perf_hpp__init(); 34 perf_hpp__init();
34 break; 35 break;
35 } 36 }
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index f0ee204f99bb..ff1f60cf442e 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -3,6 +3,7 @@
3#include "../../util/util.h" 3#include "../../util/util.h"
4#include "../../util/hist.h" 4#include "../../util/hist.h"
5#include "../../util/sort.h" 5#include "../../util/sort.h"
6#include "../../util/evsel.h"
6 7
7 8
8static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 9static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -335,17 +336,19 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
335size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, 336size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
336 int max_cols, FILE *fp) 337 int max_cols, FILE *fp)
337{ 338{
339 struct perf_hpp_fmt *fmt;
338 struct sort_entry *se; 340 struct sort_entry *se;
339 struct rb_node *nd; 341 struct rb_node *nd;
340 size_t ret = 0; 342 size_t ret = 0;
341 unsigned int width; 343 unsigned int width;
342 const char *sep = symbol_conf.field_sep; 344 const char *sep = symbol_conf.field_sep;
343 const char *col_width = symbol_conf.col_width_list_str; 345 const char *col_width = symbol_conf.col_width_list_str;
344 int idx, nr_rows = 0; 346 int nr_rows = 0;
345 char bf[96]; 347 char bf[96];
346 struct perf_hpp dummy_hpp = { 348 struct perf_hpp dummy_hpp = {
347 .buf = bf, 349 .buf = bf,
348 .size = sizeof(bf), 350 .size = sizeof(bf),
351 .ptr = hists_to_evsel(hists),
349 }; 352 };
350 bool first = true; 353 bool first = true;
351 354
@@ -355,16 +358,14 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
355 goto print_entries; 358 goto print_entries;
356 359
357 fprintf(fp, "# "); 360 fprintf(fp, "# ");
358 for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
359 if (!perf_hpp__format[idx].cond)
360 continue;
361 361
362 perf_hpp__for_each_format(fmt) {
362 if (!first) 363 if (!first)
363 fprintf(fp, "%s", sep ?: " "); 364 fprintf(fp, "%s", sep ?: " ");
364 else 365 else
365 first = false; 366 first = false;
366 367
367 perf_hpp__format[idx].header(&dummy_hpp); 368 fmt->header(&dummy_hpp);
368 fprintf(fp, "%s", bf); 369 fprintf(fp, "%s", bf);
369 } 370 }
370 371
@@ -400,18 +401,16 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
400 first = true; 401 first = true;
401 402
402 fprintf(fp, "# "); 403 fprintf(fp, "# ");
403 for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
404 unsigned int i;
405 404
406 if (!perf_hpp__format[idx].cond) 405 perf_hpp__for_each_format(fmt) {
407 continue; 406 unsigned int i;
408 407
409 if (!first) 408 if (!first)
410 fprintf(fp, "%s", sep ?: " "); 409 fprintf(fp, "%s", sep ?: " ");
411 else 410 else
412 first = false; 411 first = false;
413 412
414 width = perf_hpp__format[idx].width(&dummy_hpp); 413 width = fmt->width(&dummy_hpp);
415 for (i = 0; i < width; i++) 414 for (i = 0; i < width; i++)
416 fprintf(fp, "."); 415 fprintf(fp, ".");
417 } 416 }
@@ -462,7 +461,7 @@ out:
462 return ret; 461 return ret;
463} 462}
464 463
465size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) 464size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
466{ 465{
467 int i; 466 int i;
468 size_t ret = 0; 467 size_t ret = 0;
@@ -470,7 +469,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
470 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { 469 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
471 const char *name; 470 const char *name;
472 471
473 if (hists->stats.nr_events[i] == 0) 472 if (stats->nr_events[i] == 0)
474 continue; 473 continue;
475 474
476 name = perf_event__name(i); 475 name = perf_event__name(i);
@@ -478,7 +477,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
478 continue; 477 continue;
479 478
480 ret += fprintf(fp, "%16s events: %10d\n", name, 479 ret += fprintf(fp, "%16s events: %10d\n", name,
481 hists->stats.nr_events[i]); 480 stats->nr_events[i]);
482 } 481 }
483 482
484 return ret; 483 return ret;
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 2884d2f41e33..1c8b9afd5d6e 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -8,6 +8,8 @@
8#include "../ui.h" 8#include "../ui.h"
9#include "../libslang.h" 9#include "../libslang.h"
10 10
11char ui_helpline__last_msg[1024];
12
11static void tui_helpline__pop(void) 13static void tui_helpline__pop(void)
12{ 14{
13} 15}
@@ -23,20 +25,7 @@ static void tui_helpline__push(const char *msg)
23 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; 25 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
24} 26}
25 27
26struct ui_helpline tui_helpline_fns = { 28static int tui_helpline__show(const char *format, va_list ap)
27 .pop = tui_helpline__pop,
28 .push = tui_helpline__push,
29};
30
31void ui_helpline__init(void)
32{
33 helpline_fns = &tui_helpline_fns;
34 ui_helpline__puts(" ");
35}
36
37char ui_helpline__last_msg[1024];
38
39int ui_helpline__show_help(const char *format, va_list ap)
40{ 29{
41 int ret; 30 int ret;
42 static int backlog; 31 static int backlog;
@@ -55,3 +44,15 @@ int ui_helpline__show_help(const char *format, va_list ap)
55 44
56 return ret; 45 return ret;
57} 46}
47
48struct ui_helpline tui_helpline_fns = {
49 .pop = tui_helpline__pop,
50 .push = tui_helpline__push,
51 .show = tui_helpline__show,
52};
53
54void ui_helpline__init(void)
55{
56 helpline_fns = &tui_helpline_fns;
57 ui_helpline__puts(" ");
58}
diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
index 4f989774c8c6..e3e0a963d03a 100644
--- a/tools/perf/ui/util.c
+++ b/tools/perf/ui/util.c
@@ -52,7 +52,6 @@ int ui__warning(const char *format, ...)
52 return ret; 52 return ret;
53} 53}
54 54
55
56/** 55/**
57 * perf_error__register - Register error logging functions 56 * perf_error__register - Register error logging functions
58 * @eops: The pointer to error logging function struct 57 * @eops: The pointer to error logging function struct
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 6aa34e5afdcf..055fef34b6f6 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -26,13 +26,13 @@ VN=$(expr "$VN" : v*'\(.*\)')
26 26
27if test -r $GVF 27if test -r $GVF
28then 28then
29 VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) 29 VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF)
30else 30else
31 VC=unset 31 VC=unset
32fi 32fi
33test "$VN" = "$VC" || { 33test "$VN" = "$VC" || {
34 echo >&2 "PERF_VERSION = $VN" 34 echo >&2 "PERF_VERSION = $VN"
35 echo "PERF_VERSION = $VN" >$GVF 35 echo "#define PERF_VERSION \"$VN\"" >$GVF
36} 36}
37 37
38 38
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 07aaeea60000..d33fe937e6f1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ fallback:
809 pr_err("Can't annotate %s:\n\n" 809 pr_err("Can't annotate %s:\n\n"
810 "No vmlinux file%s\nwas found in the path.\n\n" 810 "No vmlinux file%s\nwas found in the path.\n\n"
811 "Please use:\n\n" 811 "Please use:\n\n"
812 " perf buildid-cache -av vmlinux\n\n" 812 " perf buildid-cache -vu vmlinux\n\n"
813 "or:\n\n" 813 "or:\n\n"
814 " --vmlinux vmlinux\n", 814 " --vmlinux vmlinux\n",
815 sym->name, build_id_msg ?: ""); 815 sym->name, build_id_msg ?: "");
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 8eec94358a4a..c422440fe611 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -6,6 +6,7 @@
6#include "types.h" 6#include "types.h"
7#include "symbol.h" 7#include "symbol.h"
8#include "hist.h" 8#include "hist.h"
9#include "sort.h"
9#include <linux/list.h> 10#include <linux/list.h>
10#include <linux/rbtree.h> 11#include <linux/rbtree.h>
11#include <pthread.h> 12#include <pthread.h>
@@ -154,6 +155,29 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
154} 155}
155#endif 156#endif
156 157
158#ifdef GTK2_SUPPORT
159int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
160 struct hist_browser_timer *hbt);
161
162static inline int hist_entry__gtk_annotate(struct hist_entry *he, int evidx,
163 struct hist_browser_timer *hbt)
164{
165 return symbol__gtk_annotate(he->ms.sym, he->ms.map, evidx, hbt);
166}
167
168void perf_gtk__show_annotations(void);
169#else
170static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused,
171 int evidx __maybe_unused,
172 struct hist_browser_timer *hbt
173 __maybe_unused)
174{
175 return 0;
176}
177
178static inline void perf_gtk__show_annotations(void) {}
179#endif
180
157extern const char *disassembler_style; 181extern const char *disassembler_style;
158 182
159#endif /* __PERF_ANNOTATE_H */ 183#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index d3b3f5d82137..42b6a632fe7b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -444,7 +444,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
444 struct callchain_cursor_node *node = *cursor->last; 444 struct callchain_cursor_node *node = *cursor->last;
445 445
446 if (!node) { 446 if (!node) {
447 node = calloc(sizeof(*node), 1); 447 node = calloc(1, sizeof(*node));
448 if (!node) 448 if (!node)
449 return -ENOMEM; 449 return -ENOMEM;
450 450
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index eb340571e7d6..3ee9f67d5af0 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -143,4 +143,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
143 cursor->curr = cursor->curr->next; 143 cursor->curr = cursor->curr->next;
144 cursor->pos++; 144 cursor->pos++;
145} 145}
146
147struct option;
148
149int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
150extern const char record_callchain_help[];
146#endif /* __PERF_CALLCHAIN_H */ 151#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 2b32ffa9ebdb..f817046e22b1 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -1,4 +1,5 @@
1#include "util.h" 1#include "util.h"
2#include "sysfs.h"
2#include "../perf.h" 3#include "../perf.h"
3#include "cpumap.h" 4#include "cpumap.h"
4#include <assert.h> 5#include <assert.h>
@@ -201,3 +202,56 @@ void cpu_map__delete(struct cpu_map *map)
201{ 202{
202 free(map); 203 free(map);
203} 204}
205
206int cpu_map__get_socket(struct cpu_map *map, int idx)
207{
208 FILE *fp;
209 const char *mnt;
210 char path[PATH_MAX];
211 int cpu, ret;
212
213 if (idx > map->nr)
214 return -1;
215
216 cpu = map->map[idx];
217
218 mnt = sysfs_find_mountpoint();
219 if (!mnt)
220 return -1;
221
222 sprintf(path,
223 "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
224 mnt, cpu);
225
226 fp = fopen(path, "r");
227 if (!fp)
228 return -1;
229 ret = fscanf(fp, "%d", &cpu);
230 fclose(fp);
231 return ret == 1 ? cpu : -1;
232}
233
234int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
235{
236 struct cpu_map *sock;
237 int nr = cpus->nr;
238 int cpu, s1, s2;
239
240 sock = calloc(1, sizeof(*sock) + nr * sizeof(int));
241 if (!sock)
242 return -1;
243
244 for (cpu = 0; cpu < nr; cpu++) {
245 s1 = cpu_map__get_socket(cpus, cpu);
246 for (s2 = 0; s2 < sock->nr; s2++) {
247 if (s1 == sock->map[s2])
248 break;
249 }
250 if (s2 == sock->nr) {
251 sock->map[sock->nr] = s1;
252 sock->nr++;
253 }
254 }
255 *sockp = sock;
256 return 0;
257}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 2f68a3b8c285..161b00756a12 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -14,6 +14,15 @@ struct cpu_map *cpu_map__dummy_new(void);
14void cpu_map__delete(struct cpu_map *map); 14void cpu_map__delete(struct cpu_map *map);
15struct cpu_map *cpu_map__read(FILE *file); 15struct cpu_map *cpu_map__read(FILE *file);
16size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); 16size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
17int cpu_map__get_socket(struct cpu_map *map, int idx);
18int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
19
20static inline int cpu_map__socket(struct cpu_map *sock, int s)
21{
22 if (!sock || s > sock->nr || s < 0)
23 return 0;
24 return sock->map[s];
25}
17 26
18static inline int cpu_map__nr(const struct cpu_map *map) 27static inline int cpu_map__nr(const struct cpu_map *map)
19{ 28{
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 03f830b48148..399e74c34c1a 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -23,10 +23,8 @@ int eprintf(int level, const char *fmt, ...)
23 23
24 if (verbose >= level) { 24 if (verbose >= level) {
25 va_start(args, fmt); 25 va_start(args, fmt);
26 if (use_browser == 1) 26 if (use_browser >= 1)
27 ret = ui_helpline__show_help(fmt, args); 27 ui_helpline__vshow(fmt, args);
28 else if (use_browser == 2)
29 ret = perf_gtk__show_helpline(fmt, args);
30 else 28 else
31 ret = vfprintf(stderr, fmt, args); 29 ret = vfprintf(stderr, fmt, args);
32 va_end(args); 30 va_end(args);
@@ -49,28 +47,6 @@ int dump_printf(const char *fmt, ...)
49 return ret; 47 return ret;
50} 48}
51 49
52#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT)
53int ui__warning(const char *format, ...)
54{
55 va_list args;
56
57 va_start(args, format);
58 vfprintf(stderr, format, args);
59 va_end(args);
60 return 0;
61}
62#endif
63
64int ui__error_paranoid(void)
65{
66 return ui__error("Permission error - are you root?\n"
67 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
68 " -1 - Not paranoid at all\n"
69 " 0 - Disallow raw tracepoint access for unpriv\n"
70 " 1 - Disallow cpu events for unpriv\n"
71 " 2 - Disallow kernel profiling for unpriv\n");
72}
73
74void trace_event(union perf_event *event) 50void trace_event(union perf_event *event)
75{ 51{
76 unsigned char *raw_event = (void *)event; 52 unsigned char *raw_event = (void *)event;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 83e8d234af6b..efbd98805ad0 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -5,6 +5,8 @@
5#include <stdbool.h> 5#include <stdbool.h>
6#include "event.h" 6#include "event.h"
7#include "../ui/helpline.h" 7#include "../ui/helpline.h"
8#include "../ui/progress.h"
9#include "../ui/util.h"
8 10
9extern int verbose; 11extern int verbose;
10extern bool quiet, dump_trace; 12extern bool quiet, dump_trace;
@@ -12,39 +14,7 @@ extern bool quiet, dump_trace;
12int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); 14int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
13void trace_event(union perf_event *event); 15void trace_event(union perf_event *event);
14 16
15struct ui_progress;
16struct perf_error_ops;
17
18#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT)
19
20#include "../ui/progress.h"
21int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); 17int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
22#include "../ui/util.h"
23
24#else
25
26static inline void ui_progress__update(u64 curr __maybe_unused,
27 u64 total __maybe_unused,
28 const char *title __maybe_unused) {}
29static inline void ui_progress__finish(void) {}
30
31#define ui__error(format, arg...) ui__warning(format, ##arg)
32
33static inline int
34perf_error__register(struct perf_error_ops *eops __maybe_unused)
35{
36 return 0;
37}
38
39static inline int
40perf_error__unregister(struct perf_error_ops *eops __maybe_unused)
41{
42 return 0;
43}
44
45#endif /* NEWT_SUPPORT || GTK2_SUPPORT */
46
47int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 18int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
48int ui__error_paranoid(void);
49 19
50#endif /* __PERF_DEBUG_H */ 20#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d6d9a465acdb..6f7d5a9d6b05 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -539,13 +539,13 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
539} 539}
540 540
541size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 541size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
542 bool with_hits) 542 bool (skip)(struct dso *dso, int parm), int parm)
543{ 543{
544 struct dso *pos; 544 struct dso *pos;
545 size_t ret = 0; 545 size_t ret = 0;
546 546
547 list_for_each_entry(pos, head, node) { 547 list_for_each_entry(pos, head, node) {
548 if (with_hits && !pos->hit) 548 if (skip && skip(pos, parm))
549 continue; 549 continue;
550 ret += dso__fprintf_buildid(pos, fp); 550 ret += dso__fprintf_buildid(pos, fp);
551 ret += fprintf(fp, " %s\n", pos->long_name); 551 ret += fprintf(fp, " %s\n", pos->long_name);
@@ -583,7 +583,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
583 if (dso->short_name != dso->long_name) 583 if (dso->short_name != dso->long_name)
584 ret += fprintf(fp, "%s, ", dso->long_name); 584 ret += fprintf(fp, "%s, ", dso->long_name);
585 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 585 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
586 dso->loaded ? "" : "NOT "); 586 dso__loaded(dso, type) ? "" : "NOT ");
587 ret += dso__fprintf_buildid(dso, fp); 587 ret += dso__fprintf_buildid(dso, fp);
588 ret += fprintf(fp, ")\n"); 588 ret += fprintf(fp, ")\n");
589 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 589 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index e03276940b99..450199ab51b5 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -138,7 +138,7 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name);
138bool __dsos__read_build_ids(struct list_head *head, bool with_hits); 138bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
139 139
140size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 140size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
141 bool with_hits); 141 bool (skip)(struct dso *dso, int parm), int parm);
142size_t __dsos__fprintf(struct list_head *head, FILE *fp); 142size_t __dsos__fprintf(struct list_head *head, FILE *fp);
143 143
144size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); 144size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3cf2c3e0605f..5cd13d768cec 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -476,8 +476,10 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
476 } 476 }
477 } 477 }
478 478
479 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) 479 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
480 free(event);
480 return -ENOENT; 481 return -ENOENT;
482 }
481 483
482 map = machine->vmlinux_maps[MAP__FUNCTION]; 484 map = machine->vmlinux_maps[MAP__FUNCTION];
483 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 485 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 705293489e3c..bc4ad7977438 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -49,10 +49,16 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
49 return evlist; 49 return evlist;
50} 50}
51 51
52void perf_evlist__config_attrs(struct perf_evlist *evlist, 52void perf_evlist__config(struct perf_evlist *evlist,
53 struct perf_record_opts *opts) 53 struct perf_record_opts *opts)
54{ 54{
55 struct perf_evsel *evsel; 55 struct perf_evsel *evsel;
56 /*
57 * Set the evsel leader links before we configure attributes,
58 * since some might depend on this info.
59 */
60 if (opts->group)
61 perf_evlist__set_leader(evlist);
56 62
57 if (evlist->cpus->map[0] < 0) 63 if (evlist->cpus->map[0] < 0)
58 opts->no_inherit = true; 64 opts->no_inherit = true;
@@ -61,7 +67,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
61 perf_evsel__config(evsel, opts); 67 perf_evsel__config(evsel, opts);
62 68
63 if (evlist->nr_entries > 1) 69 if (evlist->nr_entries > 1)
64 evsel->attr.sample_type |= PERF_SAMPLE_ID; 70 perf_evsel__set_sample_id(evsel);
65 } 71 }
66} 72}
67 73
@@ -111,18 +117,21 @@ void __perf_evlist__set_leader(struct list_head *list)
111 struct perf_evsel *evsel, *leader; 117 struct perf_evsel *evsel, *leader;
112 118
113 leader = list_entry(list->next, struct perf_evsel, node); 119 leader = list_entry(list->next, struct perf_evsel, node);
114 leader->leader = NULL; 120 evsel = list_entry(list->prev, struct perf_evsel, node);
121
122 leader->nr_members = evsel->idx - leader->idx + 1;
115 123
116 list_for_each_entry(evsel, list, node) { 124 list_for_each_entry(evsel, list, node) {
117 if (evsel != leader) 125 evsel->leader = leader;
118 evsel->leader = leader;
119 } 126 }
120} 127}
121 128
122void perf_evlist__set_leader(struct perf_evlist *evlist) 129void perf_evlist__set_leader(struct perf_evlist *evlist)
123{ 130{
124 if (evlist->nr_entries) 131 if (evlist->nr_entries) {
132 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
125 __perf_evlist__set_leader(&evlist->entries); 133 __perf_evlist__set_leader(&evlist->entries);
134 }
126} 135}
127 136
128int perf_evlist__add_default(struct perf_evlist *evlist) 137int perf_evlist__add_default(struct perf_evlist *evlist)
@@ -222,7 +231,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
222 231
223 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 232 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
224 list_for_each_entry(pos, &evlist->entries, node) { 233 list_for_each_entry(pos, &evlist->entries, node) {
225 if (perf_evsel__is_group_member(pos)) 234 if (!perf_evsel__is_group_leader(pos))
226 continue; 235 continue;
227 for (thread = 0; thread < evlist->threads->nr; thread++) 236 for (thread = 0; thread < evlist->threads->nr; thread++)
228 ioctl(FD(pos, cpu, thread), 237 ioctl(FD(pos, cpu, thread),
@@ -238,7 +247,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
238 247
239 for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { 248 for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
240 list_for_each_entry(pos, &evlist->entries, node) { 249 list_for_each_entry(pos, &evlist->entries, node) {
241 if (perf_evsel__is_group_member(pos)) 250 if (!perf_evsel__is_group_leader(pos))
242 continue; 251 continue;
243 for (thread = 0; thread < evlist->threads->nr; thread++) 252 for (thread = 0; thread < evlist->threads->nr; thread++)
244 ioctl(FD(pos, cpu, thread), 253 ioctl(FD(pos, cpu, thread),
@@ -366,7 +375,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
366 if ((old & md->mask) + size != ((old + size) & md->mask)) { 375 if ((old & md->mask) + size != ((old + size) & md->mask)) {
367 unsigned int offset = old; 376 unsigned int offset = old;
368 unsigned int len = min(sizeof(*event), size), cpy; 377 unsigned int len = min(sizeof(*event), size), cpy;
369 void *dst = &evlist->event_copy; 378 void *dst = &md->event_copy;
370 379
371 do { 380 do {
372 cpy = min(md->mask + 1 - (offset & md->mask), len); 381 cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -376,7 +385,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
376 len -= cpy; 385 len -= cpy;
377 } while (len); 386 } while (len);
378 387
379 event = &evlist->event_copy; 388 event = &md->event_copy;
380 } 389 }
381 390
382 old += size; 391 old += size;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 56003f779e60..2dd07bd60b4f 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,10 +17,18 @@ struct perf_record_opts;
17#define PERF_EVLIST__HLIST_BITS 8 17#define PERF_EVLIST__HLIST_BITS 8
18#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 18#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
19 19
20struct perf_mmap {
21 void *base;
22 int mask;
23 unsigned int prev;
24 union perf_event event_copy;
25};
26
20struct perf_evlist { 27struct perf_evlist {
21 struct list_head entries; 28 struct list_head entries;
22 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 29 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
23 int nr_entries; 30 int nr_entries;
31 int nr_groups;
24 int nr_fds; 32 int nr_fds;
25 int nr_mmaps; 33 int nr_mmaps;
26 int mmap_len; 34 int mmap_len;
@@ -29,7 +37,6 @@ struct perf_evlist {
29 pid_t pid; 37 pid_t pid;
30 } workload; 38 } workload;
31 bool overwrite; 39 bool overwrite;
32 union perf_event event_copy;
33 struct perf_mmap *mmap; 40 struct perf_mmap *mmap;
34 struct pollfd *pollfd; 41 struct pollfd *pollfd;
35 struct thread_map *threads; 42 struct thread_map *threads;
@@ -76,8 +83,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
76 83
77int perf_evlist__open(struct perf_evlist *evlist); 84int perf_evlist__open(struct perf_evlist *evlist);
78 85
79void perf_evlist__config_attrs(struct perf_evlist *evlist, 86void perf_evlist__config(struct perf_evlist *evlist,
80 struct perf_record_opts *opts); 87 struct perf_record_opts *opts);
81 88
82int perf_evlist__prepare_workload(struct perf_evlist *evlist, 89int perf_evlist__prepare_workload(struct perf_evlist *evlist,
83 struct perf_record_opts *opts, 90 struct perf_record_opts *opts,
@@ -135,4 +142,25 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
135} 142}
136 143
137size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); 144size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
145
146static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
147{
148 struct perf_event_mmap_page *pc = mm->base;
149 int head = pc->data_head;
150 rmb();
151 return head;
152}
153
154static inline void perf_mmap__write_tail(struct perf_mmap *md,
155 unsigned long tail)
156{
157 struct perf_event_mmap_page *pc = md->base;
158
159 /*
160 * ensure all reads are done before we write the tail out.
161 */
162 /* mb(); */
163 pc->data_tail = tail;
164}
165
138#endif /* __PERF_EVLIST_H */ 166#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1b16dd1edc8e..9c82f98f26de 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -22,6 +22,11 @@
22#include <linux/perf_event.h> 22#include <linux/perf_event.h>
23#include "perf_regs.h" 23#include "perf_regs.h"
24 24
25static struct {
26 bool sample_id_all;
27 bool exclude_guest;
28} perf_missing_features;
29
25#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 30#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
26 31
27static int __perf_evsel__sample_size(u64 sample_type) 32static int __perf_evsel__sample_size(u64 sample_type)
@@ -50,11 +55,36 @@ void hists__init(struct hists *hists)
50 pthread_mutex_init(&hists->lock, NULL); 55 pthread_mutex_init(&hists->lock, NULL);
51} 56}
52 57
58void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
59 enum perf_event_sample_format bit)
60{
61 if (!(evsel->attr.sample_type & bit)) {
62 evsel->attr.sample_type |= bit;
63 evsel->sample_size += sizeof(u64);
64 }
65}
66
67void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
68 enum perf_event_sample_format bit)
69{
70 if (evsel->attr.sample_type & bit) {
71 evsel->attr.sample_type &= ~bit;
72 evsel->sample_size -= sizeof(u64);
73 }
74}
75
76void perf_evsel__set_sample_id(struct perf_evsel *evsel)
77{
78 perf_evsel__set_sample_bit(evsel, ID);
79 evsel->attr.read_format |= PERF_FORMAT_ID;
80}
81
53void perf_evsel__init(struct perf_evsel *evsel, 82void perf_evsel__init(struct perf_evsel *evsel,
54 struct perf_event_attr *attr, int idx) 83 struct perf_event_attr *attr, int idx)
55{ 84{
56 evsel->idx = idx; 85 evsel->idx = idx;
57 evsel->attr = *attr; 86 evsel->attr = *attr;
87 evsel->leader = evsel;
58 INIT_LIST_HEAD(&evsel->node); 88 INIT_LIST_HEAD(&evsel->node);
59 hists__init(&evsel->hists); 89 hists__init(&evsel->hists);
60 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); 90 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@@ -404,6 +434,31 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
404 return evsel->name ?: "unknown"; 434 return evsel->name ?: "unknown";
405} 435}
406 436
437const char *perf_evsel__group_name(struct perf_evsel *evsel)
438{
439 return evsel->group_name ?: "anon group";
440}
441
442int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
443{
444 int ret;
445 struct perf_evsel *pos;
446 const char *group_name = perf_evsel__group_name(evsel);
447
448 ret = scnprintf(buf, size, "%s", group_name);
449
450 ret += scnprintf(buf + ret, size - ret, " { %s",
451 perf_evsel__name(evsel));
452
453 for_each_group_member(pos, evsel)
454 ret += scnprintf(buf + ret, size - ret, ", %s",
455 perf_evsel__name(pos));
456
457 ret += scnprintf(buf + ret, size - ret, " }");
458
459 return ret;
460}
461
407/* 462/*
408 * The enable_on_exec/disabled value strategy: 463 * The enable_on_exec/disabled value strategy:
409 * 464 *
@@ -438,13 +493,11 @@ void perf_evsel__config(struct perf_evsel *evsel,
438 struct perf_event_attr *attr = &evsel->attr; 493 struct perf_event_attr *attr = &evsel->attr;
439 int track = !evsel->idx; /* only the first counter needs these */ 494 int track = !evsel->idx; /* only the first counter needs these */
440 495
441 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; 496 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
442 attr->inherit = !opts->no_inherit; 497 attr->inherit = !opts->no_inherit;
443 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
444 PERF_FORMAT_TOTAL_TIME_RUNNING |
445 PERF_FORMAT_ID;
446 498
447 attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; 499 perf_evsel__set_sample_bit(evsel, IP);
500 perf_evsel__set_sample_bit(evsel, TID);
448 501
449 /* 502 /*
450 * We default some events to a 1 default interval. But keep 503 * We default some events to a 1 default interval. But keep
@@ -453,7 +506,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
453 if (!attr->sample_period || (opts->user_freq != UINT_MAX && 506 if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
454 opts->user_interval != ULLONG_MAX)) { 507 opts->user_interval != ULLONG_MAX)) {
455 if (opts->freq) { 508 if (opts->freq) {
456 attr->sample_type |= PERF_SAMPLE_PERIOD; 509 perf_evsel__set_sample_bit(evsel, PERIOD);
457 attr->freq = 1; 510 attr->freq = 1;
458 attr->sample_freq = opts->freq; 511 attr->sample_freq = opts->freq;
459 } else { 512 } else {
@@ -468,16 +521,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
468 attr->inherit_stat = 1; 521 attr->inherit_stat = 1;
469 522
470 if (opts->sample_address) { 523 if (opts->sample_address) {
471 attr->sample_type |= PERF_SAMPLE_ADDR; 524 perf_evsel__set_sample_bit(evsel, ADDR);
472 attr->mmap_data = track; 525 attr->mmap_data = track;
473 } 526 }
474 527
475 if (opts->call_graph) { 528 if (opts->call_graph) {
476 attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 529 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
477 530
478 if (opts->call_graph == CALLCHAIN_DWARF) { 531 if (opts->call_graph == CALLCHAIN_DWARF) {
479 attr->sample_type |= PERF_SAMPLE_REGS_USER | 532 perf_evsel__set_sample_bit(evsel, REGS_USER);
480 PERF_SAMPLE_STACK_USER; 533 perf_evsel__set_sample_bit(evsel, STACK_USER);
481 attr->sample_regs_user = PERF_REGS_MASK; 534 attr->sample_regs_user = PERF_REGS_MASK;
482 attr->sample_stack_user = opts->stack_dump_size; 535 attr->sample_stack_user = opts->stack_dump_size;
483 attr->exclude_callchain_user = 1; 536 attr->exclude_callchain_user = 1;
@@ -485,20 +538,20 @@ void perf_evsel__config(struct perf_evsel *evsel,
485 } 538 }
486 539
487 if (perf_target__has_cpu(&opts->target)) 540 if (perf_target__has_cpu(&opts->target))
488 attr->sample_type |= PERF_SAMPLE_CPU; 541 perf_evsel__set_sample_bit(evsel, CPU);
489 542
490 if (opts->period) 543 if (opts->period)
491 attr->sample_type |= PERF_SAMPLE_PERIOD; 544 perf_evsel__set_sample_bit(evsel, PERIOD);
492 545
493 if (!opts->sample_id_all_missing && 546 if (!perf_missing_features.sample_id_all &&
494 (opts->sample_time || !opts->no_inherit || 547 (opts->sample_time || !opts->no_inherit ||
495 perf_target__has_cpu(&opts->target))) 548 perf_target__has_cpu(&opts->target)))
496 attr->sample_type |= PERF_SAMPLE_TIME; 549 perf_evsel__set_sample_bit(evsel, TIME);
497 550
498 if (opts->raw_samples) { 551 if (opts->raw_samples) {
499 attr->sample_type |= PERF_SAMPLE_TIME; 552 perf_evsel__set_sample_bit(evsel, TIME);
500 attr->sample_type |= PERF_SAMPLE_RAW; 553 perf_evsel__set_sample_bit(evsel, RAW);
501 attr->sample_type |= PERF_SAMPLE_CPU; 554 perf_evsel__set_sample_bit(evsel, CPU);
502 } 555 }
503 556
504 if (opts->no_delay) { 557 if (opts->no_delay) {
@@ -506,7 +559,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
506 attr->wakeup_events = 1; 559 attr->wakeup_events = 1;
507 } 560 }
508 if (opts->branch_stack) { 561 if (opts->branch_stack) {
509 attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; 562 perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
510 attr->branch_sample_type = opts->branch_stack; 563 attr->branch_sample_type = opts->branch_stack;
511 } 564 }
512 565
@@ -519,14 +572,14 @@ void perf_evsel__config(struct perf_evsel *evsel,
519 * Disabling only independent events or group leaders, 572 * Disabling only independent events or group leaders,
520 * keeping group members enabled. 573 * keeping group members enabled.
521 */ 574 */
522 if (!perf_evsel__is_group_member(evsel)) 575 if (perf_evsel__is_group_leader(evsel))
523 attr->disabled = 1; 576 attr->disabled = 1;
524 577
525 /* 578 /*
526 * Setting enable_on_exec for independent events and 579 * Setting enable_on_exec for independent events and
527 * group leaders for traced executed by perf. 580 * group leaders for traced executed by perf.
528 */ 581 */
529 if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel)) 582 if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
530 attr->enable_on_exec = 1; 583 attr->enable_on_exec = 1;
531} 584}
532 585
@@ -612,6 +665,11 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
612 } 665 }
613} 666}
614 667
668void perf_evsel__free_counts(struct perf_evsel *evsel)
669{
670 free(evsel->counts);
671}
672
615void perf_evsel__exit(struct perf_evsel *evsel) 673void perf_evsel__exit(struct perf_evsel *evsel)
616{ 674{
617 assert(list_empty(&evsel->node)); 675 assert(list_empty(&evsel->node));
@@ -631,6 +689,28 @@ void perf_evsel__delete(struct perf_evsel *evsel)
631 free(evsel); 689 free(evsel);
632} 690}
633 691
692static inline void compute_deltas(struct perf_evsel *evsel,
693 int cpu,
694 struct perf_counts_values *count)
695{
696 struct perf_counts_values tmp;
697
698 if (!evsel->prev_raw_counts)
699 return;
700
701 if (cpu == -1) {
702 tmp = evsel->prev_raw_counts->aggr;
703 evsel->prev_raw_counts->aggr = *count;
704 } else {
705 tmp = evsel->prev_raw_counts->cpu[cpu];
706 evsel->prev_raw_counts->cpu[cpu] = *count;
707 }
708
709 count->val = count->val - tmp.val;
710 count->ena = count->ena - tmp.ena;
711 count->run = count->run - tmp.run;
712}
713
634int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, 714int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
635 int cpu, int thread, bool scale) 715 int cpu, int thread, bool scale)
636{ 716{
@@ -646,6 +726,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
646 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) 726 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
647 return -errno; 727 return -errno;
648 728
729 compute_deltas(evsel, cpu, &count);
730
649 if (scale) { 731 if (scale) {
650 if (count.run == 0) 732 if (count.run == 0)
651 count.val = 0; 733 count.val = 0;
@@ -684,6 +766,8 @@ int __perf_evsel__read(struct perf_evsel *evsel,
684 } 766 }
685 } 767 }
686 768
769 compute_deltas(evsel, -1, aggr);
770
687 evsel->counts->scaled = 0; 771 evsel->counts->scaled = 0;
688 if (scale) { 772 if (scale) {
689 if (aggr->run == 0) { 773 if (aggr->run == 0) {
@@ -707,7 +791,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
707 struct perf_evsel *leader = evsel->leader; 791 struct perf_evsel *leader = evsel->leader;
708 int fd; 792 int fd;
709 793
710 if (!perf_evsel__is_group_member(evsel)) 794 if (perf_evsel__is_group_leader(evsel))
711 return -1; 795 return -1;
712 796
713 /* 797 /*
@@ -738,6 +822,13 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
738 pid = evsel->cgrp->fd; 822 pid = evsel->cgrp->fd;
739 } 823 }
740 824
825fallback_missing_features:
826 if (perf_missing_features.exclude_guest)
827 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
828retry_sample_id:
829 if (perf_missing_features.sample_id_all)
830 evsel->attr.sample_id_all = 0;
831
741 for (cpu = 0; cpu < cpus->nr; cpu++) { 832 for (cpu = 0; cpu < cpus->nr; cpu++) {
742 833
743 for (thread = 0; thread < threads->nr; thread++) { 834 for (thread = 0; thread < threads->nr; thread++) {
@@ -754,13 +845,26 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
754 group_fd, flags); 845 group_fd, flags);
755 if (FD(evsel, cpu, thread) < 0) { 846 if (FD(evsel, cpu, thread) < 0) {
756 err = -errno; 847 err = -errno;
757 goto out_close; 848 goto try_fallback;
758 } 849 }
759 } 850 }
760 } 851 }
761 852
762 return 0; 853 return 0;
763 854
855try_fallback:
856 if (err != -EINVAL || cpu > 0 || thread > 0)
857 goto out_close;
858
859 if (!perf_missing_features.exclude_guest &&
860 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
861 perf_missing_features.exclude_guest = true;
862 goto fallback_missing_features;
863 } else if (!perf_missing_features.sample_id_all) {
864 perf_missing_features.sample_id_all = true;
865 goto retry_sample_id;
866 }
867
764out_close: 868out_close:
765 do { 869 do {
766 while (--thread >= 0) { 870 while (--thread >= 0) {
@@ -1205,3 +1309,225 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
1205 1309
1206 return 0; 1310 return 0;
1207} 1311}
1312
1313static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
1314{
1315 va_list args;
1316 int ret = 0;
1317
1318 if (!*first) {
1319 ret += fprintf(fp, ",");
1320 } else {
1321 ret += fprintf(fp, ":");
1322 *first = false;
1323 }
1324
1325 va_start(args, fmt);
1326 ret += vfprintf(fp, fmt, args);
1327 va_end(args);
1328 return ret;
1329}
1330
1331static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
1332{
1333 if (value == 0)
1334 return 0;
1335
1336 return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
1337}
1338
1339#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
1340
1341struct bit_names {
1342 int bit;
1343 const char *name;
1344};
1345
1346static int bits__fprintf(FILE *fp, const char *field, u64 value,
1347 struct bit_names *bits, bool *first)
1348{
1349 int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
1350 bool first_bit = true;
1351
1352 do {
1353 if (value & bits[i].bit) {
1354 printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
1355 first_bit = false;
1356 }
1357 } while (bits[++i].name != NULL);
1358
1359 return printed;
1360}
1361
1362static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
1363{
1364#define bit_name(n) { PERF_SAMPLE_##n, #n }
1365 struct bit_names bits[] = {
1366 bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1367 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1368 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1369 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1370 { .name = NULL, }
1371 };
1372#undef bit_name
1373 return bits__fprintf(fp, "sample_type", value, bits, first);
1374}
1375
1376static int read_format__fprintf(FILE *fp, bool *first, u64 value)
1377{
1378#define bit_name(n) { PERF_FORMAT_##n, #n }
1379 struct bit_names bits[] = {
1380 bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1381 bit_name(ID), bit_name(GROUP),
1382 { .name = NULL, }
1383 };
1384#undef bit_name
1385 return bits__fprintf(fp, "read_format", value, bits, first);
1386}
1387
1388int perf_evsel__fprintf(struct perf_evsel *evsel,
1389 struct perf_attr_details *details, FILE *fp)
1390{
1391 bool first = true;
1392 int printed = 0;
1393
1394 if (details->event_group) {
1395 struct perf_evsel *pos;
1396
1397 if (!perf_evsel__is_group_leader(evsel))
1398 return 0;
1399
1400 if (evsel->nr_members > 1)
1401 printed += fprintf(fp, "%s{", evsel->group_name ?: "");
1402
1403 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
1404 for_each_group_member(pos, evsel)
1405 printed += fprintf(fp, ",%s", perf_evsel__name(pos));
1406
1407 if (evsel->nr_members > 1)
1408 printed += fprintf(fp, "}");
1409 goto out;
1410 }
1411
1412 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
1413
1414 if (details->verbose || details->freq) {
1415 printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
1416 (u64)evsel->attr.sample_freq);
1417 }
1418
1419 if (details->verbose) {
1420 if_print(type);
1421 if_print(config);
1422 if_print(config1);
1423 if_print(config2);
1424 if_print(size);
1425 printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
1426 if (evsel->attr.read_format)
1427 printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
1428 if_print(disabled);
1429 if_print(inherit);
1430 if_print(pinned);
1431 if_print(exclusive);
1432 if_print(exclude_user);
1433 if_print(exclude_kernel);
1434 if_print(exclude_hv);
1435 if_print(exclude_idle);
1436 if_print(mmap);
1437 if_print(comm);
1438 if_print(freq);
1439 if_print(inherit_stat);
1440 if_print(enable_on_exec);
1441 if_print(task);
1442 if_print(watermark);
1443 if_print(precise_ip);
1444 if_print(mmap_data);
1445 if_print(sample_id_all);
1446 if_print(exclude_host);
1447 if_print(exclude_guest);
1448 if_print(__reserved_1);
1449 if_print(wakeup_events);
1450 if_print(bp_type);
1451 if_print(branch_sample_type);
1452 }
1453out:
1454 fputc('\n', fp);
1455 return ++printed;
1456}
1457
1458bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
1459 char *msg, size_t msgsize)
1460{
1461 if ((err == ENOENT || err == ENXIO) &&
1462 evsel->attr.type == PERF_TYPE_HARDWARE &&
1463 evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
1464 /*
1465 * If it's cycles then fall back to hrtimer based
1466 * cpu-clock-tick sw counter, which is always available even if
1467 * no PMU support.
1468 *
1469 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
1470 * b0a873e).
1471 */
1472 scnprintf(msg, msgsize, "%s",
1473"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
1474
1475 evsel->attr.type = PERF_TYPE_SOFTWARE;
1476 evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
1477
1478 free(evsel->name);
1479 evsel->name = NULL;
1480 return true;
1481 }
1482
1483 return false;
1484}
1485
1486int perf_evsel__open_strerror(struct perf_evsel *evsel,
1487 struct perf_target *target,
1488 int err, char *msg, size_t size)
1489{
1490 switch (err) {
1491 case EPERM:
1492 case EACCES:
1493 return scnprintf(msg, size, "%s",
1494 "You may not have permission to collect %sstats.\n"
1495 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
1496 " -1 - Not paranoid at all\n"
1497 " 0 - Disallow raw tracepoint access for unpriv\n"
1498 " 1 - Disallow cpu events for unpriv\n"
1499 " 2 - Disallow kernel profiling for unpriv",
1500 target->system_wide ? "system-wide " : "");
1501 case ENOENT:
1502 return scnprintf(msg, size, "The %s event is not supported.",
1503 perf_evsel__name(evsel));
1504 case EMFILE:
1505 return scnprintf(msg, size, "%s",
1506 "Too many events are opened.\n"
1507 "Try again after reducing the number of events.");
1508 case ENODEV:
1509 if (target->cpu_list)
1510 return scnprintf(msg, size, "%s",
1511 "No such device - did you specify an out-of-range profile CPU?\n");
1512 break;
1513 case EOPNOTSUPP:
1514 if (evsel->attr.precise_ip)
1515 return scnprintf(msg, size, "%s",
1516 "\'precise\' request may not be supported. Try removing 'p' modifier.");
1517#if defined(__i386__) || defined(__x86_64__)
1518 if (evsel->attr.type == PERF_TYPE_HARDWARE)
1519 return scnprintf(msg, size, "%s",
1520 "No hardware sampling interrupt available.\n"
1521 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
1522#endif
1523 break;
1524 default:
1525 break;
1526 }
1527
1528 return scnprintf(msg, size,
1529 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n"
1530 "/bin/dmesg may provide additional information.\n"
1531 "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
1532 err, strerror(err), perf_evsel__name(evsel));
1533}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3d2b8017438c..52021c3087df 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -53,6 +53,7 @@ struct perf_evsel {
53 struct xyarray *sample_id; 53 struct xyarray *sample_id;
54 u64 *id; 54 u64 *id;
55 struct perf_counts *counts; 55 struct perf_counts *counts;
56 struct perf_counts *prev_raw_counts;
56 int idx; 57 int idx;
57 u32 ids; 58 u32 ids;
58 struct hists hists; 59 struct hists hists;
@@ -73,10 +74,13 @@ struct perf_evsel {
73 bool needs_swap; 74 bool needs_swap;
74 /* parse modifier helper */ 75 /* parse modifier helper */
75 int exclude_GH; 76 int exclude_GH;
77 int nr_members;
76 struct perf_evsel *leader; 78 struct perf_evsel *leader;
77 char *group_name; 79 char *group_name;
78}; 80};
79 81
82#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
83
80struct cpu_map; 84struct cpu_map;
81struct thread_map; 85struct thread_map;
82struct perf_evlist; 86struct perf_evlist;
@@ -110,14 +114,30 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
110int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, 114int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
111 char *bf, size_t size); 115 char *bf, size_t size);
112const char *perf_evsel__name(struct perf_evsel *evsel); 116const char *perf_evsel__name(struct perf_evsel *evsel);
117const char *perf_evsel__group_name(struct perf_evsel *evsel);
118int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
113 119
114int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 120int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
115int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); 121int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
116int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); 122int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
117void perf_evsel__free_fd(struct perf_evsel *evsel); 123void perf_evsel__free_fd(struct perf_evsel *evsel);
118void perf_evsel__free_id(struct perf_evsel *evsel); 124void perf_evsel__free_id(struct perf_evsel *evsel);
125void perf_evsel__free_counts(struct perf_evsel *evsel);
119void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 126void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
120 127
128void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
129 enum perf_event_sample_format bit);
130void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
131 enum perf_event_sample_format bit);
132
133#define perf_evsel__set_sample_bit(evsel, bit) \
134 __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
135
136#define perf_evsel__reset_sample_bit(evsel, bit) \
137 __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
138
139void perf_evsel__set_sample_id(struct perf_evsel *evsel);
140
121int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, 141int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
122 const char *filter); 142 const char *filter);
123 143
@@ -226,8 +246,34 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
226 return list_entry(evsel->node.next, struct perf_evsel, node); 246 return list_entry(evsel->node.next, struct perf_evsel, node);
227} 247}
228 248
229static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel) 249static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
250{
251 return evsel->leader == evsel;
252}
253
254struct perf_attr_details {
255 bool freq;
256 bool verbose;
257 bool event_group;
258};
259
260int perf_evsel__fprintf(struct perf_evsel *evsel,
261 struct perf_attr_details *details, FILE *fp);
262
263bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
264 char *msg, size_t msgsize);
265int perf_evsel__open_strerror(struct perf_evsel *evsel,
266 struct perf_target *target,
267 int err, char *msg, size_t size);
268
269static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
230{ 270{
231 return evsel->leader != NULL; 271 return evsel->idx - evsel->leader->idx;
232} 272}
273
274#define for_each_group_member(_evsel, _leader) \
275for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
276 (_evsel) && (_evsel)->leader == (_leader); \
277 (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
278
233#endif /* __PERF_EVSEL_H */ 279#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b7da4634a047..f4bfd79ef6a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -148,7 +148,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
148 u32 len; 148 u32 len;
149 char *buf; 149 char *buf;
150 150
151 sz = read(fd, &len, sizeof(len)); 151 sz = readn(fd, &len, sizeof(len));
152 if (sz < (ssize_t)sizeof(len)) 152 if (sz < (ssize_t)sizeof(len))
153 return NULL; 153 return NULL;
154 154
@@ -159,7 +159,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
159 if (!buf) 159 if (!buf)
160 return NULL; 160 return NULL;
161 161
162 ret = read(fd, buf, len); 162 ret = readn(fd, buf, len);
163 if (ret == (ssize_t)len) { 163 if (ret == (ssize_t)len) {
164 /* 164 /*
165 * strings are padded by zeroes 165 * strings are padded by zeroes
@@ -287,12 +287,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
287 struct perf_session *session = container_of(header, 287 struct perf_session *session = container_of(header,
288 struct perf_session, header); 288 struct perf_session, header);
289 struct rb_node *nd; 289 struct rb_node *nd;
290 int err = machine__write_buildid_table(&session->host_machine, fd); 290 int err = machine__write_buildid_table(&session->machines.host, fd);
291 291
292 if (err) 292 if (err)
293 return err; 293 return err;
294 294
295 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 295 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
296 struct machine *pos = rb_entry(nd, struct machine, rb_node); 296 struct machine *pos = rb_entry(nd, struct machine, rb_node);
297 err = machine__write_buildid_table(pos, fd); 297 err = machine__write_buildid_table(pos, fd);
298 if (err) 298 if (err)
@@ -313,7 +313,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
313 if (is_kallsyms) { 313 if (is_kallsyms) {
314 if (symbol_conf.kptr_restrict) { 314 if (symbol_conf.kptr_restrict) {
315 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); 315 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
316 return 0; 316 err = 0;
317 goto out_free;
317 } 318 }
318 realname = (char *) name; 319 realname = (char *) name;
319 } else 320 } else
@@ -448,9 +449,9 @@ static int perf_session__cache_build_ids(struct perf_session *session)
448 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) 449 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
449 return -1; 450 return -1;
450 451
451 ret = machine__cache_build_ids(&session->host_machine, debugdir); 452 ret = machine__cache_build_ids(&session->machines.host, debugdir);
452 453
453 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 454 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
454 struct machine *pos = rb_entry(nd, struct machine, rb_node); 455 struct machine *pos = rb_entry(nd, struct machine, rb_node);
455 ret |= machine__cache_build_ids(pos, debugdir); 456 ret |= machine__cache_build_ids(pos, debugdir);
456 } 457 }
@@ -467,9 +468,9 @@ static bool machine__read_build_ids(struct machine *machine, bool with_hits)
467static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) 468static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
468{ 469{
469 struct rb_node *nd; 470 struct rb_node *nd;
470 bool ret = machine__read_build_ids(&session->host_machine, with_hits); 471 bool ret = machine__read_build_ids(&session->machines.host, with_hits);
471 472
472 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 473 for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
473 struct machine *pos = rb_entry(nd, struct machine, rb_node); 474 struct machine *pos = rb_entry(nd, struct machine, rb_node);
474 ret |= machine__read_build_ids(pos, with_hits); 475 ret |= machine__read_build_ids(pos, with_hits);
475 } 476 }
@@ -954,6 +955,7 @@ static int write_topo_node(int fd, int node)
954 } 955 }
955 956
956 fclose(fp); 957 fclose(fp);
958 fp = NULL;
957 959
958 ret = do_write(fd, &mem_total, sizeof(u64)); 960 ret = do_write(fd, &mem_total, sizeof(u64));
959 if (ret) 961 if (ret)
@@ -980,7 +982,8 @@ static int write_topo_node(int fd, int node)
980 ret = do_write_string(fd, buf); 982 ret = do_write_string(fd, buf);
981done: 983done:
982 free(buf); 984 free(buf);
983 fclose(fp); 985 if (fp)
986 fclose(fp);
984 return ret; 987 return ret;
985} 988}
986 989
@@ -1051,16 +1054,25 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
1051 struct perf_pmu *pmu = NULL; 1054 struct perf_pmu *pmu = NULL;
1052 off_t offset = lseek(fd, 0, SEEK_CUR); 1055 off_t offset = lseek(fd, 0, SEEK_CUR);
1053 __u32 pmu_num = 0; 1056 __u32 pmu_num = 0;
1057 int ret;
1054 1058
1055 /* write real pmu_num later */ 1059 /* write real pmu_num later */
1056 do_write(fd, &pmu_num, sizeof(pmu_num)); 1060 ret = do_write(fd, &pmu_num, sizeof(pmu_num));
1061 if (ret < 0)
1062 return ret;
1057 1063
1058 while ((pmu = perf_pmu__scan(pmu))) { 1064 while ((pmu = perf_pmu__scan(pmu))) {
1059 if (!pmu->name) 1065 if (!pmu->name)
1060 continue; 1066 continue;
1061 pmu_num++; 1067 pmu_num++;
1062 do_write(fd, &pmu->type, sizeof(pmu->type)); 1068
1063 do_write_string(fd, pmu->name); 1069 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
1070 if (ret < 0)
1071 return ret;
1072
1073 ret = do_write_string(fd, pmu->name);
1074 if (ret < 0)
1075 return ret;
1064 } 1076 }
1065 1077
1066 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { 1078 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
@@ -1073,6 +1085,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
1073} 1085}
1074 1086
1075/* 1087/*
1088 * File format:
1089 *
1090 * struct group_descs {
1091 * u32 nr_groups;
1092 * struct group_desc {
1093 * char name[];
1094 * u32 leader_idx;
1095 * u32 nr_members;
1096 * }[nr_groups];
1097 * };
1098 */
1099static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
1100 struct perf_evlist *evlist)
1101{
1102 u32 nr_groups = evlist->nr_groups;
1103 struct perf_evsel *evsel;
1104 int ret;
1105
1106 ret = do_write(fd, &nr_groups, sizeof(nr_groups));
1107 if (ret < 0)
1108 return ret;
1109
1110 list_for_each_entry(evsel, &evlist->entries, node) {
1111 if (perf_evsel__is_group_leader(evsel) &&
1112 evsel->nr_members > 1) {
1113 const char *name = evsel->group_name ?: "{anon_group}";
1114 u32 leader_idx = evsel->idx;
1115 u32 nr_members = evsel->nr_members;
1116
1117 ret = do_write_string(fd, name);
1118 if (ret < 0)
1119 return ret;
1120
1121 ret = do_write(fd, &leader_idx, sizeof(leader_idx));
1122 if (ret < 0)
1123 return ret;
1124
1125 ret = do_write(fd, &nr_members, sizeof(nr_members));
1126 if (ret < 0)
1127 return ret;
1128 }
1129 }
1130 return 0;
1131}
1132
1133/*
1076 * default get_cpuid(): nothing gets recorded 1134 * default get_cpuid(): nothing gets recorded
1077 * actual implementation must be in arch/$(ARCH)/util/header.c 1135 * actual implementation must be in arch/$(ARCH)/util/header.c
1078 */ 1136 */
@@ -1209,14 +1267,14 @@ read_event_desc(struct perf_header *ph, int fd)
1209 size_t msz; 1267 size_t msz;
1210 1268
1211 /* number of events */ 1269 /* number of events */
1212 ret = read(fd, &nre, sizeof(nre)); 1270 ret = readn(fd, &nre, sizeof(nre));
1213 if (ret != (ssize_t)sizeof(nre)) 1271 if (ret != (ssize_t)sizeof(nre))
1214 goto error; 1272 goto error;
1215 1273
1216 if (ph->needs_swap) 1274 if (ph->needs_swap)
1217 nre = bswap_32(nre); 1275 nre = bswap_32(nre);
1218 1276
1219 ret = read(fd, &sz, sizeof(sz)); 1277 ret = readn(fd, &sz, sizeof(sz));
1220 if (ret != (ssize_t)sizeof(sz)) 1278 if (ret != (ssize_t)sizeof(sz))
1221 goto error; 1279 goto error;
1222 1280
@@ -1244,7 +1302,7 @@ read_event_desc(struct perf_header *ph, int fd)
1244 * must read entire on-file attr struct to 1302 * must read entire on-file attr struct to
1245 * sync up with layout. 1303 * sync up with layout.
1246 */ 1304 */
1247 ret = read(fd, buf, sz); 1305 ret = readn(fd, buf, sz);
1248 if (ret != (ssize_t)sz) 1306 if (ret != (ssize_t)sz)
1249 goto error; 1307 goto error;
1250 1308
@@ -1253,7 +1311,7 @@ read_event_desc(struct perf_header *ph, int fd)
1253 1311
1254 memcpy(&evsel->attr, buf, msz); 1312 memcpy(&evsel->attr, buf, msz);
1255 1313
1256 ret = read(fd, &nr, sizeof(nr)); 1314 ret = readn(fd, &nr, sizeof(nr));
1257 if (ret != (ssize_t)sizeof(nr)) 1315 if (ret != (ssize_t)sizeof(nr))
1258 goto error; 1316 goto error;
1259 1317
@@ -1274,7 +1332,7 @@ read_event_desc(struct perf_header *ph, int fd)
1274 evsel->id = id; 1332 evsel->id = id;
1275 1333
1276 for (j = 0 ; j < nr; j++) { 1334 for (j = 0 ; j < nr; j++) {
1277 ret = read(fd, id, sizeof(*id)); 1335 ret = readn(fd, id, sizeof(*id));
1278 if (ret != (ssize_t)sizeof(*id)) 1336 if (ret != (ssize_t)sizeof(*id))
1279 goto error; 1337 goto error;
1280 if (ph->needs_swap) 1338 if (ph->needs_swap)
@@ -1435,6 +1493,31 @@ error:
1435 fprintf(fp, "# pmu mappings: unable to read\n"); 1493 fprintf(fp, "# pmu mappings: unable to read\n");
1436} 1494}
1437 1495
1496static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1497 FILE *fp)
1498{
1499 struct perf_session *session;
1500 struct perf_evsel *evsel;
1501 u32 nr = 0;
1502
1503 session = container_of(ph, struct perf_session, header);
1504
1505 list_for_each_entry(evsel, &session->evlist->entries, node) {
1506 if (perf_evsel__is_group_leader(evsel) &&
1507 evsel->nr_members > 1) {
1508 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1509 perf_evsel__name(evsel));
1510
1511 nr = evsel->nr_members - 1;
1512 } else if (nr) {
1513 fprintf(fp, ",%s", perf_evsel__name(evsel));
1514
1515 if (--nr == 0)
1516 fprintf(fp, "}\n");
1517 }
1518 }
1519}
1520
1438static int __event_process_build_id(struct build_id_event *bev, 1521static int __event_process_build_id(struct build_id_event *bev,
1439 char *filename, 1522 char *filename,
1440 struct perf_session *session) 1523 struct perf_session *session)
@@ -1506,14 +1589,14 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1506 while (offset < limit) { 1589 while (offset < limit) {
1507 ssize_t len; 1590 ssize_t len;
1508 1591
1509 if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1592 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1510 return -1; 1593 return -1;
1511 1594
1512 if (header->needs_swap) 1595 if (header->needs_swap)
1513 perf_event_header__bswap(&old_bev.header); 1596 perf_event_header__bswap(&old_bev.header);
1514 1597
1515 len = old_bev.header.size - sizeof(old_bev); 1598 len = old_bev.header.size - sizeof(old_bev);
1516 if (read(input, filename, len) != len) 1599 if (readn(input, filename, len) != len)
1517 return -1; 1600 return -1;
1518 1601
1519 bev.header = old_bev.header; 1602 bev.header = old_bev.header;
@@ -1548,14 +1631,14 @@ static int perf_header__read_build_ids(struct perf_header *header,
1548 while (offset < limit) { 1631 while (offset < limit) {
1549 ssize_t len; 1632 ssize_t len;
1550 1633
1551 if (read(input, &bev, sizeof(bev)) != sizeof(bev)) 1634 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1552 goto out; 1635 goto out;
1553 1636
1554 if (header->needs_swap) 1637 if (header->needs_swap)
1555 perf_event_header__bswap(&bev.header); 1638 perf_event_header__bswap(&bev.header);
1556 1639
1557 len = bev.header.size - sizeof(bev); 1640 len = bev.header.size - sizeof(bev);
1558 if (read(input, filename, len) != len) 1641 if (readn(input, filename, len) != len)
1559 goto out; 1642 goto out;
1560 /* 1643 /*
1561 * The a1645ce1 changeset: 1644 * The a1645ce1 changeset:
@@ -1641,7 +1724,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1641 size_t ret; 1724 size_t ret;
1642 u32 nr; 1725 u32 nr;
1643 1726
1644 ret = read(fd, &nr, sizeof(nr)); 1727 ret = readn(fd, &nr, sizeof(nr));
1645 if (ret != sizeof(nr)) 1728 if (ret != sizeof(nr))
1646 return -1; 1729 return -1;
1647 1730
@@ -1650,7 +1733,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1650 1733
1651 ph->env.nr_cpus_online = nr; 1734 ph->env.nr_cpus_online = nr;
1652 1735
1653 ret = read(fd, &nr, sizeof(nr)); 1736 ret = readn(fd, &nr, sizeof(nr));
1654 if (ret != sizeof(nr)) 1737 if (ret != sizeof(nr))
1655 return -1; 1738 return -1;
1656 1739
@@ -1684,7 +1767,7 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused,
1684 uint64_t mem; 1767 uint64_t mem;
1685 size_t ret; 1768 size_t ret;
1686 1769
1687 ret = read(fd, &mem, sizeof(mem)); 1770 ret = readn(fd, &mem, sizeof(mem));
1688 if (ret != sizeof(mem)) 1771 if (ret != sizeof(mem))
1689 return -1; 1772 return -1;
1690 1773
@@ -1756,7 +1839,7 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused,
1756 u32 nr, i; 1839 u32 nr, i;
1757 struct strbuf sb; 1840 struct strbuf sb;
1758 1841
1759 ret = read(fd, &nr, sizeof(nr)); 1842 ret = readn(fd, &nr, sizeof(nr));
1760 if (ret != sizeof(nr)) 1843 if (ret != sizeof(nr))
1761 return -1; 1844 return -1;
1762 1845
@@ -1792,7 +1875,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1792 char *str; 1875 char *str;
1793 struct strbuf sb; 1876 struct strbuf sb;
1794 1877
1795 ret = read(fd, &nr, sizeof(nr)); 1878 ret = readn(fd, &nr, sizeof(nr));
1796 if (ret != sizeof(nr)) 1879 if (ret != sizeof(nr))
1797 return -1; 1880 return -1;
1798 1881
@@ -1813,7 +1896,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1813 } 1896 }
1814 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1897 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1815 1898
1816 ret = read(fd, &nr, sizeof(nr)); 1899 ret = readn(fd, &nr, sizeof(nr));
1817 if (ret != sizeof(nr)) 1900 if (ret != sizeof(nr))
1818 return -1; 1901 return -1;
1819 1902
@@ -1850,7 +1933,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
1850 struct strbuf sb; 1933 struct strbuf sb;
1851 1934
1852 /* nr nodes */ 1935 /* nr nodes */
1853 ret = read(fd, &nr, sizeof(nr)); 1936 ret = readn(fd, &nr, sizeof(nr));
1854 if (ret != sizeof(nr)) 1937 if (ret != sizeof(nr))
1855 goto error; 1938 goto error;
1856 1939
@@ -1862,15 +1945,15 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
1862 1945
1863 for (i = 0; i < nr; i++) { 1946 for (i = 0; i < nr; i++) {
1864 /* node number */ 1947 /* node number */
1865 ret = read(fd, &node, sizeof(node)); 1948 ret = readn(fd, &node, sizeof(node));
1866 if (ret != sizeof(node)) 1949 if (ret != sizeof(node))
1867 goto error; 1950 goto error;
1868 1951
1869 ret = read(fd, &mem_total, sizeof(u64)); 1952 ret = readn(fd, &mem_total, sizeof(u64));
1870 if (ret != sizeof(u64)) 1953 if (ret != sizeof(u64))
1871 goto error; 1954 goto error;
1872 1955
1873 ret = read(fd, &mem_free, sizeof(u64)); 1956 ret = readn(fd, &mem_free, sizeof(u64));
1874 if (ret != sizeof(u64)) 1957 if (ret != sizeof(u64))
1875 goto error; 1958 goto error;
1876 1959
@@ -1909,7 +1992,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
1909 u32 type; 1992 u32 type;
1910 struct strbuf sb; 1993 struct strbuf sb;
1911 1994
1912 ret = read(fd, &pmu_num, sizeof(pmu_num)); 1995 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1913 if (ret != sizeof(pmu_num)) 1996 if (ret != sizeof(pmu_num))
1914 return -1; 1997 return -1;
1915 1998
@@ -1925,7 +2008,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
1925 strbuf_init(&sb, 128); 2008 strbuf_init(&sb, 128);
1926 2009
1927 while (pmu_num) { 2010 while (pmu_num) {
1928 if (read(fd, &type, sizeof(type)) != sizeof(type)) 2011 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1929 goto error; 2012 goto error;
1930 if (ph->needs_swap) 2013 if (ph->needs_swap)
1931 type = bswap_32(type); 2014 type = bswap_32(type);
@@ -1949,6 +2032,98 @@ error:
1949 return -1; 2032 return -1;
1950} 2033}
1951 2034
2035static int process_group_desc(struct perf_file_section *section __maybe_unused,
2036 struct perf_header *ph, int fd,
2037 void *data __maybe_unused)
2038{
2039 size_t ret = -1;
2040 u32 i, nr, nr_groups;
2041 struct perf_session *session;
2042 struct perf_evsel *evsel, *leader = NULL;
2043 struct group_desc {
2044 char *name;
2045 u32 leader_idx;
2046 u32 nr_members;
2047 } *desc;
2048
2049 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2050 return -1;
2051
2052 if (ph->needs_swap)
2053 nr_groups = bswap_32(nr_groups);
2054
2055 ph->env.nr_groups = nr_groups;
2056 if (!nr_groups) {
2057 pr_debug("group desc not available\n");
2058 return 0;
2059 }
2060
2061 desc = calloc(nr_groups, sizeof(*desc));
2062 if (!desc)
2063 return -1;
2064
2065 for (i = 0; i < nr_groups; i++) {
2066 desc[i].name = do_read_string(fd, ph);
2067 if (!desc[i].name)
2068 goto out_free;
2069
2070 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2071 goto out_free;
2072
2073 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2074 goto out_free;
2075
2076 if (ph->needs_swap) {
2077 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2078 desc[i].nr_members = bswap_32(desc[i].nr_members);
2079 }
2080 }
2081
2082 /*
2083 * Rebuild group relationship based on the group_desc
2084 */
2085 session = container_of(ph, struct perf_session, header);
2086 session->evlist->nr_groups = nr_groups;
2087
2088 i = nr = 0;
2089 list_for_each_entry(evsel, &session->evlist->entries, node) {
2090 if (evsel->idx == (int) desc[i].leader_idx) {
2091 evsel->leader = evsel;
2092 /* {anon_group} is a dummy name */
2093 if (strcmp(desc[i].name, "{anon_group}"))
2094 evsel->group_name = desc[i].name;
2095 evsel->nr_members = desc[i].nr_members;
2096
2097 if (i >= nr_groups || nr > 0) {
2098 pr_debug("invalid group desc\n");
2099 goto out_free;
2100 }
2101
2102 leader = evsel;
2103 nr = evsel->nr_members - 1;
2104 i++;
2105 } else if (nr) {
2106 /* This is a group member */
2107 evsel->leader = leader;
2108
2109 nr--;
2110 }
2111 }
2112
2113 if (i != nr_groups || nr != 0) {
2114 pr_debug("invalid group desc\n");
2115 goto out_free;
2116 }
2117
2118 ret = 0;
2119out_free:
2120 while ((int) --i >= 0)
2121 free(desc[i].name);
2122 free(desc);
2123
2124 return ret;
2125}
2126
1952struct feature_ops { 2127struct feature_ops {
1953 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 2128 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
1954 void (*print)(struct perf_header *h, int fd, FILE *fp); 2129 void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -1988,6 +2163,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
1988 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), 2163 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
1989 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), 2164 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
1990 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), 2165 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
2166 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
1991}; 2167};
1992 2168
1993struct header_print_data { 2169struct header_print_data {
@@ -2077,7 +2253,7 @@ static int perf_header__adds_write(struct perf_header *header,
2077 if (!nr_sections) 2253 if (!nr_sections)
2078 return 0; 2254 return 0;
2079 2255
2080 feat_sec = p = calloc(sizeof(*feat_sec), nr_sections); 2256 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2081 if (feat_sec == NULL) 2257 if (feat_sec == NULL)
2082 return -ENOMEM; 2258 return -ENOMEM;
2083 2259
@@ -2249,7 +2425,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
2249 if (!nr_sections) 2425 if (!nr_sections)
2250 return 0; 2426 return 0;
2251 2427
2252 feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); 2428 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2253 if (!feat_sec) 2429 if (!feat_sec)
2254 return -1; 2430 return -1;
2255 2431
@@ -2912,16 +3088,22 @@ int perf_event__process_tracing_data(union perf_event *event,
2912 session->repipe); 3088 session->repipe);
2913 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 3089 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
2914 3090
2915 if (read(session->fd, buf, padding) < 0) 3091 if (readn(session->fd, buf, padding) < 0) {
2916 die("reading input file"); 3092 pr_err("%s: reading input file", __func__);
3093 return -1;
3094 }
2917 if (session->repipe) { 3095 if (session->repipe) {
2918 int retw = write(STDOUT_FILENO, buf, padding); 3096 int retw = write(STDOUT_FILENO, buf, padding);
2919 if (retw <= 0 || retw != padding) 3097 if (retw <= 0 || retw != padding) {
2920 die("repiping tracing data padding"); 3098 pr_err("%s: repiping tracing data padding", __func__);
3099 return -1;
3100 }
2921 } 3101 }
2922 3102
2923 if (size_read + padding != size) 3103 if (size_read + padding != size) {
2924 die("tracing data size mismatch"); 3104 pr_err("%s: tracing data size mismatch", __func__);
3105 return -1;
3106 }
2925 3107
2926 perf_evlist__prepare_tracepoint_events(session->evlist, 3108 perf_evlist__prepare_tracepoint_events(session->evlist,
2927 session->pevent); 3109 session->pevent);
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 20f0344accb1..c9fc55cada6d 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -29,6 +29,7 @@ enum {
29 HEADER_NUMA_TOPOLOGY, 29 HEADER_NUMA_TOPOLOGY,
30 HEADER_BRANCH_STACK, 30 HEADER_BRANCH_STACK,
31 HEADER_PMU_MAPPINGS, 31 HEADER_PMU_MAPPINGS,
32 HEADER_GROUP_DESC,
32 HEADER_LAST_FEATURE, 33 HEADER_LAST_FEATURE,
33 HEADER_FEAT_BITS = 256, 34 HEADER_FEAT_BITS = 256,
34}; 35};
@@ -79,6 +80,7 @@ struct perf_session_env {
79 char *numa_nodes; 80 char *numa_nodes;
80 int nr_pmu_mappings; 81 int nr_pmu_mappings;
81 char *pmu_mappings; 82 char *pmu_mappings;
83 int nr_groups;
82}; 84};
83 85
84struct perf_header { 86struct perf_header {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index cb17e2a8c6ed..f855941bebea 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -4,6 +4,7 @@
4#include "hist.h" 4#include "hist.h"
5#include "session.h" 5#include "session.h"
6#include "sort.h" 6#include "sort.h"
7#include "evsel.h"
7#include <math.h> 8#include <math.h>
8 9
9static bool hists__filter_entry_by_dso(struct hists *hists, 10static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -82,6 +83,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
82 hists__new_col_len(hists, HISTC_DSO, len); 83 hists__new_col_len(hists, HISTC_DSO, len);
83 } 84 }
84 85
86 if (h->parent)
87 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
88
85 if (h->branch_info) { 89 if (h->branch_info) {
86 int symlen; 90 int symlen;
87 /* 91 /*
@@ -242,6 +246,14 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
242 246
243 if (he->ms.map) 247 if (he->ms.map)
244 he->ms.map->referenced = true; 248 he->ms.map->referenced = true;
249
250 if (he->branch_info) {
251 if (he->branch_info->from.map)
252 he->branch_info->from.map->referenced = true;
253 if (he->branch_info->to.map)
254 he->branch_info->to.map->referenced = true;
255 }
256
245 if (symbol_conf.use_callchain) 257 if (symbol_conf.use_callchain)
246 callchain_init(he->callchain); 258 callchain_init(he->callchain);
247 259
@@ -251,7 +263,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
251 return he; 263 return he;
252} 264}
253 265
254static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) 266void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
255{ 267{
256 if (!h->filtered) { 268 if (!h->filtered) {
257 hists__calc_col_len(hists, h); 269 hists__calc_col_len(hists, h);
@@ -285,7 +297,13 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
285 parent = *p; 297 parent = *p;
286 he = rb_entry(parent, struct hist_entry, rb_node_in); 298 he = rb_entry(parent, struct hist_entry, rb_node_in);
287 299
288 cmp = hist_entry__cmp(entry, he); 300 /*
301 * Make sure that it receives arguments in a same order as
302 * hist_entry__collapse() so that we can use an appropriate
303 * function when searching an entry regardless which sort
304 * keys were used.
305 */
306 cmp = hist_entry__cmp(he, entry);
289 307
290 if (!cmp) { 308 if (!cmp) {
291 he_stat__add_period(&he->stat, period); 309 he_stat__add_period(&he->stat, period);
@@ -523,6 +541,62 @@ void hists__collapse_resort_threaded(struct hists *hists)
523 * reverse the map, sort on period. 541 * reverse the map, sort on period.
524 */ 542 */
525 543
544static int period_cmp(u64 period_a, u64 period_b)
545{
546 if (period_a > period_b)
547 return 1;
548 if (period_a < period_b)
549 return -1;
550 return 0;
551}
552
553static int hist_entry__sort_on_period(struct hist_entry *a,
554 struct hist_entry *b)
555{
556 int ret;
557 int i, nr_members;
558 struct perf_evsel *evsel;
559 struct hist_entry *pair;
560 u64 *periods_a, *periods_b;
561
562 ret = period_cmp(a->stat.period, b->stat.period);
563 if (ret || !symbol_conf.event_group)
564 return ret;
565
566 evsel = hists_to_evsel(a->hists);
567 nr_members = evsel->nr_members;
568 if (nr_members <= 1)
569 return ret;
570
571 periods_a = zalloc(sizeof(periods_a) * nr_members);
572 periods_b = zalloc(sizeof(periods_b) * nr_members);
573
574 if (!periods_a || !periods_b)
575 goto out;
576
577 list_for_each_entry(pair, &a->pairs.head, pairs.node) {
578 evsel = hists_to_evsel(pair->hists);
579 periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
580 }
581
582 list_for_each_entry(pair, &b->pairs.head, pairs.node) {
583 evsel = hists_to_evsel(pair->hists);
584 periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
585 }
586
587 for (i = 1; i < nr_members; i++) {
588 ret = period_cmp(periods_a[i], periods_b[i]);
589 if (ret)
590 break;
591 }
592
593out:
594 free(periods_a);
595 free(periods_b);
596
597 return ret;
598}
599
526static void __hists__insert_output_entry(struct rb_root *entries, 600static void __hists__insert_output_entry(struct rb_root *entries,
527 struct hist_entry *he, 601 struct hist_entry *he,
528 u64 min_callchain_hits) 602 u64 min_callchain_hits)
@@ -539,7 +613,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
539 parent = *p; 613 parent = *p;
540 iter = rb_entry(parent, struct hist_entry, rb_node); 614 iter = rb_entry(parent, struct hist_entry, rb_node);
541 615
542 if (he->stat.period > iter->stat.period) 616 if (hist_entry__sort_on_period(he, iter) > 0)
543 p = &(*p)->rb_left; 617 p = &(*p)->rb_left;
544 else 618 else
545 p = &(*p)->rb_right; 619 p = &(*p)->rb_right;
@@ -711,25 +785,38 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
711 return symbol__annotate(he->ms.sym, he->ms.map, privsize); 785 return symbol__annotate(he->ms.sym, he->ms.map, privsize);
712} 786}
713 787
788void events_stats__inc(struct events_stats *stats, u32 type)
789{
790 ++stats->nr_events[0];
791 ++stats->nr_events[type];
792}
793
714void hists__inc_nr_events(struct hists *hists, u32 type) 794void hists__inc_nr_events(struct hists *hists, u32 type)
715{ 795{
716 ++hists->stats.nr_events[0]; 796 events_stats__inc(&hists->stats, type);
717 ++hists->stats.nr_events[type];
718} 797}
719 798
720static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 799static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
721 struct hist_entry *pair) 800 struct hist_entry *pair)
722{ 801{
723 struct rb_node **p = &hists->entries.rb_node; 802 struct rb_root *root;
803 struct rb_node **p;
724 struct rb_node *parent = NULL; 804 struct rb_node *parent = NULL;
725 struct hist_entry *he; 805 struct hist_entry *he;
726 int cmp; 806 int cmp;
727 807
808 if (sort__need_collapse)
809 root = &hists->entries_collapsed;
810 else
811 root = hists->entries_in;
812
813 p = &root->rb_node;
814
728 while (*p != NULL) { 815 while (*p != NULL) {
729 parent = *p; 816 parent = *p;
730 he = rb_entry(parent, struct hist_entry, rb_node); 817 he = rb_entry(parent, struct hist_entry, rb_node_in);
731 818
732 cmp = hist_entry__cmp(pair, he); 819 cmp = hist_entry__collapse(he, pair);
733 820
734 if (!cmp) 821 if (!cmp)
735 goto out; 822 goto out;
@@ -744,8 +831,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
744 if (he) { 831 if (he) {
745 memset(&he->stat, 0, sizeof(he->stat)); 832 memset(&he->stat, 0, sizeof(he->stat));
746 he->hists = hists; 833 he->hists = hists;
747 rb_link_node(&he->rb_node, parent, p); 834 rb_link_node(&he->rb_node_in, parent, p);
748 rb_insert_color(&he->rb_node, &hists->entries); 835 rb_insert_color(&he->rb_node_in, root);
749 hists__inc_nr_entries(hists, he); 836 hists__inc_nr_entries(hists, he);
750 } 837 }
751out: 838out:
@@ -755,11 +842,16 @@ out:
755static struct hist_entry *hists__find_entry(struct hists *hists, 842static struct hist_entry *hists__find_entry(struct hists *hists,
756 struct hist_entry *he) 843 struct hist_entry *he)
757{ 844{
758 struct rb_node *n = hists->entries.rb_node; 845 struct rb_node *n;
846
847 if (sort__need_collapse)
848 n = hists->entries_collapsed.rb_node;
849 else
850 n = hists->entries_in->rb_node;
759 851
760 while (n) { 852 while (n) {
761 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 853 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
762 int64_t cmp = hist_entry__cmp(he, iter); 854 int64_t cmp = hist_entry__collapse(iter, he);
763 855
764 if (cmp < 0) 856 if (cmp < 0)
765 n = n->rb_left; 857 n = n->rb_left;
@@ -777,15 +869,21 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
777 */ 869 */
778void hists__match(struct hists *leader, struct hists *other) 870void hists__match(struct hists *leader, struct hists *other)
779{ 871{
872 struct rb_root *root;
780 struct rb_node *nd; 873 struct rb_node *nd;
781 struct hist_entry *pos, *pair; 874 struct hist_entry *pos, *pair;
782 875
783 for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) { 876 if (sort__need_collapse)
784 pos = rb_entry(nd, struct hist_entry, rb_node); 877 root = &leader->entries_collapsed;
878 else
879 root = leader->entries_in;
880
881 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
882 pos = rb_entry(nd, struct hist_entry, rb_node_in);
785 pair = hists__find_entry(other, pos); 883 pair = hists__find_entry(other, pos);
786 884
787 if (pair) 885 if (pair)
788 hist__entry_add_pair(pos, pair); 886 hist_entry__add_pair(pair, pos);
789 } 887 }
790} 888}
791 889
@@ -796,17 +894,23 @@ void hists__match(struct hists *leader, struct hists *other)
796 */ 894 */
797int hists__link(struct hists *leader, struct hists *other) 895int hists__link(struct hists *leader, struct hists *other)
798{ 896{
897 struct rb_root *root;
799 struct rb_node *nd; 898 struct rb_node *nd;
800 struct hist_entry *pos, *pair; 899 struct hist_entry *pos, *pair;
801 900
802 for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) { 901 if (sort__need_collapse)
803 pos = rb_entry(nd, struct hist_entry, rb_node); 902 root = &other->entries_collapsed;
903 else
904 root = other->entries_in;
905
906 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
907 pos = rb_entry(nd, struct hist_entry, rb_node_in);
804 908
805 if (!hist_entry__has_pairs(pos)) { 909 if (!hist_entry__has_pairs(pos)) {
806 pair = hists__add_dummy_entry(leader, pos); 910 pair = hists__add_dummy_entry(leader, pos);
807 if (pair == NULL) 911 if (pair == NULL)
808 return -1; 912 return -1;
809 hist__entry_add_pair(pair, pos); 913 hist_entry__add_pair(pos, pair);
810 } 914 }
811 } 915 }
812 916
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 8b091a51e4a2..38624686ee9a 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -96,8 +96,10 @@ void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
96 bool zap_kernel); 96 bool zap_kernel);
97void hists__output_recalc_col_len(struct hists *hists, int max_rows); 97void hists__output_recalc_col_len(struct hists *hists, int max_rows);
98 98
99void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
99void hists__inc_nr_events(struct hists *self, u32 type); 100void hists__inc_nr_events(struct hists *self, u32 type);
100size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); 101void events_stats__inc(struct events_stats *stats, u32 type);
102size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
101 103
102size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, 104size_t hists__fprintf(struct hists *self, bool show_header, int max_rows,
103 int max_cols, FILE *fp); 105 int max_cols, FILE *fp);
@@ -126,13 +128,19 @@ struct perf_hpp {
126}; 128};
127 129
128struct perf_hpp_fmt { 130struct perf_hpp_fmt {
129 bool cond;
130 int (*header)(struct perf_hpp *hpp); 131 int (*header)(struct perf_hpp *hpp);
131 int (*width)(struct perf_hpp *hpp); 132 int (*width)(struct perf_hpp *hpp);
132 int (*color)(struct perf_hpp *hpp, struct hist_entry *he); 133 int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
133 int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); 134 int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
135
136 struct list_head list;
134}; 137};
135 138
139extern struct list_head perf_hpp__list;
140
141#define perf_hpp__for_each_format(format) \
142 list_for_each_entry(format, &perf_hpp__list, list)
143
136extern struct perf_hpp_fmt perf_hpp__format[]; 144extern struct perf_hpp_fmt perf_hpp__format[];
137 145
138enum { 146enum {
@@ -148,14 +156,14 @@ enum {
148 PERF_HPP__DELTA, 156 PERF_HPP__DELTA,
149 PERF_HPP__RATIO, 157 PERF_HPP__RATIO,
150 PERF_HPP__WEIGHTED_DIFF, 158 PERF_HPP__WEIGHTED_DIFF,
151 PERF_HPP__DISPL,
152 PERF_HPP__FORMULA, 159 PERF_HPP__FORMULA,
153 160
154 PERF_HPP__MAX_INDEX 161 PERF_HPP__MAX_INDEX
155}; 162};
156 163
157void perf_hpp__init(void); 164void perf_hpp__init(void);
158void perf_hpp__column_enable(unsigned col, bool enable); 165void perf_hpp__column_register(struct perf_hpp_fmt *format);
166void perf_hpp__column_enable(unsigned col);
159int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, 167int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
160 bool color); 168 bool color);
161 169
@@ -219,8 +227,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
219 227
220unsigned int hists__sort_list_width(struct hists *self); 228unsigned int hists__sort_list_width(struct hists *self);
221 229
222double perf_diff__compute_delta(struct hist_entry *he); 230double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
223double perf_diff__compute_ratio(struct hist_entry *he); 231double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
224s64 perf_diff__compute_wdiff(struct hist_entry *he); 232s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
225int perf_diff__formula(char *buf, size_t size, struct hist_entry *he); 233int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
234 char *buf, size_t size);
235double perf_diff__period_percent(struct hist_entry *he, u64 period);
226#endif /* __PERF_HIST_H */ 236#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index a55d8cf083c9..45cf10a562bd 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -14,6 +14,7 @@
14#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 14#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) 15#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 16#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
17#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
17 18
18#define for_each_set_bit(bit, addr, size) \ 19#define for_each_set_bit(bit, addr, size) \
19 for ((bit) = find_first_bit((addr), (size)); \ 20 for ((bit) = find_first_bit((addr), (size)); \
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index 9d0740024ba8..11a8d86f7fea 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -59,16 +59,40 @@ void intlist__remove(struct intlist *ilist, struct int_node *node)
59 59
60struct int_node *intlist__find(struct intlist *ilist, int i) 60struct int_node *intlist__find(struct intlist *ilist, int i)
61{ 61{
62 struct int_node *node = NULL; 62 struct int_node *node;
63 struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); 63 struct rb_node *rb_node;
64 64
65 if (ilist == NULL)
66 return NULL;
67
68 node = NULL;
69 rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
65 if (rb_node) 70 if (rb_node)
66 node = container_of(rb_node, struct int_node, rb_node); 71 node = container_of(rb_node, struct int_node, rb_node);
67 72
68 return node; 73 return node;
69} 74}
70 75
71struct intlist *intlist__new(void) 76static int intlist__parse_list(struct intlist *ilist, const char *s)
77{
78 char *sep;
79 int err;
80
81 do {
82 long value = strtol(s, &sep, 10);
83 err = -EINVAL;
84 if (*sep != ',' && *sep != '\0')
85 break;
86 err = intlist__add(ilist, value);
87 if (err)
88 break;
89 s = sep + 1;
90 } while (*sep != '\0');
91
92 return err;
93}
94
95struct intlist *intlist__new(const char *slist)
72{ 96{
73 struct intlist *ilist = malloc(sizeof(*ilist)); 97 struct intlist *ilist = malloc(sizeof(*ilist));
74 98
@@ -77,9 +101,15 @@ struct intlist *intlist__new(void)
77 ilist->rblist.node_cmp = intlist__node_cmp; 101 ilist->rblist.node_cmp = intlist__node_cmp;
78 ilist->rblist.node_new = intlist__node_new; 102 ilist->rblist.node_new = intlist__node_new;
79 ilist->rblist.node_delete = intlist__node_delete; 103 ilist->rblist.node_delete = intlist__node_delete;
104
105 if (slist && intlist__parse_list(ilist, slist))
106 goto out_delete;
80 } 107 }
81 108
82 return ilist; 109 return ilist;
110out_delete:
111 intlist__delete(ilist);
112 return NULL;
83} 113}
84 114
85void intlist__delete(struct intlist *ilist) 115void intlist__delete(struct intlist *ilist)
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 6d63ab90db50..62351dad848f 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -15,7 +15,7 @@ struct intlist {
15 struct rblist rblist; 15 struct rblist rblist;
16}; 16};
17 17
18struct intlist *intlist__new(void); 18struct intlist *intlist__new(const char *slist);
19void intlist__delete(struct intlist *ilist); 19void intlist__delete(struct intlist *ilist);
20 20
21void intlist__remove(struct intlist *ilist, struct int_node *in); 21void intlist__remove(struct intlist *ilist, struct int_node *in);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 1f09d0581e6b..efdb38e65a92 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1,10 +1,15 @@
1#include "callchain.h"
1#include "debug.h" 2#include "debug.h"
2#include "event.h" 3#include "event.h"
4#include "evsel.h"
5#include "hist.h"
3#include "machine.h" 6#include "machine.h"
4#include "map.h" 7#include "map.h"
8#include "sort.h"
5#include "strlist.h" 9#include "strlist.h"
6#include "thread.h" 10#include "thread.h"
7#include <stdbool.h> 11#include <stdbool.h>
12#include "unwind.h"
8 13
9int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 14int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
10{ 15{
@@ -48,6 +53,29 @@ static void dsos__delete(struct list_head *dsos)
48 } 53 }
49} 54}
50 55
56void machine__delete_dead_threads(struct machine *machine)
57{
58 struct thread *n, *t;
59
60 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
61 list_del(&t->node);
62 thread__delete(t);
63 }
64}
65
66void machine__delete_threads(struct machine *machine)
67{
68 struct rb_node *nd = rb_first(&machine->threads);
69
70 while (nd) {
71 struct thread *t = rb_entry(nd, struct thread, rb_node);
72
73 rb_erase(&t->rb_node, &machine->threads);
74 nd = rb_next(nd);
75 thread__delete(t);
76 }
77}
78
51void machine__exit(struct machine *machine) 79void machine__exit(struct machine *machine)
52{ 80{
53 map_groups__exit(&machine->kmaps); 81 map_groups__exit(&machine->kmaps);
@@ -63,10 +91,22 @@ void machine__delete(struct machine *machine)
63 free(machine); 91 free(machine);
64} 92}
65 93
66struct machine *machines__add(struct rb_root *machines, pid_t pid, 94void machines__init(struct machines *machines)
95{
96 machine__init(&machines->host, "", HOST_KERNEL_ID);
97 machines->guests = RB_ROOT;
98}
99
100void machines__exit(struct machines *machines)
101{
102 machine__exit(&machines->host);
103 /* XXX exit guest */
104}
105
106struct machine *machines__add(struct machines *machines, pid_t pid,
67 const char *root_dir) 107 const char *root_dir)
68{ 108{
69 struct rb_node **p = &machines->rb_node; 109 struct rb_node **p = &machines->guests.rb_node;
70 struct rb_node *parent = NULL; 110 struct rb_node *parent = NULL;
71 struct machine *pos, *machine = malloc(sizeof(*machine)); 111 struct machine *pos, *machine = malloc(sizeof(*machine));
72 112
@@ -88,18 +128,21 @@ struct machine *machines__add(struct rb_root *machines, pid_t pid,
88 } 128 }
89 129
90 rb_link_node(&machine->rb_node, parent, p); 130 rb_link_node(&machine->rb_node, parent, p);
91 rb_insert_color(&machine->rb_node, machines); 131 rb_insert_color(&machine->rb_node, &machines->guests);
92 132
93 return machine; 133 return machine;
94} 134}
95 135
96struct machine *machines__find(struct rb_root *machines, pid_t pid) 136struct machine *machines__find(struct machines *machines, pid_t pid)
97{ 137{
98 struct rb_node **p = &machines->rb_node; 138 struct rb_node **p = &machines->guests.rb_node;
99 struct rb_node *parent = NULL; 139 struct rb_node *parent = NULL;
100 struct machine *machine; 140 struct machine *machine;
101 struct machine *default_machine = NULL; 141 struct machine *default_machine = NULL;
102 142
143 if (pid == HOST_KERNEL_ID)
144 return &machines->host;
145
103 while (*p != NULL) { 146 while (*p != NULL) {
104 parent = *p; 147 parent = *p;
105 machine = rb_entry(parent, struct machine, rb_node); 148 machine = rb_entry(parent, struct machine, rb_node);
@@ -116,7 +159,7 @@ struct machine *machines__find(struct rb_root *machines, pid_t pid)
116 return default_machine; 159 return default_machine;
117} 160}
118 161
119struct machine *machines__findnew(struct rb_root *machines, pid_t pid) 162struct machine *machines__findnew(struct machines *machines, pid_t pid)
120{ 163{
121 char path[PATH_MAX]; 164 char path[PATH_MAX];
122 const char *root_dir = ""; 165 const char *root_dir = "";
@@ -150,12 +193,12 @@ out:
150 return machine; 193 return machine;
151} 194}
152 195
153void machines__process(struct rb_root *machines, 196void machines__process_guests(struct machines *machines,
154 machine__process_t process, void *data) 197 machine__process_t process, void *data)
155{ 198{
156 struct rb_node *nd; 199 struct rb_node *nd;
157 200
158 for (nd = rb_first(machines); nd; nd = rb_next(nd)) { 201 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
159 struct machine *pos = rb_entry(nd, struct machine, rb_node); 202 struct machine *pos = rb_entry(nd, struct machine, rb_node);
160 process(pos, data); 203 process(pos, data);
161 } 204 }
@@ -175,12 +218,14 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
175 return bf; 218 return bf;
176} 219}
177 220
178void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) 221void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
179{ 222{
180 struct rb_node *node; 223 struct rb_node *node;
181 struct machine *machine; 224 struct machine *machine;
182 225
183 for (node = rb_first(machines); node; node = rb_next(node)) { 226 machines->host.id_hdr_size = id_hdr_size;
227
228 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
184 machine = rb_entry(node, struct machine, rb_node); 229 machine = rb_entry(node, struct machine, rb_node);
185 machine->id_hdr_size = id_hdr_size; 230 machine->id_hdr_size = id_hdr_size;
186 } 231 }
@@ -264,6 +309,537 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
264 return 0; 309 return 0;
265} 310}
266 311
312struct map *machine__new_module(struct machine *machine, u64 start,
313 const char *filename)
314{
315 struct map *map;
316 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
317
318 if (dso == NULL)
319 return NULL;
320
321 map = map__new2(start, dso, MAP__FUNCTION);
322 if (map == NULL)
323 return NULL;
324
325 if (machine__is_host(machine))
326 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
327 else
328 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
329 map_groups__insert(&machine->kmaps, map);
330 return map;
331}
332
333size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
334{
335 struct rb_node *nd;
336 size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
337 __dsos__fprintf(&machines->host.user_dsos, fp);
338
339 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
340 struct machine *pos = rb_entry(nd, struct machine, rb_node);
341 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
342 ret += __dsos__fprintf(&pos->user_dsos, fp);
343 }
344
345 return ret;
346}
347
348size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
349 bool (skip)(struct dso *dso, int parm), int parm)
350{
351 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
352 __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
353}
354
355size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
356 bool (skip)(struct dso *dso, int parm), int parm)
357{
358 struct rb_node *nd;
359 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
360
361 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
362 struct machine *pos = rb_entry(nd, struct machine, rb_node);
363 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
364 }
365 return ret;
366}
367
368size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
369{
370 int i;
371 size_t printed = 0;
372 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
373
374 if (kdso->has_build_id) {
375 char filename[PATH_MAX];
376 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
377 printed += fprintf(fp, "[0] %s\n", filename);
378 }
379
380 for (i = 0; i < vmlinux_path__nr_entries; ++i)
381 printed += fprintf(fp, "[%d] %s\n",
382 i + kdso->has_build_id, vmlinux_path[i]);
383
384 return printed;
385}
386
387size_t machine__fprintf(struct machine *machine, FILE *fp)
388{
389 size_t ret = 0;
390 struct rb_node *nd;
391
392 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
393 struct thread *pos = rb_entry(nd, struct thread, rb_node);
394
395 ret += thread__fprintf(pos, fp);
396 }
397
398 return ret;
399}
400
401static struct dso *machine__get_kernel(struct machine *machine)
402{
403 const char *vmlinux_name = NULL;
404 struct dso *kernel;
405
406 if (machine__is_host(machine)) {
407 vmlinux_name = symbol_conf.vmlinux_name;
408 if (!vmlinux_name)
409 vmlinux_name = "[kernel.kallsyms]";
410
411 kernel = dso__kernel_findnew(machine, vmlinux_name,
412 "[kernel]",
413 DSO_TYPE_KERNEL);
414 } else {
415 char bf[PATH_MAX];
416
417 if (machine__is_default_guest(machine))
418 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
419 if (!vmlinux_name)
420 vmlinux_name = machine__mmap_name(machine, bf,
421 sizeof(bf));
422
423 kernel = dso__kernel_findnew(machine, vmlinux_name,
424 "[guest.kernel]",
425 DSO_TYPE_GUEST_KERNEL);
426 }
427
428 if (kernel != NULL && (!kernel->has_build_id))
429 dso__read_running_kernel_build_id(kernel, machine);
430
431 return kernel;
432}
433
434struct process_args {
435 u64 start;
436};
437
438static int symbol__in_kernel(void *arg, const char *name,
439 char type __maybe_unused, u64 start)
440{
441 struct process_args *args = arg;
442
443 if (strchr(name, '['))
444 return 0;
445
446 args->start = start;
447 return 1;
448}
449
450/* Figure out the start address of kernel map from /proc/kallsyms */
451static u64 machine__get_kernel_start_addr(struct machine *machine)
452{
453 const char *filename;
454 char path[PATH_MAX];
455 struct process_args args;
456
457 if (machine__is_host(machine)) {
458 filename = "/proc/kallsyms";
459 } else {
460 if (machine__is_default_guest(machine))
461 filename = (char *)symbol_conf.default_guest_kallsyms;
462 else {
463 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
464 filename = path;
465 }
466 }
467
468 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
469 return 0;
470
471 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
472 return 0;
473
474 return args.start;
475}
476
477int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
478{
479 enum map_type type;
480 u64 start = machine__get_kernel_start_addr(machine);
481
482 for (type = 0; type < MAP__NR_TYPES; ++type) {
483 struct kmap *kmap;
484
485 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
486 if (machine->vmlinux_maps[type] == NULL)
487 return -1;
488
489 machine->vmlinux_maps[type]->map_ip =
490 machine->vmlinux_maps[type]->unmap_ip =
491 identity__map_ip;
492 kmap = map__kmap(machine->vmlinux_maps[type]);
493 kmap->kmaps = &machine->kmaps;
494 map_groups__insert(&machine->kmaps,
495 machine->vmlinux_maps[type]);
496 }
497
498 return 0;
499}
500
501void machine__destroy_kernel_maps(struct machine *machine)
502{
503 enum map_type type;
504
505 for (type = 0; type < MAP__NR_TYPES; ++type) {
506 struct kmap *kmap;
507
508 if (machine->vmlinux_maps[type] == NULL)
509 continue;
510
511 kmap = map__kmap(machine->vmlinux_maps[type]);
512 map_groups__remove(&machine->kmaps,
513 machine->vmlinux_maps[type]);
514 if (kmap->ref_reloc_sym) {
515 /*
516 * ref_reloc_sym is shared among all maps, so free just
517 * on one of them.
518 */
519 if (type == MAP__FUNCTION) {
520 free((char *)kmap->ref_reloc_sym->name);
521 kmap->ref_reloc_sym->name = NULL;
522 free(kmap->ref_reloc_sym);
523 }
524 kmap->ref_reloc_sym = NULL;
525 }
526
527 map__delete(machine->vmlinux_maps[type]);
528 machine->vmlinux_maps[type] = NULL;
529 }
530}
531
532int machines__create_guest_kernel_maps(struct machines *machines)
533{
534 int ret = 0;
535 struct dirent **namelist = NULL;
536 int i, items = 0;
537 char path[PATH_MAX];
538 pid_t pid;
539 char *endp;
540
541 if (symbol_conf.default_guest_vmlinux_name ||
542 symbol_conf.default_guest_modules ||
543 symbol_conf.default_guest_kallsyms) {
544 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
545 }
546
547 if (symbol_conf.guestmount) {
548 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
549 if (items <= 0)
550 return -ENOENT;
551 for (i = 0; i < items; i++) {
552 if (!isdigit(namelist[i]->d_name[0])) {
553 /* Filter out . and .. */
554 continue;
555 }
556 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
557 if ((*endp != '\0') ||
558 (endp == namelist[i]->d_name) ||
559 (errno == ERANGE)) {
560 pr_debug("invalid directory (%s). Skipping.\n",
561 namelist[i]->d_name);
562 continue;
563 }
564 sprintf(path, "%s/%s/proc/kallsyms",
565 symbol_conf.guestmount,
566 namelist[i]->d_name);
567 ret = access(path, R_OK);
568 if (ret) {
569 pr_debug("Can't access file %s\n", path);
570 goto failure;
571 }
572 machines__create_kernel_maps(machines, pid);
573 }
574failure:
575 free(namelist);
576 }
577
578 return ret;
579}
580
581void machines__destroy_kernel_maps(struct machines *machines)
582{
583 struct rb_node *next = rb_first(&machines->guests);
584
585 machine__destroy_kernel_maps(&machines->host);
586
587 while (next) {
588 struct machine *pos = rb_entry(next, struct machine, rb_node);
589
590 next = rb_next(&pos->rb_node);
591 rb_erase(&pos->rb_node, &machines->guests);
592 machine__delete(pos);
593 }
594}
595
596int machines__create_kernel_maps(struct machines *machines, pid_t pid)
597{
598 struct machine *machine = machines__findnew(machines, pid);
599
600 if (machine == NULL)
601 return -1;
602
603 return machine__create_kernel_maps(machine);
604}
605
606int machine__load_kallsyms(struct machine *machine, const char *filename,
607 enum map_type type, symbol_filter_t filter)
608{
609 struct map *map = machine->vmlinux_maps[type];
610 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
611
612 if (ret > 0) {
613 dso__set_loaded(map->dso, type);
614 /*
615 * Since /proc/kallsyms will have multiple sessions for the
616 * kernel, with modules between them, fixup the end of all
617 * sections.
618 */
619 __map_groups__fixup_end(&machine->kmaps, type);
620 }
621
622 return ret;
623}
624
625int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
626 symbol_filter_t filter)
627{
628 struct map *map = machine->vmlinux_maps[type];
629 int ret = dso__load_vmlinux_path(map->dso, map, filter);
630
631 if (ret > 0) {
632 dso__set_loaded(map->dso, type);
633 map__reloc_vmlinux(map);
634 }
635
636 return ret;
637}
638
639static void map_groups__fixup_end(struct map_groups *mg)
640{
641 int i;
642 for (i = 0; i < MAP__NR_TYPES; ++i)
643 __map_groups__fixup_end(mg, i);
644}
645
646static char *get_kernel_version(const char *root_dir)
647{
648 char version[PATH_MAX];
649 FILE *file;
650 char *name, *tmp;
651 const char *prefix = "Linux version ";
652
653 sprintf(version, "%s/proc/version", root_dir);
654 file = fopen(version, "r");
655 if (!file)
656 return NULL;
657
658 version[0] = '\0';
659 tmp = fgets(version, sizeof(version), file);
660 fclose(file);
661
662 name = strstr(version, prefix);
663 if (!name)
664 return NULL;
665 name += strlen(prefix);
666 tmp = strchr(name, ' ');
667 if (tmp)
668 *tmp = '\0';
669
670 return strdup(name);
671}
672
673static int map_groups__set_modules_path_dir(struct map_groups *mg,
674 const char *dir_name)
675{
676 struct dirent *dent;
677 DIR *dir = opendir(dir_name);
678 int ret = 0;
679
680 if (!dir) {
681 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
682 return -1;
683 }
684
685 while ((dent = readdir(dir)) != NULL) {
686 char path[PATH_MAX];
687 struct stat st;
688
689 /*sshfs might return bad dent->d_type, so we have to stat*/
690 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
691 if (stat(path, &st))
692 continue;
693
694 if (S_ISDIR(st.st_mode)) {
695 if (!strcmp(dent->d_name, ".") ||
696 !strcmp(dent->d_name, ".."))
697 continue;
698
699 ret = map_groups__set_modules_path_dir(mg, path);
700 if (ret < 0)
701 goto out;
702 } else {
703 char *dot = strrchr(dent->d_name, '.'),
704 dso_name[PATH_MAX];
705 struct map *map;
706 char *long_name;
707
708 if (dot == NULL || strcmp(dot, ".ko"))
709 continue;
710 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
711 (int)(dot - dent->d_name), dent->d_name);
712
713 strxfrchar(dso_name, '-', '_');
714 map = map_groups__find_by_name(mg, MAP__FUNCTION,
715 dso_name);
716 if (map == NULL)
717 continue;
718
719 long_name = strdup(path);
720 if (long_name == NULL) {
721 ret = -1;
722 goto out;
723 }
724 dso__set_long_name(map->dso, long_name);
725 map->dso->lname_alloc = 1;
726 dso__kernel_module_get_build_id(map->dso, "");
727 }
728 }
729
730out:
731 closedir(dir);
732 return ret;
733}
734
735static int machine__set_modules_path(struct machine *machine)
736{
737 char *version;
738 char modules_path[PATH_MAX];
739
740 version = get_kernel_version(machine->root_dir);
741 if (!version)
742 return -1;
743
744 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
745 machine->root_dir, version);
746 free(version);
747
748 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
749}
750
751static int machine__create_modules(struct machine *machine)
752{
753 char *line = NULL;
754 size_t n;
755 FILE *file;
756 struct map *map;
757 const char *modules;
758 char path[PATH_MAX];
759
760 if (machine__is_default_guest(machine))
761 modules = symbol_conf.default_guest_modules;
762 else {
763 sprintf(path, "%s/proc/modules", machine->root_dir);
764 modules = path;
765 }
766
767 if (symbol__restricted_filename(path, "/proc/modules"))
768 return -1;
769
770 file = fopen(modules, "r");
771 if (file == NULL)
772 return -1;
773
774 while (!feof(file)) {
775 char name[PATH_MAX];
776 u64 start;
777 char *sep;
778 int line_len;
779
780 line_len = getline(&line, &n, file);
781 if (line_len < 0)
782 break;
783
784 if (!line)
785 goto out_failure;
786
787 line[--line_len] = '\0'; /* \n */
788
789 sep = strrchr(line, 'x');
790 if (sep == NULL)
791 continue;
792
793 hex2u64(sep + 1, &start);
794
795 sep = strchr(line, ' ');
796 if (sep == NULL)
797 continue;
798
799 *sep = '\0';
800
801 snprintf(name, sizeof(name), "[%s]", line);
802 map = machine__new_module(machine, start, name);
803 if (map == NULL)
804 goto out_delete_line;
805 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
806 }
807
808 free(line);
809 fclose(file);
810
811 return machine__set_modules_path(machine);
812
813out_delete_line:
814 free(line);
815out_failure:
816 return -1;
817}
818
819int machine__create_kernel_maps(struct machine *machine)
820{
821 struct dso *kernel = machine__get_kernel(machine);
822
823 if (kernel == NULL ||
824 __machine__create_kernel_maps(machine, kernel) < 0)
825 return -1;
826
827 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
828 if (machine__is_host(machine))
829 pr_debug("Problems creating module maps, "
830 "continuing anyway...\n");
831 else
832 pr_debug("Problems creating module maps for guest %d, "
833 "continuing anyway...\n", machine->pid);
834 }
835
836 /*
837 * Now that we have all the maps created, just set the ->end of them:
838 */
839 map_groups__fixup_end(&machine->kmaps);
840 return 0;
841}
842
267static void machine__set_kernel_mmap_len(struct machine *machine, 843static void machine__set_kernel_mmap_len(struct machine *machine,
268 union perf_event *event) 844 union perf_event *event)
269{ 845{
@@ -462,3 +1038,189 @@ int machine__process_event(struct machine *machine, union perf_event *event)
462 1038
463 return ret; 1039 return ret;
464} 1040}
1041
1042void machine__remove_thread(struct machine *machine, struct thread *th)
1043{
1044 machine->last_match = NULL;
1045 rb_erase(&th->rb_node, &machine->threads);
1046 /*
1047 * We may have references to this thread, for instance in some hist_entry
1048 * instances, so just move them to a separate list.
1049 */
1050 list_add_tail(&th->node, &machine->dead_threads);
1051}
1052
1053static bool symbol__match_parent_regex(struct symbol *sym)
1054{
1055 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
1056 return 1;
1057
1058 return 0;
1059}
1060
1061static const u8 cpumodes[] = {
1062 PERF_RECORD_MISC_USER,
1063 PERF_RECORD_MISC_KERNEL,
1064 PERF_RECORD_MISC_GUEST_USER,
1065 PERF_RECORD_MISC_GUEST_KERNEL
1066};
1067#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
1068
1069static void ip__resolve_ams(struct machine *machine, struct thread *thread,
1070 struct addr_map_symbol *ams,
1071 u64 ip)
1072{
1073 struct addr_location al;
1074 size_t i;
1075 u8 m;
1076
1077 memset(&al, 0, sizeof(al));
1078
1079 for (i = 0; i < NCPUMODES; i++) {
1080 m = cpumodes[i];
1081 /*
1082 * We cannot use the header.misc hint to determine whether a
1083 * branch stack address is user, kernel, guest, hypervisor.
1084 * Branches may straddle the kernel/user/hypervisor boundaries.
1085 * Thus, we have to try consecutively until we find a match
1086 * or else, the symbol is unknown
1087 */
1088 thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
1089 ip, &al, NULL);
1090 if (al.sym)
1091 goto found;
1092 }
1093found:
1094 ams->addr = ip;
1095 ams->al_addr = al.addr;
1096 ams->sym = al.sym;
1097 ams->map = al.map;
1098}
1099
1100struct branch_info *machine__resolve_bstack(struct machine *machine,
1101 struct thread *thr,
1102 struct branch_stack *bs)
1103{
1104 struct branch_info *bi;
1105 unsigned int i;
1106
1107 bi = calloc(bs->nr, sizeof(struct branch_info));
1108 if (!bi)
1109 return NULL;
1110
1111 for (i = 0; i < bs->nr; i++) {
1112 ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
1113 ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
1114 bi[i].flags = bs->entries[i].flags;
1115 }
1116 return bi;
1117}
1118
1119static int machine__resolve_callchain_sample(struct machine *machine,
1120 struct thread *thread,
1121 struct ip_callchain *chain,
1122 struct symbol **parent)
1123
1124{
1125 u8 cpumode = PERF_RECORD_MISC_USER;
1126 unsigned int i;
1127 int err;
1128
1129 callchain_cursor_reset(&callchain_cursor);
1130
1131 if (chain->nr > PERF_MAX_STACK_DEPTH) {
1132 pr_warning("corrupted callchain. skipping...\n");
1133 return 0;
1134 }
1135
1136 for (i = 0; i < chain->nr; i++) {
1137 u64 ip;
1138 struct addr_location al;
1139
1140 if (callchain_param.order == ORDER_CALLEE)
1141 ip = chain->ips[i];
1142 else
1143 ip = chain->ips[chain->nr - i - 1];
1144
1145 if (ip >= PERF_CONTEXT_MAX) {
1146 switch (ip) {
1147 case PERF_CONTEXT_HV:
1148 cpumode = PERF_RECORD_MISC_HYPERVISOR;
1149 break;
1150 case PERF_CONTEXT_KERNEL:
1151 cpumode = PERF_RECORD_MISC_KERNEL;
1152 break;
1153 case PERF_CONTEXT_USER:
1154 cpumode = PERF_RECORD_MISC_USER;
1155 break;
1156 default:
1157 pr_debug("invalid callchain context: "
1158 "%"PRId64"\n", (s64) ip);
1159 /*
1160 * It seems the callchain is corrupted.
1161 * Discard all.
1162 */
1163 callchain_cursor_reset(&callchain_cursor);
1164 return 0;
1165 }
1166 continue;
1167 }
1168
1169 al.filtered = false;
1170 thread__find_addr_location(thread, machine, cpumode,
1171 MAP__FUNCTION, ip, &al, NULL);
1172 if (al.sym != NULL) {
1173 if (sort__has_parent && !*parent &&
1174 symbol__match_parent_regex(al.sym))
1175 *parent = al.sym;
1176 if (!symbol_conf.use_callchain)
1177 break;
1178 }
1179
1180 err = callchain_cursor_append(&callchain_cursor,
1181 ip, al.map, al.sym);
1182 if (err)
1183 return err;
1184 }
1185
1186 return 0;
1187}
1188
1189static int unwind_entry(struct unwind_entry *entry, void *arg)
1190{
1191 struct callchain_cursor *cursor = arg;
1192 return callchain_cursor_append(cursor, entry->ip,
1193 entry->map, entry->sym);
1194}
1195
1196int machine__resolve_callchain(struct machine *machine,
1197 struct perf_evsel *evsel,
1198 struct thread *thread,
1199 struct perf_sample *sample,
1200 struct symbol **parent)
1201
1202{
1203 int ret;
1204
1205 callchain_cursor_reset(&callchain_cursor);
1206
1207 ret = machine__resolve_callchain_sample(machine, thread,
1208 sample->callchain, parent);
1209 if (ret)
1210 return ret;
1211
1212 /* Can we do dwarf post unwind? */
1213 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
1214 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
1215 return 0;
1216
1217 /* Bail out if nothing was captured. */
1218 if ((!sample->user_regs.regs) ||
1219 (!sample->user_stack.size))
1220 return 0;
1221
1222 return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
1223 thread, evsel->attr.sample_regs_user,
1224 sample);
1225
1226}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index b7cde7467d55..5ac5892f2326 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -47,23 +47,32 @@ int machine__process_event(struct machine *machine, union perf_event *event);
47 47
48typedef void (*machine__process_t)(struct machine *machine, void *data); 48typedef void (*machine__process_t)(struct machine *machine, void *data);
49 49
50void machines__process(struct rb_root *machines, 50struct machines {
51 machine__process_t process, void *data); 51 struct machine host;
52 struct rb_root guests;
53};
54
55void machines__init(struct machines *machines);
56void machines__exit(struct machines *machines);
52 57
53struct machine *machines__add(struct rb_root *machines, pid_t pid, 58void machines__process_guests(struct machines *machines,
59 machine__process_t process, void *data);
60
61struct machine *machines__add(struct machines *machines, pid_t pid,
54 const char *root_dir); 62 const char *root_dir);
55struct machine *machines__find_host(struct rb_root *machines); 63struct machine *machines__find_host(struct machines *machines);
56struct machine *machines__find(struct rb_root *machines, pid_t pid); 64struct machine *machines__find(struct machines *machines, pid_t pid);
57struct machine *machines__findnew(struct rb_root *machines, pid_t pid); 65struct machine *machines__findnew(struct machines *machines, pid_t pid);
58 66
59void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size); 67void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
60char *machine__mmap_name(struct machine *machine, char *bf, size_t size); 68char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
61 69
62int machine__init(struct machine *machine, const char *root_dir, pid_t pid); 70int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
63void machine__exit(struct machine *machine); 71void machine__exit(struct machine *machine);
72void machine__delete_dead_threads(struct machine *machine);
73void machine__delete_threads(struct machine *machine);
64void machine__delete(struct machine *machine); 74void machine__delete(struct machine *machine);
65 75
66
67struct branch_info *machine__resolve_bstack(struct machine *machine, 76struct branch_info *machine__resolve_bstack(struct machine *machine,
68 struct thread *thread, 77 struct thread *thread,
69 struct branch_stack *bs); 78 struct branch_stack *bs);
@@ -129,19 +138,19 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
129int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 138int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
130 symbol_filter_t filter); 139 symbol_filter_t filter);
131 140
132size_t machine__fprintf_dsos_buildid(struct machine *machine, 141size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
133 FILE *fp, bool with_hits); 142 bool (skip)(struct dso *dso, int parm), int parm);
134size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); 143size_t machines__fprintf_dsos(struct machines *machines, FILE *fp);
135size_t machines__fprintf_dsos_buildid(struct rb_root *machines, 144size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
136 FILE *fp, bool with_hits); 145 bool (skip)(struct dso *dso, int parm), int parm);
137 146
138void machine__destroy_kernel_maps(struct machine *machine); 147void machine__destroy_kernel_maps(struct machine *machine);
139int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); 148int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
140int machine__create_kernel_maps(struct machine *machine); 149int machine__create_kernel_maps(struct machine *machine);
141 150
142int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); 151int machines__create_kernel_maps(struct machines *machines, pid_t pid);
143int machines__create_guest_kernel_maps(struct rb_root *machines); 152int machines__create_guest_kernel_maps(struct machines *machines);
144void machines__destroy_guest_kernel_maps(struct rb_root *machines); 153void machines__destroy_kernel_maps(struct machines *machines);
145 154
146size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); 155size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
147 156
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 0328d45c4f2a..6fcb9de62340 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -11,6 +11,7 @@
11#include "strlist.h" 11#include "strlist.h"
12#include "vdso.h" 12#include "vdso.h"
13#include "build-id.h" 13#include "build-id.h"
14#include <linux/string.h>
14 15
15const char *map_type__name[MAP__NR_TYPES] = { 16const char *map_type__name[MAP__NR_TYPES] = {
16 [MAP__FUNCTION] = "Functions", 17 [MAP__FUNCTION] = "Functions",
@@ -19,7 +20,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
19 20
20static inline int is_anon_memory(const char *filename) 21static inline int is_anon_memory(const char *filename)
21{ 22{
22 return strcmp(filename, "//anon") == 0; 23 return !strcmp(filename, "//anon") ||
24 !strcmp(filename, "/anon_hugepage (deleted)");
23} 25}
24 26
25static inline int is_no_dso_memory(const char *filename) 27static inline int is_no_dso_memory(const char *filename)
@@ -28,29 +30,29 @@ static inline int is_no_dso_memory(const char *filename)
28 !strcmp(filename, "[heap]"); 30 !strcmp(filename, "[heap]");
29} 31}
30 32
31void map__init(struct map *self, enum map_type type, 33void map__init(struct map *map, enum map_type type,
32 u64 start, u64 end, u64 pgoff, struct dso *dso) 34 u64 start, u64 end, u64 pgoff, struct dso *dso)
33{ 35{
34 self->type = type; 36 map->type = type;
35 self->start = start; 37 map->start = start;
36 self->end = end; 38 map->end = end;
37 self->pgoff = pgoff; 39 map->pgoff = pgoff;
38 self->dso = dso; 40 map->dso = dso;
39 self->map_ip = map__map_ip; 41 map->map_ip = map__map_ip;
40 self->unmap_ip = map__unmap_ip; 42 map->unmap_ip = map__unmap_ip;
41 RB_CLEAR_NODE(&self->rb_node); 43 RB_CLEAR_NODE(&map->rb_node);
42 self->groups = NULL; 44 map->groups = NULL;
43 self->referenced = false; 45 map->referenced = false;
44 self->erange_warned = false; 46 map->erange_warned = false;
45} 47}
46 48
47struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, 49struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
48 u64 pgoff, u32 pid, char *filename, 50 u64 pgoff, u32 pid, char *filename,
49 enum map_type type) 51 enum map_type type)
50{ 52{
51 struct map *self = malloc(sizeof(*self)); 53 struct map *map = malloc(sizeof(*map));
52 54
53 if (self != NULL) { 55 if (map != NULL) {
54 char newfilename[PATH_MAX]; 56 char newfilename[PATH_MAX];
55 struct dso *dso; 57 struct dso *dso;
56 int anon, no_dso, vdso; 58 int anon, no_dso, vdso;
@@ -73,10 +75,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
73 if (dso == NULL) 75 if (dso == NULL)
74 goto out_delete; 76 goto out_delete;
75 77
76 map__init(self, type, start, start + len, pgoff, dso); 78 map__init(map, type, start, start + len, pgoff, dso);
77 79
78 if (anon || no_dso) { 80 if (anon || no_dso) {
79 self->map_ip = self->unmap_ip = identity__map_ip; 81 map->map_ip = map->unmap_ip = identity__map_ip;
80 82
81 /* 83 /*
82 * Set memory without DSO as loaded. All map__find_* 84 * Set memory without DSO as loaded. All map__find_*
@@ -84,12 +86,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
84 * unnecessary map__load warning. 86 * unnecessary map__load warning.
85 */ 87 */
86 if (no_dso) 88 if (no_dso)
87 dso__set_loaded(dso, self->type); 89 dso__set_loaded(dso, map->type);
88 } 90 }
89 } 91 }
90 return self; 92 return map;
91out_delete: 93out_delete:
92 free(self); 94 free(map);
93 return NULL; 95 return NULL;
94} 96}
95 97
@@ -112,48 +114,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
112 return map; 114 return map;
113} 115}
114 116
115void map__delete(struct map *self) 117void map__delete(struct map *map)
116{ 118{
117 free(self); 119 free(map);
118} 120}
119 121
120void map__fixup_start(struct map *self) 122void map__fixup_start(struct map *map)
121{ 123{
122 struct rb_root *symbols = &self->dso->symbols[self->type]; 124 struct rb_root *symbols = &map->dso->symbols[map->type];
123 struct rb_node *nd = rb_first(symbols); 125 struct rb_node *nd = rb_first(symbols);
124 if (nd != NULL) { 126 if (nd != NULL) {
125 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 127 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
126 self->start = sym->start; 128 map->start = sym->start;
127 } 129 }
128} 130}
129 131
130void map__fixup_end(struct map *self) 132void map__fixup_end(struct map *map)
131{ 133{
132 struct rb_root *symbols = &self->dso->symbols[self->type]; 134 struct rb_root *symbols = &map->dso->symbols[map->type];
133 struct rb_node *nd = rb_last(symbols); 135 struct rb_node *nd = rb_last(symbols);
134 if (nd != NULL) { 136 if (nd != NULL) {
135 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 137 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
136 self->end = sym->end; 138 map->end = sym->end;
137 } 139 }
138} 140}
139 141
140#define DSO__DELETED "(deleted)" 142#define DSO__DELETED "(deleted)"
141 143
142int map__load(struct map *self, symbol_filter_t filter) 144int map__load(struct map *map, symbol_filter_t filter)
143{ 145{
144 const char *name = self->dso->long_name; 146 const char *name = map->dso->long_name;
145 int nr; 147 int nr;
146 148
147 if (dso__loaded(self->dso, self->type)) 149 if (dso__loaded(map->dso, map->type))
148 return 0; 150 return 0;
149 151
150 nr = dso__load(self->dso, self, filter); 152 nr = dso__load(map->dso, map, filter);
151 if (nr < 0) { 153 if (nr < 0) {
152 if (self->dso->has_build_id) { 154 if (map->dso->has_build_id) {
153 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 155 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
154 156
155 build_id__sprintf(self->dso->build_id, 157 build_id__sprintf(map->dso->build_id,
156 sizeof(self->dso->build_id), 158 sizeof(map->dso->build_id),
157 sbuild_id); 159 sbuild_id);
158 pr_warning("%s with build id %s not found", 160 pr_warning("%s with build id %s not found",
159 name, sbuild_id); 161 name, sbuild_id);
@@ -183,43 +185,36 @@ int map__load(struct map *self, symbol_filter_t filter)
183 * Only applies to the kernel, as its symtabs aren't relative like the 185 * Only applies to the kernel, as its symtabs aren't relative like the
184 * module ones. 186 * module ones.
185 */ 187 */
186 if (self->dso->kernel) 188 if (map->dso->kernel)
187 map__reloc_vmlinux(self); 189 map__reloc_vmlinux(map);
188 190
189 return 0; 191 return 0;
190} 192}
191 193
192struct symbol *map__find_symbol(struct map *self, u64 addr, 194struct symbol *map__find_symbol(struct map *map, u64 addr,
193 symbol_filter_t filter) 195 symbol_filter_t filter)
194{ 196{
195 if (map__load(self, filter) < 0) 197 if (map__load(map, filter) < 0)
196 return NULL; 198 return NULL;
197 199
198 return dso__find_symbol(self->dso, self->type, addr); 200 return dso__find_symbol(map->dso, map->type, addr);
199} 201}
200 202
201struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 203struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
202 symbol_filter_t filter) 204 symbol_filter_t filter)
203{ 205{
204 if (map__load(self, filter) < 0) 206 if (map__load(map, filter) < 0)
205 return NULL; 207 return NULL;
206 208
207 if (!dso__sorted_by_name(self->dso, self->type)) 209 if (!dso__sorted_by_name(map->dso, map->type))
208 dso__sort_by_name(self->dso, self->type); 210 dso__sort_by_name(map->dso, map->type);
209 211
210 return dso__find_symbol_by_name(self->dso, self->type, name); 212 return dso__find_symbol_by_name(map->dso, map->type, name);
211} 213}
212 214
213struct map *map__clone(struct map *self) 215struct map *map__clone(struct map *map)
214{ 216{
215 struct map *map = malloc(sizeof(*self)); 217 return memdup(map, sizeof(*map));
216
217 if (!map)
218 return NULL;
219
220 memcpy(map, self, sizeof(*self));
221
222 return map;
223} 218}
224 219
225int map__overlap(struct map *l, struct map *r) 220int map__overlap(struct map *l, struct map *r)
@@ -236,10 +231,10 @@ int map__overlap(struct map *l, struct map *r)
236 return 0; 231 return 0;
237} 232}
238 233
239size_t map__fprintf(struct map *self, FILE *fp) 234size_t map__fprintf(struct map *map, FILE *fp)
240{ 235{
241 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 236 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
242 self->start, self->end, self->pgoff, self->dso->name); 237 map->start, map->end, map->pgoff, map->dso->name);
243} 238}
244 239
245size_t map__fprintf_dsoname(struct map *map, FILE *fp) 240size_t map__fprintf_dsoname(struct map *map, FILE *fp)
@@ -527,9 +522,9 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
527 return ip - (s64)map->pgoff; 522 return ip - (s64)map->pgoff;
528} 523}
529 524
530void map__reloc_vmlinux(struct map *self) 525void map__reloc_vmlinux(struct map *map)
531{ 526{
532 struct kmap *kmap = map__kmap(self); 527 struct kmap *kmap = map__kmap(map);
533 s64 reloc; 528 s64 reloc;
534 529
535 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) 530 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
@@ -541,9 +536,9 @@ void map__reloc_vmlinux(struct map *self)
541 if (!reloc) 536 if (!reloc)
542 return; 537 return;
543 538
544 self->map_ip = map__reloc_map_ip; 539 map->map_ip = map__reloc_map_ip;
545 self->unmap_ip = map__reloc_unmap_ip; 540 map->unmap_ip = map__reloc_unmap_ip;
546 self->pgoff = reloc; 541 map->pgoff = reloc;
547} 542}
548 543
549void maps__insert(struct rb_root *maps, struct map *map) 544void maps__insert(struct rb_root *maps, struct map *map)
@@ -566,9 +561,9 @@ void maps__insert(struct rb_root *maps, struct map *map)
566 rb_insert_color(&map->rb_node, maps); 561 rb_insert_color(&map->rb_node, maps);
567} 562}
568 563
569void maps__remove(struct rb_root *self, struct map *map) 564void maps__remove(struct rb_root *maps, struct map *map)
570{ 565{
571 rb_erase(&map->rb_node, self); 566 rb_erase(&map->rb_node, maps);
572} 567}
573 568
574struct map *maps__find(struct rb_root *maps, u64 ip) 569struct map *maps__find(struct rb_root *maps, u64 ip)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index bcb39e2a6965..a887f2c9dfbb 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -57,9 +57,9 @@ struct map_groups {
57 struct machine *machine; 57 struct machine *machine;
58}; 58};
59 59
60static inline struct kmap *map__kmap(struct map *self) 60static inline struct kmap *map__kmap(struct map *map)
61{ 61{
62 return (struct kmap *)(self + 1); 62 return (struct kmap *)(map + 1);
63} 63}
64 64
65static inline u64 map__map_ip(struct map *map, u64 ip) 65static inline u64 map__map_ip(struct map *map, u64 ip)
@@ -85,27 +85,27 @@ struct symbol;
85 85
86typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); 86typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
87 87
88void map__init(struct map *self, enum map_type type, 88void map__init(struct map *map, enum map_type type,
89 u64 start, u64 end, u64 pgoff, struct dso *dso); 89 u64 start, u64 end, u64 pgoff, struct dso *dso);
90struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, 90struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
91 u64 pgoff, u32 pid, char *filename, 91 u64 pgoff, u32 pid, char *filename,
92 enum map_type type); 92 enum map_type type);
93struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 93struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
94void map__delete(struct map *self); 94void map__delete(struct map *map);
95struct map *map__clone(struct map *self); 95struct map *map__clone(struct map *map);
96int map__overlap(struct map *l, struct map *r); 96int map__overlap(struct map *l, struct map *r);
97size_t map__fprintf(struct map *self, FILE *fp); 97size_t map__fprintf(struct map *map, FILE *fp);
98size_t map__fprintf_dsoname(struct map *map, FILE *fp); 98size_t map__fprintf_dsoname(struct map *map, FILE *fp);
99 99
100int map__load(struct map *self, symbol_filter_t filter); 100int map__load(struct map *map, symbol_filter_t filter);
101struct symbol *map__find_symbol(struct map *self, 101struct symbol *map__find_symbol(struct map *map,
102 u64 addr, symbol_filter_t filter); 102 u64 addr, symbol_filter_t filter);
103struct symbol *map__find_symbol_by_name(struct map *self, const char *name, 103struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
104 symbol_filter_t filter); 104 symbol_filter_t filter);
105void map__fixup_start(struct map *self); 105void map__fixup_start(struct map *map);
106void map__fixup_end(struct map *self); 106void map__fixup_end(struct map *map);
107 107
108void map__reloc_vmlinux(struct map *self); 108void map__reloc_vmlinux(struct map *map);
109 109
110size_t __map_groups__fprintf_maps(struct map_groups *mg, 110size_t __map_groups__fprintf_maps(struct map_groups *mg,
111 enum map_type type, int verbose, FILE *fp); 111 enum map_type type, int verbose, FILE *fp);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 2d8d53bec17e..c84f48cf9678 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -380,8 +380,8 @@ static int add_tracepoint(struct list_head **listp, int *idx,
380 return 0; 380 return 0;
381} 381}
382 382
383static int add_tracepoint_multi(struct list_head **list, int *idx, 383static int add_tracepoint_multi_event(struct list_head **list, int *idx,
384 char *sys_name, char *evt_name) 384 char *sys_name, char *evt_name)
385{ 385{
386 char evt_path[MAXPATHLEN]; 386 char evt_path[MAXPATHLEN];
387 struct dirent *evt_ent; 387 struct dirent *evt_ent;
@@ -408,6 +408,47 @@ static int add_tracepoint_multi(struct list_head **list, int *idx,
408 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); 408 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
409 } 409 }
410 410
411 closedir(evt_dir);
412 return ret;
413}
414
415static int add_tracepoint_event(struct list_head **list, int *idx,
416 char *sys_name, char *evt_name)
417{
418 return strpbrk(evt_name, "*?") ?
419 add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
420 add_tracepoint(list, idx, sys_name, evt_name);
421}
422
423static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
424 char *sys_name, char *evt_name)
425{
426 struct dirent *events_ent;
427 DIR *events_dir;
428 int ret = 0;
429
430 events_dir = opendir(tracing_events_path);
431 if (!events_dir) {
432 perror("Can't open event dir");
433 return -1;
434 }
435
436 while (!ret && (events_ent = readdir(events_dir))) {
437 if (!strcmp(events_ent->d_name, ".")
438 || !strcmp(events_ent->d_name, "..")
439 || !strcmp(events_ent->d_name, "enable")
440 || !strcmp(events_ent->d_name, "header_event")
441 || !strcmp(events_ent->d_name, "header_page"))
442 continue;
443
444 if (!strglobmatch(events_ent->d_name, sys_name))
445 continue;
446
447 ret = add_tracepoint_event(list, idx, events_ent->d_name,
448 evt_name);
449 }
450
451 closedir(events_dir);
411 return ret; 452 return ret;
412} 453}
413 454
@@ -420,9 +461,10 @@ int parse_events_add_tracepoint(struct list_head **list, int *idx,
420 if (ret) 461 if (ret)
421 return ret; 462 return ret;
422 463
423 return strpbrk(event, "*?") ? 464 if (strpbrk(sys, "*?"))
424 add_tracepoint_multi(list, idx, sys, event) : 465 return add_tracepoint_multi_sys(list, idx, sys, event);
425 add_tracepoint(list, idx, sys, event); 466 else
467 return add_tracepoint_event(list, idx, sys, event);
426} 468}
427 469
428static int 470static int
@@ -492,7 +534,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
492} 534}
493 535
494static int config_term(struct perf_event_attr *attr, 536static int config_term(struct perf_event_attr *attr,
495 struct parse_events__term *term) 537 struct parse_events_term *term)
496{ 538{
497#define CHECK_TYPE_VAL(type) \ 539#define CHECK_TYPE_VAL(type) \
498do { \ 540do { \
@@ -537,7 +579,7 @@ do { \
537static int config_attr(struct perf_event_attr *attr, 579static int config_attr(struct perf_event_attr *attr,
538 struct list_head *head, int fail) 580 struct list_head *head, int fail)
539{ 581{
540 struct parse_events__term *term; 582 struct parse_events_term *term;
541 583
542 list_for_each_entry(term, head, list) 584 list_for_each_entry(term, head, list)
543 if (config_term(attr, term) && fail) 585 if (config_term(attr, term) && fail)
@@ -563,14 +605,14 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
563 return add_event(list, idx, &attr, NULL); 605 return add_event(list, idx, &attr, NULL);
564} 606}
565 607
566static int parse_events__is_name_term(struct parse_events__term *term) 608static int parse_events__is_name_term(struct parse_events_term *term)
567{ 609{
568 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; 610 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
569} 611}
570 612
571static char *pmu_event_name(struct list_head *head_terms) 613static char *pmu_event_name(struct list_head *head_terms)
572{ 614{
573 struct parse_events__term *term; 615 struct parse_events_term *term;
574 616
575 list_for_each_entry(term, head_terms, list) 617 list_for_each_entry(term, head_terms, list)
576 if (parse_events__is_name_term(term)) 618 if (parse_events__is_name_term(term))
@@ -657,14 +699,6 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
657 int exclude = eu | ek | eh; 699 int exclude = eu | ek | eh;
658 int exclude_GH = evsel ? evsel->exclude_GH : 0; 700 int exclude_GH = evsel ? evsel->exclude_GH : 0;
659 701
660 /*
661 * We are here for group and 'GH' was not set as event
662 * modifier and whatever event/group modifier override
663 * default 'GH' setup.
664 */
665 if (evsel && !exclude_GH)
666 eH = eG = 0;
667
668 memset(mod, 0, sizeof(*mod)); 702 memset(mod, 0, sizeof(*mod));
669 703
670 while (*str) { 704 while (*str) {
@@ -814,7 +848,7 @@ static int parse_events__scanner(const char *str, void *data, int start_token)
814 */ 848 */
815int parse_events_terms(struct list_head *terms, const char *str) 849int parse_events_terms(struct list_head *terms, const char *str)
816{ 850{
817 struct parse_events_data__terms data = { 851 struct parse_events_terms data = {
818 .terms = NULL, 852 .terms = NULL,
819 }; 853 };
820 int ret; 854 int ret;
@@ -830,10 +864,9 @@ int parse_events_terms(struct list_head *terms, const char *str)
830 return ret; 864 return ret;
831} 865}
832 866
833int parse_events(struct perf_evlist *evlist, const char *str, 867int parse_events(struct perf_evlist *evlist, const char *str)
834 int unset __maybe_unused)
835{ 868{
836 struct parse_events_data__events data = { 869 struct parse_events_evlist data = {
837 .list = LIST_HEAD_INIT(data.list), 870 .list = LIST_HEAD_INIT(data.list),
838 .idx = evlist->nr_entries, 871 .idx = evlist->nr_entries,
839 }; 872 };
@@ -843,6 +876,7 @@ int parse_events(struct perf_evlist *evlist, const char *str,
843 if (!ret) { 876 if (!ret) {
844 int entries = data.idx - evlist->nr_entries; 877 int entries = data.idx - evlist->nr_entries;
845 perf_evlist__splice_list_tail(evlist, &data.list, entries); 878 perf_evlist__splice_list_tail(evlist, &data.list, entries);
879 evlist->nr_groups += data.nr_groups;
846 return 0; 880 return 0;
847 } 881 }
848 882
@@ -858,7 +892,7 @@ int parse_events_option(const struct option *opt, const char *str,
858 int unset __maybe_unused) 892 int unset __maybe_unused)
859{ 893{
860 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 894 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
861 int ret = parse_events(evlist, str, unset); 895 int ret = parse_events(evlist, str);
862 896
863 if (ret) { 897 if (ret) {
864 fprintf(stderr, "invalid or unsupported event: '%s'\n", str); 898 fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
@@ -1121,16 +1155,16 @@ void print_events(const char *event_glob, bool name_only)
1121 print_tracepoint_events(NULL, NULL, name_only); 1155 print_tracepoint_events(NULL, NULL, name_only);
1122} 1156}
1123 1157
1124int parse_events__is_hardcoded_term(struct parse_events__term *term) 1158int parse_events__is_hardcoded_term(struct parse_events_term *term)
1125{ 1159{
1126 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 1160 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1127} 1161}
1128 1162
1129static int new_term(struct parse_events__term **_term, int type_val, 1163static int new_term(struct parse_events_term **_term, int type_val,
1130 int type_term, char *config, 1164 int type_term, char *config,
1131 char *str, u64 num) 1165 char *str, u64 num)
1132{ 1166{
1133 struct parse_events__term *term; 1167 struct parse_events_term *term;
1134 1168
1135 term = zalloc(sizeof(*term)); 1169 term = zalloc(sizeof(*term));
1136 if (!term) 1170 if (!term)
@@ -1156,21 +1190,21 @@ static int new_term(struct parse_events__term **_term, int type_val,
1156 return 0; 1190 return 0;
1157} 1191}
1158 1192
1159int parse_events__term_num(struct parse_events__term **term, 1193int parse_events_term__num(struct parse_events_term **term,
1160 int type_term, char *config, u64 num) 1194 int type_term, char *config, u64 num)
1161{ 1195{
1162 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, 1196 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1163 config, NULL, num); 1197 config, NULL, num);
1164} 1198}
1165 1199
1166int parse_events__term_str(struct parse_events__term **term, 1200int parse_events_term__str(struct parse_events_term **term,
1167 int type_term, char *config, char *str) 1201 int type_term, char *config, char *str)
1168{ 1202{
1169 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, 1203 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1170 config, str, 0); 1204 config, str, 0);
1171} 1205}
1172 1206
1173int parse_events__term_sym_hw(struct parse_events__term **term, 1207int parse_events_term__sym_hw(struct parse_events_term **term,
1174 char *config, unsigned idx) 1208 char *config, unsigned idx)
1175{ 1209{
1176 struct event_symbol *sym; 1210 struct event_symbol *sym;
@@ -1188,8 +1222,8 @@ int parse_events__term_sym_hw(struct parse_events__term **term,
1188 (char *) "event", (char *) sym->symbol, 0); 1222 (char *) "event", (char *) sym->symbol, 0);
1189} 1223}
1190 1224
1191int parse_events__term_clone(struct parse_events__term **new, 1225int parse_events_term__clone(struct parse_events_term **new,
1192 struct parse_events__term *term) 1226 struct parse_events_term *term)
1193{ 1227{
1194 return new_term(new, term->type_val, term->type_term, term->config, 1228 return new_term(new, term->type_val, term->type_term, term->config,
1195 term->val.str, term->val.num); 1229 term->val.str, term->val.num);
@@ -1197,7 +1231,7 @@ int parse_events__term_clone(struct parse_events__term **new,
1197 1231
1198void parse_events__free_terms(struct list_head *terms) 1232void parse_events__free_terms(struct list_head *terms)
1199{ 1233{
1200 struct parse_events__term *term, *h; 1234 struct parse_events_term *term, *h;
1201 1235
1202 list_for_each_entry_safe(term, h, terms, list) 1236 list_for_each_entry_safe(term, h, terms, list)
1203 free(term); 1237 free(term);
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b7af80b8bdda..8a4859315fd9 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -29,8 +29,7 @@ const char *event_type(int type);
29 29
30extern int parse_events_option(const struct option *opt, const char *str, 30extern int parse_events_option(const struct option *opt, const char *str,
31 int unset); 31 int unset);
32extern int parse_events(struct perf_evlist *evlist, const char *str, 32extern int parse_events(struct perf_evlist *evlist, const char *str);
33 int unset);
34extern int parse_events_terms(struct list_head *terms, const char *str); 33extern int parse_events_terms(struct list_head *terms, const char *str);
35extern int parse_filter(const struct option *opt, const char *str, int unset); 34extern int parse_filter(const struct option *opt, const char *str, int unset);
36 35
@@ -51,7 +50,7 @@ enum {
51 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, 50 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
52}; 51};
53 52
54struct parse_events__term { 53struct parse_events_term {
55 char *config; 54 char *config;
56 union { 55 union {
57 char *str; 56 char *str;
@@ -62,24 +61,25 @@ struct parse_events__term {
62 struct list_head list; 61 struct list_head list;
63}; 62};
64 63
65struct parse_events_data__events { 64struct parse_events_evlist {
66 struct list_head list; 65 struct list_head list;
67 int idx; 66 int idx;
67 int nr_groups;
68}; 68};
69 69
70struct parse_events_data__terms { 70struct parse_events_terms {
71 struct list_head *terms; 71 struct list_head *terms;
72}; 72};
73 73
74int parse_events__is_hardcoded_term(struct parse_events__term *term); 74int parse_events__is_hardcoded_term(struct parse_events_term *term);
75int parse_events__term_num(struct parse_events__term **_term, 75int parse_events_term__num(struct parse_events_term **_term,
76 int type_term, char *config, u64 num); 76 int type_term, char *config, u64 num);
77int parse_events__term_str(struct parse_events__term **_term, 77int parse_events_term__str(struct parse_events_term **_term,
78 int type_term, char *config, char *str); 78 int type_term, char *config, char *str);
79int parse_events__term_sym_hw(struct parse_events__term **term, 79int parse_events_term__sym_hw(struct parse_events_term **term,
80 char *config, unsigned idx); 80 char *config, unsigned idx);
81int parse_events__term_clone(struct parse_events__term **new, 81int parse_events_term__clone(struct parse_events_term **new,
82 struct parse_events__term *term); 82 struct parse_events_term *term);
83void parse_events__free_terms(struct list_head *terms); 83void parse_events__free_terms(struct list_head *terms);
84int parse_events__modifier_event(struct list_head *list, char *str, bool add); 84int parse_events__modifier_event(struct list_head *list, char *str, bool add);
85int parse_events__modifier_group(struct list_head *list, char *event_mod); 85int parse_events__modifier_group(struct list_head *list, char *event_mod);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 0f9914ae6bac..afc44c18dfe1 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -1,5 +1,4 @@
1%pure-parser 1%pure-parser
2%name-prefix "parse_events_"
3%parse-param {void *_data} 2%parse-param {void *_data}
4%parse-param {void *scanner} 3%parse-param {void *scanner}
5%lex-param {void* scanner} 4%lex-param {void* scanner}
@@ -23,6 +22,14 @@ do { \
23 YYABORT; \ 22 YYABORT; \
24} while (0) 23} while (0)
25 24
25static inc_group_count(struct list_head *list,
26 struct parse_events_evlist *data)
27{
28 /* Count groups only have more than 1 members */
29 if (!list_is_last(list->next, list))
30 data->nr_groups++;
31}
32
26%} 33%}
27 34
28%token PE_START_EVENTS PE_START_TERMS 35%token PE_START_EVENTS PE_START_TERMS
@@ -68,7 +75,7 @@ do { \
68 char *str; 75 char *str;
69 u64 num; 76 u64 num;
70 struct list_head *head; 77 struct list_head *head;
71 struct parse_events__term *term; 78 struct parse_events_term *term;
72} 79}
73%% 80%%
74 81
@@ -79,7 +86,7 @@ PE_START_TERMS start_terms
79 86
80start_events: groups 87start_events: groups
81{ 88{
82 struct parse_events_data__events *data = _data; 89 struct parse_events_evlist *data = _data;
83 90
84 parse_events_update_lists($1, &data->list); 91 parse_events_update_lists($1, &data->list);
85} 92}
@@ -123,6 +130,7 @@ PE_NAME '{' events '}'
123{ 130{
124 struct list_head *list = $3; 131 struct list_head *list = $3;
125 132
133 inc_group_count(list, _data);
126 parse_events__set_leader($1, list); 134 parse_events__set_leader($1, list);
127 $$ = list; 135 $$ = list;
128} 136}
@@ -131,6 +139,7 @@ PE_NAME '{' events '}'
131{ 139{
132 struct list_head *list = $2; 140 struct list_head *list = $2;
133 141
142 inc_group_count(list, _data);
134 parse_events__set_leader(NULL, list); 143 parse_events__set_leader(NULL, list);
135 $$ = list; 144 $$ = list;
136} 145}
@@ -186,7 +195,7 @@ event_def: event_pmu |
186event_pmu: 195event_pmu:
187PE_NAME '/' event_config '/' 196PE_NAME '/' event_config '/'
188{ 197{
189 struct parse_events_data__events *data = _data; 198 struct parse_events_evlist *data = _data;
190 struct list_head *list = NULL; 199 struct list_head *list = NULL;
191 200
192 ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); 201 ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
@@ -202,7 +211,7 @@ PE_VALUE_SYM_SW
202event_legacy_symbol: 211event_legacy_symbol:
203value_sym '/' event_config '/' 212value_sym '/' event_config '/'
204{ 213{
205 struct parse_events_data__events *data = _data; 214 struct parse_events_evlist *data = _data;
206 struct list_head *list = NULL; 215 struct list_head *list = NULL;
207 int type = $1 >> 16; 216 int type = $1 >> 16;
208 int config = $1 & 255; 217 int config = $1 & 255;
@@ -215,7 +224,7 @@ value_sym '/' event_config '/'
215| 224|
216value_sym sep_slash_dc 225value_sym sep_slash_dc
217{ 226{
218 struct parse_events_data__events *data = _data; 227 struct parse_events_evlist *data = _data;
219 struct list_head *list = NULL; 228 struct list_head *list = NULL;
220 int type = $1 >> 16; 229 int type = $1 >> 16;
221 int config = $1 & 255; 230 int config = $1 & 255;
@@ -228,7 +237,7 @@ value_sym sep_slash_dc
228event_legacy_cache: 237event_legacy_cache:
229PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT 238PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
230{ 239{
231 struct parse_events_data__events *data = _data; 240 struct parse_events_evlist *data = _data;
232 struct list_head *list = NULL; 241 struct list_head *list = NULL;
233 242
234 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); 243 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
@@ -237,7 +246,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
237| 246|
238PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT 247PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
239{ 248{
240 struct parse_events_data__events *data = _data; 249 struct parse_events_evlist *data = _data;
241 struct list_head *list = NULL; 250 struct list_head *list = NULL;
242 251
243 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); 252 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
@@ -246,7 +255,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
246| 255|
247PE_NAME_CACHE_TYPE 256PE_NAME_CACHE_TYPE
248{ 257{
249 struct parse_events_data__events *data = _data; 258 struct parse_events_evlist *data = _data;
250 struct list_head *list = NULL; 259 struct list_head *list = NULL;
251 260
252 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); 261 ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
@@ -256,7 +265,7 @@ PE_NAME_CACHE_TYPE
256event_legacy_mem: 265event_legacy_mem:
257PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc 266PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
258{ 267{
259 struct parse_events_data__events *data = _data; 268 struct parse_events_evlist *data = _data;
260 struct list_head *list = NULL; 269 struct list_head *list = NULL;
261 270
262 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, 271 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -266,7 +275,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
266| 275|
267PE_PREFIX_MEM PE_VALUE sep_dc 276PE_PREFIX_MEM PE_VALUE sep_dc
268{ 277{
269 struct parse_events_data__events *data = _data; 278 struct parse_events_evlist *data = _data;
270 struct list_head *list = NULL; 279 struct list_head *list = NULL;
271 280
272 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, 281 ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -277,7 +286,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
277event_legacy_tracepoint: 286event_legacy_tracepoint:
278PE_NAME ':' PE_NAME 287PE_NAME ':' PE_NAME
279{ 288{
280 struct parse_events_data__events *data = _data; 289 struct parse_events_evlist *data = _data;
281 struct list_head *list = NULL; 290 struct list_head *list = NULL;
282 291
283 ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); 292 ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
@@ -287,7 +296,7 @@ PE_NAME ':' PE_NAME
287event_legacy_numeric: 296event_legacy_numeric:
288PE_VALUE ':' PE_VALUE 297PE_VALUE ':' PE_VALUE
289{ 298{
290 struct parse_events_data__events *data = _data; 299 struct parse_events_evlist *data = _data;
291 struct list_head *list = NULL; 300 struct list_head *list = NULL;
292 301
293 ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); 302 ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
@@ -297,7 +306,7 @@ PE_VALUE ':' PE_VALUE
297event_legacy_raw: 306event_legacy_raw:
298PE_RAW 307PE_RAW
299{ 308{
300 struct parse_events_data__events *data = _data; 309 struct parse_events_evlist *data = _data;
301 struct list_head *list = NULL; 310 struct list_head *list = NULL;
302 311
303 ABORT_ON(parse_events_add_numeric(&list, &data->idx, 312 ABORT_ON(parse_events_add_numeric(&list, &data->idx,
@@ -307,7 +316,7 @@ PE_RAW
307 316
308start_terms: event_config 317start_terms: event_config
309{ 318{
310 struct parse_events_data__terms *data = _data; 319 struct parse_events_terms *data = _data;
311 data->terms = $1; 320 data->terms = $1;
312} 321}
313 322
@@ -315,7 +324,7 @@ event_config:
315event_config ',' event_term 324event_config ',' event_term
316{ 325{
317 struct list_head *head = $1; 326 struct list_head *head = $1;
318 struct parse_events__term *term = $3; 327 struct parse_events_term *term = $3;
319 328
320 ABORT_ON(!head); 329 ABORT_ON(!head);
321 list_add_tail(&term->list, head); 330 list_add_tail(&term->list, head);
@@ -325,7 +334,7 @@ event_config ',' event_term
325event_term 334event_term
326{ 335{
327 struct list_head *head = malloc(sizeof(*head)); 336 struct list_head *head = malloc(sizeof(*head));
328 struct parse_events__term *term = $1; 337 struct parse_events_term *term = $1;
329 338
330 ABORT_ON(!head); 339 ABORT_ON(!head);
331 INIT_LIST_HEAD(head); 340 INIT_LIST_HEAD(head);
@@ -336,70 +345,70 @@ event_term
336event_term: 345event_term:
337PE_NAME '=' PE_NAME 346PE_NAME '=' PE_NAME
338{ 347{
339 struct parse_events__term *term; 348 struct parse_events_term *term;
340 349
341 ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER, 350 ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
342 $1, $3)); 351 $1, $3));
343 $$ = term; 352 $$ = term;
344} 353}
345| 354|
346PE_NAME '=' PE_VALUE 355PE_NAME '=' PE_VALUE
347{ 356{
348 struct parse_events__term *term; 357 struct parse_events_term *term;
349 358
350 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 359 ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
351 $1, $3)); 360 $1, $3));
352 $$ = term; 361 $$ = term;
353} 362}
354| 363|
355PE_NAME '=' PE_VALUE_SYM_HW 364PE_NAME '=' PE_VALUE_SYM_HW
356{ 365{
357 struct parse_events__term *term; 366 struct parse_events_term *term;
358 int config = $3 & 255; 367 int config = $3 & 255;
359 368
360 ABORT_ON(parse_events__term_sym_hw(&term, $1, config)); 369 ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
361 $$ = term; 370 $$ = term;
362} 371}
363| 372|
364PE_NAME 373PE_NAME
365{ 374{
366 struct parse_events__term *term; 375 struct parse_events_term *term;
367 376
368 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, 377 ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
369 $1, 1)); 378 $1, 1));
370 $$ = term; 379 $$ = term;
371} 380}
372| 381|
373PE_VALUE_SYM_HW 382PE_VALUE_SYM_HW
374{ 383{
375 struct parse_events__term *term; 384 struct parse_events_term *term;
376 int config = $1 & 255; 385 int config = $1 & 255;
377 386
378 ABORT_ON(parse_events__term_sym_hw(&term, NULL, config)); 387 ABORT_ON(parse_events_term__sym_hw(&term, NULL, config));
379 $$ = term; 388 $$ = term;
380} 389}
381| 390|
382PE_TERM '=' PE_NAME 391PE_TERM '=' PE_NAME
383{ 392{
384 struct parse_events__term *term; 393 struct parse_events_term *term;
385 394
386 ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); 395 ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
387 $$ = term; 396 $$ = term;
388} 397}
389| 398|
390PE_TERM '=' PE_VALUE 399PE_TERM '=' PE_VALUE
391{ 400{
392 struct parse_events__term *term; 401 struct parse_events_term *term;
393 402
394 ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); 403 ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
395 $$ = term; 404 $$ = term;
396} 405}
397| 406|
398PE_TERM 407PE_TERM
399{ 408{
400 struct parse_events__term *term; 409 struct parse_events_term *term;
401 410
402 ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); 411 ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
403 $$ = term; 412 $$ = term;
404} 413}
405 414
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 9bdc60c6f138..4c6f9c490a8d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1,4 +1,3 @@
1
2#include <linux/list.h> 1#include <linux/list.h>
3#include <sys/types.h> 2#include <sys/types.h>
4#include <sys/stat.h> 3#include <sys/stat.h>
@@ -11,6 +10,19 @@
11#include "parse-events.h" 10#include "parse-events.h"
12#include "cpumap.h" 11#include "cpumap.h"
13 12
13struct perf_pmu_alias {
14 char *name;
15 struct list_head terms;
16 struct list_head list;
17};
18
19struct perf_pmu_format {
20 char *name;
21 int value;
22 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
23 struct list_head list;
24};
25
14#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" 26#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
15 27
16int perf_pmu_parse(struct list_head *list, char *name); 28int perf_pmu_parse(struct list_head *list, char *name);
@@ -85,7 +97,7 @@ static int pmu_format(char *name, struct list_head *format)
85 97
86static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) 98static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
87{ 99{
88 struct perf_pmu__alias *alias; 100 struct perf_pmu_alias *alias;
89 char buf[256]; 101 char buf[256];
90 int ret; 102 int ret;
91 103
@@ -172,15 +184,15 @@ static int pmu_aliases(char *name, struct list_head *head)
172 return 0; 184 return 0;
173} 185}
174 186
175static int pmu_alias_terms(struct perf_pmu__alias *alias, 187static int pmu_alias_terms(struct perf_pmu_alias *alias,
176 struct list_head *terms) 188 struct list_head *terms)
177{ 189{
178 struct parse_events__term *term, *clone; 190 struct parse_events_term *term, *clone;
179 LIST_HEAD(list); 191 LIST_HEAD(list);
180 int ret; 192 int ret;
181 193
182 list_for_each_entry(term, &alias->terms, list) { 194 list_for_each_entry(term, &alias->terms, list) {
183 ret = parse_events__term_clone(&clone, term); 195 ret = parse_events_term__clone(&clone, term);
184 if (ret) { 196 if (ret) {
185 parse_events__free_terms(&list); 197 parse_events__free_terms(&list);
186 return ret; 198 return ret;
@@ -360,10 +372,10 @@ struct perf_pmu *perf_pmu__find(char *name)
360 return pmu_lookup(name); 372 return pmu_lookup(name);
361} 373}
362 374
363static struct perf_pmu__format* 375static struct perf_pmu_format *
364pmu_find_format(struct list_head *formats, char *name) 376pmu_find_format(struct list_head *formats, char *name)
365{ 377{
366 struct perf_pmu__format *format; 378 struct perf_pmu_format *format;
367 379
368 list_for_each_entry(format, formats, list) 380 list_for_each_entry(format, formats, list)
369 if (!strcmp(format->name, name)) 381 if (!strcmp(format->name, name))
@@ -403,9 +415,9 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value)
403 */ 415 */
404static int pmu_config_term(struct list_head *formats, 416static int pmu_config_term(struct list_head *formats,
405 struct perf_event_attr *attr, 417 struct perf_event_attr *attr,
406 struct parse_events__term *term) 418 struct parse_events_term *term)
407{ 419{
408 struct perf_pmu__format *format; 420 struct perf_pmu_format *format;
409 __u64 *vp; 421 __u64 *vp;
410 422
411 /* 423 /*
@@ -450,7 +462,7 @@ int perf_pmu__config_terms(struct list_head *formats,
450 struct perf_event_attr *attr, 462 struct perf_event_attr *attr,
451 struct list_head *head_terms) 463 struct list_head *head_terms)
452{ 464{
453 struct parse_events__term *term; 465 struct parse_events_term *term;
454 466
455 list_for_each_entry(term, head_terms, list) 467 list_for_each_entry(term, head_terms, list)
456 if (pmu_config_term(formats, attr, term)) 468 if (pmu_config_term(formats, attr, term))
@@ -471,10 +483,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
471 return perf_pmu__config_terms(&pmu->format, attr, head_terms); 483 return perf_pmu__config_terms(&pmu->format, attr, head_terms);
472} 484}
473 485
474static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, 486static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
475 struct parse_events__term *term) 487 struct parse_events_term *term)
476{ 488{
477 struct perf_pmu__alias *alias; 489 struct perf_pmu_alias *alias;
478 char *name; 490 char *name;
479 491
480 if (parse_events__is_hardcoded_term(term)) 492 if (parse_events__is_hardcoded_term(term))
@@ -507,8 +519,8 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
507 */ 519 */
508int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) 520int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
509{ 521{
510 struct parse_events__term *term, *h; 522 struct parse_events_term *term, *h;
511 struct perf_pmu__alias *alias; 523 struct perf_pmu_alias *alias;
512 int ret; 524 int ret;
513 525
514 list_for_each_entry_safe(term, h, head_terms, list) { 526 list_for_each_entry_safe(term, h, head_terms, list) {
@@ -527,7 +539,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
527int perf_pmu__new_format(struct list_head *list, char *name, 539int perf_pmu__new_format(struct list_head *list, char *name,
528 int config, unsigned long *bits) 540 int config, unsigned long *bits)
529{ 541{
530 struct perf_pmu__format *format; 542 struct perf_pmu_format *format;
531 543
532 format = zalloc(sizeof(*format)); 544 format = zalloc(sizeof(*format));
533 if (!format) 545 if (!format)
@@ -548,7 +560,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
548 if (!to) 560 if (!to)
549 to = from; 561 to = from;
550 562
551 memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS)); 563 memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
552 for (b = from; b <= to; b++) 564 for (b = from; b <= to; b++)
553 set_bit(b, bits); 565 set_bit(b, bits);
554} 566}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index a313ed76a49a..32fe55b659fa 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -12,19 +12,6 @@ enum {
12 12
13#define PERF_PMU_FORMAT_BITS 64 13#define PERF_PMU_FORMAT_BITS 64
14 14
15struct perf_pmu__format {
16 char *name;
17 int value;
18 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
19 struct list_head list;
20};
21
22struct perf_pmu__alias {
23 char *name;
24 struct list_head terms;
25 struct list_head list;
26};
27
28struct perf_pmu { 15struct perf_pmu {
29 char *name; 16 char *name;
30 __u32 type; 17 __u32 type;
@@ -42,7 +29,7 @@ int perf_pmu__config_terms(struct list_head *formats,
42 struct list_head *head_terms); 29 struct list_head *head_terms);
43int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); 30int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
44struct list_head *perf_pmu__alias(struct perf_pmu *pmu, 31struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
45 struct list_head *head_terms); 32 struct list_head *head_terms);
46int perf_pmu_wrap(void); 33int perf_pmu_wrap(void);
47void perf_pmu_error(struct list_head *list, char *name, char const *msg); 34void perf_pmu_error(struct list_head *list, char *name, char const *msg);
48 35
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index ec898047ebb9..bfd7e8509869 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -1,5 +1,4 @@
1 1
2%name-prefix "perf_pmu_"
3%parse-param {struct list_head *format} 2%parse-param {struct list_head *format}
4%parse-param {char *name} 3%parse-param {char *name}
5 4
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1daf5c14e751..be0329394d56 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -413,12 +413,12 @@ static int convert_variable_type(Dwarf_Die *vr_die,
413 dwarf_diename(vr_die), dwarf_diename(&type)); 413 dwarf_diename(vr_die), dwarf_diename(&type));
414 return -EINVAL; 414 return -EINVAL;
415 } 415 }
416 if (die_get_real_type(&type, &type) == NULL) {
417 pr_warning("Failed to get a type"
418 " information.\n");
419 return -ENOENT;
420 }
416 if (ret == DW_TAG_pointer_type) { 421 if (ret == DW_TAG_pointer_type) {
417 if (die_get_real_type(&type, &type) == NULL) {
418 pr_warning("Failed to get a type"
419 " information.\n");
420 return -ENOENT;
421 }
422 while (*ref_ptr) 422 while (*ref_ptr)
423 ref_ptr = &(*ref_ptr)->next; 423 ref_ptr = &(*ref_ptr)->next;
424 /* Add new reference with offset +0 */ 424 /* Add new reference with offset +0 */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index c40c2d33199e..64536a993f4a 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -18,4 +18,5 @@ util/cgroup.c
18util/debugfs.c 18util/debugfs.c
19util/rblist.c 19util/rblist.c
20util/strlist.c 20util/strlist.c
21util/sysfs.c
21../../lib/rbtree.c 22../../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a2657fd96837..925e0c3e6d91 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -1045,3 +1045,12 @@ error:
1045 if (PyErr_Occurred()) 1045 if (PyErr_Occurred())
1046 PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); 1046 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1047} 1047}
1048
1049/*
1050 * Dummy, to avoid dragging all the test_attr infrastructure in the python
1051 * binding.
1052 */
1053void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
1054 int fd, int group_fd, unsigned long flags)
1055{
1056}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index f80605eb1855..eacec859f299 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -292,6 +292,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
292 ns = nsecs - s * NSECS_PER_SEC; 292 ns = nsecs - s * NSECS_PER_SEC;
293 293
294 scripting_context->event_data = data; 294 scripting_context->event_data = data;
295 scripting_context->pevent = evsel->tp_format->pevent;
295 296
296 ENTER; 297 ENTER;
297 SAVETMPS; 298 SAVETMPS;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 14683dfca2ee..e87aa5d9696b 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -265,6 +265,7 @@ static void python_process_tracepoint(union perf_event *perf_event
265 ns = nsecs - s * NSECS_PER_SEC; 265 ns = nsecs - s * NSECS_PER_SEC;
266 266
267 scripting_context->event_data = data; 267 scripting_context->event_data = data;
268 scripting_context->pevent = evsel->tp_format->pevent;
268 269
269 context = PyCObject_FromVoidPtr(scripting_context, NULL); 270 context = PyCObject_FromVoidPtr(scripting_context, NULL);
270 271
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce6f51162386..bd85280bb6e8 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -16,7 +16,6 @@
16#include "cpumap.h" 16#include "cpumap.h"
17#include "event-parse.h" 17#include "event-parse.h"
18#include "perf_regs.h" 18#include "perf_regs.h"
19#include "unwind.h"
20#include "vdso.h" 19#include "vdso.h"
21 20
22static int perf_session__open(struct perf_session *self, bool force) 21static int perf_session__open(struct perf_session *self, bool force)
@@ -87,13 +86,12 @@ void perf_session__set_id_hdr_size(struct perf_session *session)
87{ 86{
88 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 87 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
89 88
90 session->host_machine.id_hdr_size = id_hdr_size;
91 machines__set_id_hdr_size(&session->machines, id_hdr_size); 89 machines__set_id_hdr_size(&session->machines, id_hdr_size);
92} 90}
93 91
94int perf_session__create_kernel_maps(struct perf_session *self) 92int perf_session__create_kernel_maps(struct perf_session *self)
95{ 93{
96 int ret = machine__create_kernel_maps(&self->host_machine); 94 int ret = machine__create_kernel_maps(&self->machines.host);
97 95
98 if (ret >= 0) 96 if (ret >= 0)
99 ret = machines__create_guest_kernel_maps(&self->machines); 97 ret = machines__create_guest_kernel_maps(&self->machines);
@@ -102,8 +100,7 @@ int perf_session__create_kernel_maps(struct perf_session *self)
102 100
103static void perf_session__destroy_kernel_maps(struct perf_session *self) 101static void perf_session__destroy_kernel_maps(struct perf_session *self)
104{ 102{
105 machine__destroy_kernel_maps(&self->host_machine); 103 machines__destroy_kernel_maps(&self->machines);
106 machines__destroy_guest_kernel_maps(&self->machines);
107} 104}
108 105
109struct perf_session *perf_session__new(const char *filename, int mode, 106struct perf_session *perf_session__new(const char *filename, int mode,
@@ -128,22 +125,11 @@ struct perf_session *perf_session__new(const char *filename, int mode,
128 goto out; 125 goto out;
129 126
130 memcpy(self->filename, filename, len); 127 memcpy(self->filename, filename, len);
131 /*
132 * On 64bit we can mmap the data file in one go. No need for tiny mmap
133 * slices. On 32bit we use 32MB.
134 */
135#if BITS_PER_LONG == 64
136 self->mmap_window = ULLONG_MAX;
137#else
138 self->mmap_window = 32 * 1024 * 1024ULL;
139#endif
140 self->machines = RB_ROOT;
141 self->repipe = repipe; 128 self->repipe = repipe;
142 INIT_LIST_HEAD(&self->ordered_samples.samples); 129 INIT_LIST_HEAD(&self->ordered_samples.samples);
143 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 130 INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
144 INIT_LIST_HEAD(&self->ordered_samples.to_free); 131 INIT_LIST_HEAD(&self->ordered_samples.to_free);
145 machine__init(&self->host_machine, "", HOST_KERNEL_ID); 132 machines__init(&self->machines);
146 hists__init(&self->hists);
147 133
148 if (mode == O_RDONLY) { 134 if (mode == O_RDONLY) {
149 if (perf_session__open(self, force) < 0) 135 if (perf_session__open(self, force) < 0)
@@ -171,37 +157,30 @@ out_delete:
171 return NULL; 157 return NULL;
172} 158}
173 159
174static void machine__delete_dead_threads(struct machine *machine)
175{
176 struct thread *n, *t;
177
178 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
179 list_del(&t->node);
180 thread__delete(t);
181 }
182}
183
184static void perf_session__delete_dead_threads(struct perf_session *session) 160static void perf_session__delete_dead_threads(struct perf_session *session)
185{ 161{
186 machine__delete_dead_threads(&session->host_machine); 162 machine__delete_dead_threads(&session->machines.host);
187} 163}
188 164
189static void machine__delete_threads(struct machine *self) 165static void perf_session__delete_threads(struct perf_session *session)
190{ 166{
191 struct rb_node *nd = rb_first(&self->threads); 167 machine__delete_threads(&session->machines.host);
192
193 while (nd) {
194 struct thread *t = rb_entry(nd, struct thread, rb_node);
195
196 rb_erase(&t->rb_node, &self->threads);
197 nd = rb_next(nd);
198 thread__delete(t);
199 }
200} 168}
201 169
202static void perf_session__delete_threads(struct perf_session *session) 170static void perf_session_env__delete(struct perf_session_env *env)
203{ 171{
204 machine__delete_threads(&session->host_machine); 172 free(env->hostname);
173 free(env->os_release);
174 free(env->version);
175 free(env->arch);
176 free(env->cpu_desc);
177 free(env->cpuid);
178
179 free(env->cmdline);
180 free(env->sibling_cores);
181 free(env->sibling_threads);
182 free(env->numa_nodes);
183 free(env->pmu_mappings);
205} 184}
206 185
207void perf_session__delete(struct perf_session *self) 186void perf_session__delete(struct perf_session *self)
@@ -209,198 +188,13 @@ void perf_session__delete(struct perf_session *self)
209 perf_session__destroy_kernel_maps(self); 188 perf_session__destroy_kernel_maps(self);
210 perf_session__delete_dead_threads(self); 189 perf_session__delete_dead_threads(self);
211 perf_session__delete_threads(self); 190 perf_session__delete_threads(self);
212 machine__exit(&self->host_machine); 191 perf_session_env__delete(&self->header.env);
192 machines__exit(&self->machines);
213 close(self->fd); 193 close(self->fd);
214 free(self); 194 free(self);
215 vdso__exit(); 195 vdso__exit();
216} 196}
217 197
218void machine__remove_thread(struct machine *self, struct thread *th)
219{
220 self->last_match = NULL;
221 rb_erase(&th->rb_node, &self->threads);
222 /*
223 * We may have references to this thread, for instance in some hist_entry
224 * instances, so just move them to a separate list.
225 */
226 list_add_tail(&th->node, &self->dead_threads);
227}
228
229static bool symbol__match_parent_regex(struct symbol *sym)
230{
231 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
232 return 1;
233
234 return 0;
235}
236
237static const u8 cpumodes[] = {
238 PERF_RECORD_MISC_USER,
239 PERF_RECORD_MISC_KERNEL,
240 PERF_RECORD_MISC_GUEST_USER,
241 PERF_RECORD_MISC_GUEST_KERNEL
242};
243#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
244
245static void ip__resolve_ams(struct machine *self, struct thread *thread,
246 struct addr_map_symbol *ams,
247 u64 ip)
248{
249 struct addr_location al;
250 size_t i;
251 u8 m;
252
253 memset(&al, 0, sizeof(al));
254
255 for (i = 0; i < NCPUMODES; i++) {
256 m = cpumodes[i];
257 /*
258 * We cannot use the header.misc hint to determine whether a
259 * branch stack address is user, kernel, guest, hypervisor.
260 * Branches may straddle the kernel/user/hypervisor boundaries.
261 * Thus, we have to try consecutively until we find a match
262 * or else, the symbol is unknown
263 */
264 thread__find_addr_location(thread, self, m, MAP__FUNCTION,
265 ip, &al, NULL);
266 if (al.sym)
267 goto found;
268 }
269found:
270 ams->addr = ip;
271 ams->al_addr = al.addr;
272 ams->sym = al.sym;
273 ams->map = al.map;
274}
275
276struct branch_info *machine__resolve_bstack(struct machine *self,
277 struct thread *thr,
278 struct branch_stack *bs)
279{
280 struct branch_info *bi;
281 unsigned int i;
282
283 bi = calloc(bs->nr, sizeof(struct branch_info));
284 if (!bi)
285 return NULL;
286
287 for (i = 0; i < bs->nr; i++) {
288 ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
289 ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
290 bi[i].flags = bs->entries[i].flags;
291 }
292 return bi;
293}
294
295static int machine__resolve_callchain_sample(struct machine *machine,
296 struct thread *thread,
297 struct ip_callchain *chain,
298 struct symbol **parent)
299
300{
301 u8 cpumode = PERF_RECORD_MISC_USER;
302 unsigned int i;
303 int err;
304
305 callchain_cursor_reset(&callchain_cursor);
306
307 if (chain->nr > PERF_MAX_STACK_DEPTH) {
308 pr_warning("corrupted callchain. skipping...\n");
309 return 0;
310 }
311
312 for (i = 0; i < chain->nr; i++) {
313 u64 ip;
314 struct addr_location al;
315
316 if (callchain_param.order == ORDER_CALLEE)
317 ip = chain->ips[i];
318 else
319 ip = chain->ips[chain->nr - i - 1];
320
321 if (ip >= PERF_CONTEXT_MAX) {
322 switch (ip) {
323 case PERF_CONTEXT_HV:
324 cpumode = PERF_RECORD_MISC_HYPERVISOR;
325 break;
326 case PERF_CONTEXT_KERNEL:
327 cpumode = PERF_RECORD_MISC_KERNEL;
328 break;
329 case PERF_CONTEXT_USER:
330 cpumode = PERF_RECORD_MISC_USER;
331 break;
332 default:
333 pr_debug("invalid callchain context: "
334 "%"PRId64"\n", (s64) ip);
335 /*
336 * It seems the callchain is corrupted.
337 * Discard all.
338 */
339 callchain_cursor_reset(&callchain_cursor);
340 return 0;
341 }
342 continue;
343 }
344
345 al.filtered = false;
346 thread__find_addr_location(thread, machine, cpumode,
347 MAP__FUNCTION, ip, &al, NULL);
348 if (al.sym != NULL) {
349 if (sort__has_parent && !*parent &&
350 symbol__match_parent_regex(al.sym))
351 *parent = al.sym;
352 if (!symbol_conf.use_callchain)
353 break;
354 }
355
356 err = callchain_cursor_append(&callchain_cursor,
357 ip, al.map, al.sym);
358 if (err)
359 return err;
360 }
361
362 return 0;
363}
364
365static int unwind_entry(struct unwind_entry *entry, void *arg)
366{
367 struct callchain_cursor *cursor = arg;
368 return callchain_cursor_append(cursor, entry->ip,
369 entry->map, entry->sym);
370}
371
372int machine__resolve_callchain(struct machine *machine,
373 struct perf_evsel *evsel,
374 struct thread *thread,
375 struct perf_sample *sample,
376 struct symbol **parent)
377
378{
379 int ret;
380
381 callchain_cursor_reset(&callchain_cursor);
382
383 ret = machine__resolve_callchain_sample(machine, thread,
384 sample->callchain, parent);
385 if (ret)
386 return ret;
387
388 /* Can we do dwarf post unwind? */
389 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
390 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
391 return 0;
392
393 /* Bail out if nothing was captured. */
394 if ((!sample->user_regs.regs) ||
395 (!sample->user_stack.size))
396 return 0;
397
398 return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
399 thread, evsel->attr.sample_regs_user,
400 sample);
401
402}
403
404static int process_event_synth_tracing_data_stub(union perf_event *event 198static int process_event_synth_tracing_data_stub(union perf_event *event
405 __maybe_unused, 199 __maybe_unused,
406 struct perf_session *session 200 struct perf_session *session
@@ -1027,7 +821,7 @@ static struct machine *
1027 return perf_session__findnew_machine(session, pid); 821 return perf_session__findnew_machine(session, pid);
1028 } 822 }
1029 823
1030 return perf_session__find_host_machine(session); 824 return &session->machines.host;
1031} 825}
1032 826
1033static int perf_session_deliver_event(struct perf_session *session, 827static int perf_session_deliver_event(struct perf_session *session,
@@ -1065,11 +859,11 @@ static int perf_session_deliver_event(struct perf_session *session,
1065 case PERF_RECORD_SAMPLE: 859 case PERF_RECORD_SAMPLE:
1066 dump_sample(evsel, event, sample); 860 dump_sample(evsel, event, sample);
1067 if (evsel == NULL) { 861 if (evsel == NULL) {
1068 ++session->hists.stats.nr_unknown_id; 862 ++session->stats.nr_unknown_id;
1069 return 0; 863 return 0;
1070 } 864 }
1071 if (machine == NULL) { 865 if (machine == NULL) {
1072 ++session->hists.stats.nr_unprocessable_samples; 866 ++session->stats.nr_unprocessable_samples;
1073 return 0; 867 return 0;
1074 } 868 }
1075 return tool->sample(tool, event, sample, evsel, machine); 869 return tool->sample(tool, event, sample, evsel, machine);
@@ -1083,7 +877,7 @@ static int perf_session_deliver_event(struct perf_session *session,
1083 return tool->exit(tool, event, sample, machine); 877 return tool->exit(tool, event, sample, machine);
1084 case PERF_RECORD_LOST: 878 case PERF_RECORD_LOST:
1085 if (tool->lost == perf_event__process_lost) 879 if (tool->lost == perf_event__process_lost)
1086 session->hists.stats.total_lost += event->lost.lost; 880 session->stats.total_lost += event->lost.lost;
1087 return tool->lost(tool, event, sample, machine); 881 return tool->lost(tool, event, sample, machine);
1088 case PERF_RECORD_READ: 882 case PERF_RECORD_READ:
1089 return tool->read(tool, event, sample, evsel, machine); 883 return tool->read(tool, event, sample, evsel, machine);
@@ -1092,7 +886,7 @@ static int perf_session_deliver_event(struct perf_session *session,
1092 case PERF_RECORD_UNTHROTTLE: 886 case PERF_RECORD_UNTHROTTLE:
1093 return tool->unthrottle(tool, event, sample, machine); 887 return tool->unthrottle(tool, event, sample, machine);
1094 default: 888 default:
1095 ++session->hists.stats.nr_unknown_events; 889 ++session->stats.nr_unknown_events;
1096 return -1; 890 return -1;
1097 } 891 }
1098} 892}
@@ -1106,8 +900,8 @@ static int perf_session__preprocess_sample(struct perf_session *session,
1106 900
1107 if (!ip_callchain__valid(sample->callchain, event)) { 901 if (!ip_callchain__valid(sample->callchain, event)) {
1108 pr_debug("call-chain problem with event, skipping it.\n"); 902 pr_debug("call-chain problem with event, skipping it.\n");
1109 ++session->hists.stats.nr_invalid_chains; 903 ++session->stats.nr_invalid_chains;
1110 session->hists.stats.total_invalid_chains += sample->period; 904 session->stats.total_invalid_chains += sample->period;
1111 return -EINVAL; 905 return -EINVAL;
1112 } 906 }
1113 return 0; 907 return 0;
@@ -1165,7 +959,7 @@ static int perf_session__process_event(struct perf_session *session,
1165 if (event->header.type >= PERF_RECORD_HEADER_MAX) 959 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1166 return -EINVAL; 960 return -EINVAL;
1167 961
1168 hists__inc_nr_events(&session->hists, event->header.type); 962 events_stats__inc(&session->stats, event->header.type);
1169 963
1170 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 964 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1171 return perf_session__process_user_event(session, event, tool, file_offset); 965 return perf_session__process_user_event(session, event, tool, file_offset);
@@ -1201,7 +995,7 @@ void perf_event_header__bswap(struct perf_event_header *self)
1201 995
1202struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 996struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1203{ 997{
1204 return machine__findnew_thread(&session->host_machine, pid); 998 return machine__findnew_thread(&session->machines.host, pid);
1205} 999}
1206 1000
1207static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1001static struct thread *perf_session__register_idle_thread(struct perf_session *self)
@@ -1220,39 +1014,39 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
1220 const struct perf_tool *tool) 1014 const struct perf_tool *tool)
1221{ 1015{
1222 if (tool->lost == perf_event__process_lost && 1016 if (tool->lost == perf_event__process_lost &&
1223 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { 1017 session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1224 ui__warning("Processed %d events and lost %d chunks!\n\n" 1018 ui__warning("Processed %d events and lost %d chunks!\n\n"
1225 "Check IO/CPU overload!\n\n", 1019 "Check IO/CPU overload!\n\n",
1226 session->hists.stats.nr_events[0], 1020 session->stats.nr_events[0],
1227 session->hists.stats.nr_events[PERF_RECORD_LOST]); 1021 session->stats.nr_events[PERF_RECORD_LOST]);
1228 } 1022 }
1229 1023
1230 if (session->hists.stats.nr_unknown_events != 0) { 1024 if (session->stats.nr_unknown_events != 0) {
1231 ui__warning("Found %u unknown events!\n\n" 1025 ui__warning("Found %u unknown events!\n\n"
1232 "Is this an older tool processing a perf.data " 1026 "Is this an older tool processing a perf.data "
1233 "file generated by a more recent tool?\n\n" 1027 "file generated by a more recent tool?\n\n"
1234 "If that is not the case, consider " 1028 "If that is not the case, consider "
1235 "reporting to linux-kernel@vger.kernel.org.\n\n", 1029 "reporting to linux-kernel@vger.kernel.org.\n\n",
1236 session->hists.stats.nr_unknown_events); 1030 session->stats.nr_unknown_events);
1237 } 1031 }
1238 1032
1239 if (session->hists.stats.nr_unknown_id != 0) { 1033 if (session->stats.nr_unknown_id != 0) {
1240 ui__warning("%u samples with id not present in the header\n", 1034 ui__warning("%u samples with id not present in the header\n",
1241 session->hists.stats.nr_unknown_id); 1035 session->stats.nr_unknown_id);
1242 } 1036 }
1243 1037
1244 if (session->hists.stats.nr_invalid_chains != 0) { 1038 if (session->stats.nr_invalid_chains != 0) {
1245 ui__warning("Found invalid callchains!\n\n" 1039 ui__warning("Found invalid callchains!\n\n"
1246 "%u out of %u events were discarded for this reason.\n\n" 1040 "%u out of %u events were discarded for this reason.\n\n"
1247 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1041 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1248 session->hists.stats.nr_invalid_chains, 1042 session->stats.nr_invalid_chains,
1249 session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); 1043 session->stats.nr_events[PERF_RECORD_SAMPLE]);
1250 } 1044 }
1251 1045
1252 if (session->hists.stats.nr_unprocessable_samples != 0) { 1046 if (session->stats.nr_unprocessable_samples != 0) {
1253 ui__warning("%u unprocessable samples recorded.\n" 1047 ui__warning("%u unprocessable samples recorded.\n"
1254 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1048 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1255 session->hists.stats.nr_unprocessable_samples); 1049 session->stats.nr_unprocessable_samples);
1256 } 1050 }
1257} 1051}
1258 1052
@@ -1369,6 +1163,18 @@ fetch_mmaped_event(struct perf_session *session,
1369 return event; 1163 return event;
1370} 1164}
1371 1165
1166/*
1167 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1168 * slices. On 32bit we use 32MB.
1169 */
1170#if BITS_PER_LONG == 64
1171#define MMAP_SIZE ULLONG_MAX
1172#define NUM_MMAPS 1
1173#else
1174#define MMAP_SIZE (32 * 1024 * 1024ULL)
1175#define NUM_MMAPS 128
1176#endif
1177
1372int __perf_session__process_events(struct perf_session *session, 1178int __perf_session__process_events(struct perf_session *session,
1373 u64 data_offset, u64 data_size, 1179 u64 data_offset, u64 data_size,
1374 u64 file_size, struct perf_tool *tool) 1180 u64 file_size, struct perf_tool *tool)
@@ -1376,7 +1182,7 @@ int __perf_session__process_events(struct perf_session *session,
1376 u64 head, page_offset, file_offset, file_pos, progress_next; 1182 u64 head, page_offset, file_offset, file_pos, progress_next;
1377 int err, mmap_prot, mmap_flags, map_idx = 0; 1183 int err, mmap_prot, mmap_flags, map_idx = 0;
1378 size_t mmap_size; 1184 size_t mmap_size;
1379 char *buf, *mmaps[8]; 1185 char *buf, *mmaps[NUM_MMAPS];
1380 union perf_event *event; 1186 union perf_event *event;
1381 uint32_t size; 1187 uint32_t size;
1382 1188
@@ -1391,7 +1197,7 @@ int __perf_session__process_events(struct perf_session *session,
1391 1197
1392 progress_next = file_size / 16; 1198 progress_next = file_size / 16;
1393 1199
1394 mmap_size = session->mmap_window; 1200 mmap_size = MMAP_SIZE;
1395 if (mmap_size > file_size) 1201 if (mmap_size > file_size)
1396 mmap_size = file_size; 1202 mmap_size = file_size;
1397 1203
@@ -1526,16 +1332,13 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1526 1332
1527size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1333size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
1528{ 1334{
1529 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + 1335 return machines__fprintf_dsos(&self->machines, fp);
1530 __dsos__fprintf(&self->host_machine.user_dsos, fp) +
1531 machines__fprintf_dsos(&self->machines, fp);
1532} 1336}
1533 1337
1534size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1338size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
1535 bool with_hits) 1339 bool (skip)(struct dso *dso, int parm), int parm)
1536{ 1340{
1537 size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); 1341 return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
1538 return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
1539} 1342}
1540 1343
1541size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1344size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
@@ -1543,11 +1346,11 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1543 struct perf_evsel *pos; 1346 struct perf_evsel *pos;
1544 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1347 size_t ret = fprintf(fp, "Aggregated stats:\n");
1545 1348
1546 ret += hists__fprintf_nr_events(&session->hists, fp); 1349 ret += events_stats__fprintf(&session->stats, fp);
1547 1350
1548 list_for_each_entry(pos, &session->evlist->entries, node) { 1351 list_for_each_entry(pos, &session->evlist->entries, node) {
1549 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1352 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
1550 ret += hists__fprintf_nr_events(&pos->hists, fp); 1353 ret += events_stats__fprintf(&pos->hists.stats, fp);
1551 } 1354 }
1552 1355
1553 return ret; 1356 return ret;
@@ -1559,7 +1362,7 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1559 * FIXME: Here we have to actually print all the machines in this 1362 * FIXME: Here we have to actually print all the machines in this
1560 * session, not just the host... 1363 * session, not just the host...
1561 */ 1364 */
1562 return machine__fprintf(&session->host_machine, fp); 1365 return machine__fprintf(&session->machines.host, fp);
1563} 1366}
1564 1367
1565void perf_session__remove_thread(struct perf_session *session, 1368void perf_session__remove_thread(struct perf_session *session,
@@ -1568,10 +1371,10 @@ void perf_session__remove_thread(struct perf_session *session,
1568 /* 1371 /*
1569 * FIXME: This one makes no sense, we need to remove the thread from 1372 * FIXME: This one makes no sense, we need to remove the thread from
1570 * the machine it belongs to, perf_session can have many machines, so 1373 * the machine it belongs to, perf_session can have many machines, so
1571 * doing it always on ->host_machine is wrong. Fix when auditing all 1374 * doing it always on ->machines.host is wrong. Fix when auditing all
1572 * the 'perf kvm' code. 1375 * the 'perf kvm' code.
1573 */ 1376 */
1574 machine__remove_thread(&session->host_machine, th); 1377 machine__remove_thread(&session->machines.host, th);
1575} 1378}
1576 1379
1577struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1380struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index cea133a6bdf1..b5c0847edfa9 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -30,16 +30,10 @@ struct ordered_samples {
30struct perf_session { 30struct perf_session {
31 struct perf_header header; 31 struct perf_header header;
32 unsigned long size; 32 unsigned long size;
33 unsigned long mmap_window; 33 struct machines machines;
34 struct machine host_machine;
35 struct rb_root machines;
36 struct perf_evlist *evlist; 34 struct perf_evlist *evlist;
37 struct pevent *pevent; 35 struct pevent *pevent;
38 /* 36 struct events_stats stats;
39 * FIXME: Need to split this up further, we need global
40 * stats + per event stats.
41 */
42 struct hists hists;
43 int fd; 37 int fd;
44 bool fd_pipe; 38 bool fd_pipe;
45 bool repipe; 39 bool repipe;
@@ -54,7 +48,7 @@ struct perf_tool;
54struct perf_session *perf_session__new(const char *filename, int mode, 48struct perf_session *perf_session__new(const char *filename, int mode,
55 bool force, bool repipe, 49 bool force, bool repipe,
56 struct perf_tool *tool); 50 struct perf_tool *tool);
57void perf_session__delete(struct perf_session *self); 51void perf_session__delete(struct perf_session *session);
58 52
59void perf_event_header__bswap(struct perf_event_header *self); 53void perf_event_header__bswap(struct perf_event_header *self);
60 54
@@ -81,43 +75,24 @@ void perf_session__set_id_hdr_size(struct perf_session *session);
81void perf_session__remove_thread(struct perf_session *self, struct thread *th); 75void perf_session__remove_thread(struct perf_session *self, struct thread *th);
82 76
83static inline 77static inline
84struct machine *perf_session__find_host_machine(struct perf_session *self)
85{
86 return &self->host_machine;
87}
88
89static inline
90struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) 78struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
91{ 79{
92 if (pid == HOST_KERNEL_ID)
93 return &self->host_machine;
94 return machines__find(&self->machines, pid); 80 return machines__find(&self->machines, pid);
95} 81}
96 82
97static inline 83static inline
98struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid) 84struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
99{ 85{
100 if (pid == HOST_KERNEL_ID)
101 return &self->host_machine;
102 return machines__findnew(&self->machines, pid); 86 return machines__findnew(&self->machines, pid);
103} 87}
104 88
105static inline
106void perf_session__process_machines(struct perf_session *self,
107 struct perf_tool *tool,
108 machine__process_t process)
109{
110 process(&self->host_machine, tool);
111 return machines__process(&self->machines, process, tool);
112}
113
114struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 89struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
115size_t perf_session__fprintf(struct perf_session *self, FILE *fp); 90size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
116 91
117size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); 92size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
118 93
119size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, 94size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
120 FILE *fp, bool with_hits); 95 bool (fn)(struct dso *dso, int parm), int parm);
121 96
122size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); 97size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
123 98
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cfd1c0feb32d..d41926cb9e3f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -60,7 +60,7 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
60static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, 60static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
61 size_t size, unsigned int width) 61 size_t size, unsigned int width)
62{ 62{
63 return repsep_snprintf(bf, size, "%*s:%5d", width, 63 return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
64 self->thread->comm ?: "", self->thread->pid); 64 self->thread->comm ?: "", self->thread->pid);
65} 65}
66 66
@@ -97,6 +97,16 @@ static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
97 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); 97 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
98} 98}
99 99
100struct sort_entry sort_comm = {
101 .se_header = "Command",
102 .se_cmp = sort__comm_cmp,
103 .se_collapse = sort__comm_collapse,
104 .se_snprintf = hist_entry__comm_snprintf,
105 .se_width_idx = HISTC_COMM,
106};
107
108/* --sort dso */
109
100static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 110static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
101{ 111{
102 struct dso *dso_l = map_l ? map_l->dso : NULL; 112 struct dso *dso_l = map_l ? map_l->dso : NULL;
@@ -117,40 +127,12 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
117 return strcmp(dso_name_l, dso_name_r); 127 return strcmp(dso_name_l, dso_name_r);
118} 128}
119 129
120struct sort_entry sort_comm = {
121 .se_header = "Command",
122 .se_cmp = sort__comm_cmp,
123 .se_collapse = sort__comm_collapse,
124 .se_snprintf = hist_entry__comm_snprintf,
125 .se_width_idx = HISTC_COMM,
126};
127
128/* --sort dso */
129
130static int64_t 130static int64_t
131sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 131sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
132{ 132{
133 return _sort__dso_cmp(left->ms.map, right->ms.map); 133 return _sort__dso_cmp(left->ms.map, right->ms.map);
134} 134}
135 135
136
137static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r,
138 u64 ip_l, u64 ip_r)
139{
140 if (!sym_l || !sym_r)
141 return cmp_null(sym_l, sym_r);
142
143 if (sym_l == sym_r)
144 return 0;
145
146 if (sym_l)
147 ip_l = sym_l->start;
148 if (sym_r)
149 ip_r = sym_r->start;
150
151 return (int64_t)(ip_r - ip_l);
152}
153
154static int _hist_entry__dso_snprintf(struct map *map, char *bf, 136static int _hist_entry__dso_snprintf(struct map *map, char *bf,
155 size_t size, unsigned int width) 137 size_t size, unsigned int width)
156{ 138{
@@ -169,9 +151,43 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
169 return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); 151 return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
170} 152}
171 153
154struct sort_entry sort_dso = {
155 .se_header = "Shared Object",
156 .se_cmp = sort__dso_cmp,
157 .se_snprintf = hist_entry__dso_snprintf,
158 .se_width_idx = HISTC_DSO,
159};
160
161/* --sort symbol */
162
163static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
164{
165 u64 ip_l, ip_r;
166
167 if (!sym_l || !sym_r)
168 return cmp_null(sym_l, sym_r);
169
170 if (sym_l == sym_r)
171 return 0;
172
173 ip_l = sym_l->start;
174 ip_r = sym_r->start;
175
176 return (int64_t)(ip_r - ip_l);
177}
178
179static int64_t
180sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
181{
182 if (!left->ms.sym && !right->ms.sym)
183 return right->level - left->level;
184
185 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
186}
187
172static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 188static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
173 u64 ip, char level, char *bf, size_t size, 189 u64 ip, char level, char *bf, size_t size,
174 unsigned int width __maybe_unused) 190 unsigned int width)
175{ 191{
176 size_t ret = 0; 192 size_t ret = 0;
177 193
@@ -197,43 +213,13 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
197 return ret; 213 return ret;
198} 214}
199 215
200
201struct sort_entry sort_dso = {
202 .se_header = "Shared Object",
203 .se_cmp = sort__dso_cmp,
204 .se_snprintf = hist_entry__dso_snprintf,
205 .se_width_idx = HISTC_DSO,
206};
207
208static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, 216static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
209 size_t size, 217 size_t size, unsigned int width)
210 unsigned int width __maybe_unused)
211{ 218{
212 return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, 219 return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
213 self->level, bf, size, width); 220 self->level, bf, size, width);
214} 221}
215 222
216/* --sort symbol */
217static int64_t
218sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
219{
220 u64 ip_l, ip_r;
221
222 if (!left->ms.sym && !right->ms.sym)
223 return right->level - left->level;
224
225 if (!left->ms.sym || !right->ms.sym)
226 return cmp_null(left->ms.sym, right->ms.sym);
227
228 if (left->ms.sym == right->ms.sym)
229 return 0;
230
231 ip_l = left->ms.sym->start;
232 ip_r = right->ms.sym->start;
233
234 return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r);
235}
236
237struct sort_entry sort_sym = { 223struct sort_entry sort_sym = {
238 .se_header = "Symbol", 224 .se_header = "Symbol",
239 .se_cmp = sort__sym_cmp, 225 .se_cmp = sort__sym_cmp,
@@ -253,7 +239,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
253 size_t size, 239 size_t size,
254 unsigned int width __maybe_unused) 240 unsigned int width __maybe_unused)
255{ 241{
256 FILE *fp; 242 FILE *fp = NULL;
257 char cmd[PATH_MAX + 2], *path = self->srcline, *nl; 243 char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
258 size_t line_len; 244 size_t line_len;
259 245
@@ -274,7 +260,6 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
274 260
275 if (getline(&path, &line_len, fp) < 0 || !line_len) 261 if (getline(&path, &line_len, fp) < 0 || !line_len)
276 goto out_ip; 262 goto out_ip;
277 fclose(fp);
278 self->srcline = strdup(path); 263 self->srcline = strdup(path);
279 if (self->srcline == NULL) 264 if (self->srcline == NULL)
280 goto out_ip; 265 goto out_ip;
@@ -284,8 +269,12 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
284 *nl = '\0'; 269 *nl = '\0';
285 path = self->srcline; 270 path = self->srcline;
286out_path: 271out_path:
272 if (fp)
273 pclose(fp);
287 return repsep_snprintf(bf, size, "%s", path); 274 return repsep_snprintf(bf, size, "%s", path);
288out_ip: 275out_ip:
276 if (fp)
277 pclose(fp);
289 return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); 278 return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
290} 279}
291 280
@@ -335,7 +324,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
335static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, 324static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
336 size_t size, unsigned int width) 325 size_t size, unsigned int width)
337{ 326{
338 return repsep_snprintf(bf, size, "%-*d", width, self->cpu); 327 return repsep_snprintf(bf, size, "%*d", width, self->cpu);
339} 328}
340 329
341struct sort_entry sort_cpu = { 330struct sort_entry sort_cpu = {
@@ -345,6 +334,8 @@ struct sort_entry sort_cpu = {
345 .se_width_idx = HISTC_CPU, 334 .se_width_idx = HISTC_CPU,
346}; 335};
347 336
337/* sort keys for branch stacks */
338
348static int64_t 339static int64_t
349sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 340sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
350{ 341{
@@ -359,13 +350,6 @@ static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
359 bf, size, width); 350 bf, size, width);
360} 351}
361 352
362struct sort_entry sort_dso_from = {
363 .se_header = "Source Shared Object",
364 .se_cmp = sort__dso_from_cmp,
365 .se_snprintf = hist_entry__dso_from_snprintf,
366 .se_width_idx = HISTC_DSO_FROM,
367};
368
369static int64_t 353static int64_t
370sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 354sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
371{ 355{
@@ -389,8 +373,7 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
389 if (!from_l->sym && !from_r->sym) 373 if (!from_l->sym && !from_r->sym)
390 return right->level - left->level; 374 return right->level - left->level;
391 375
392 return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, 376 return _sort__sym_cmp(from_l->sym, from_r->sym);
393 from_r->addr);
394} 377}
395 378
396static int64_t 379static int64_t
@@ -402,12 +385,11 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
402 if (!to_l->sym && !to_r->sym) 385 if (!to_l->sym && !to_r->sym)
403 return right->level - left->level; 386 return right->level - left->level;
404 387
405 return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); 388 return _sort__sym_cmp(to_l->sym, to_r->sym);
406} 389}
407 390
408static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, 391static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
409 size_t size, 392 size_t size, unsigned int width)
410 unsigned int width __maybe_unused)
411{ 393{
412 struct addr_map_symbol *from = &self->branch_info->from; 394 struct addr_map_symbol *from = &self->branch_info->from;
413 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 395 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
@@ -416,8 +398,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
416} 398}
417 399
418static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, 400static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
419 size_t size, 401 size_t size, unsigned int width)
420 unsigned int width __maybe_unused)
421{ 402{
422 struct addr_map_symbol *to = &self->branch_info->to; 403 struct addr_map_symbol *to = &self->branch_info->to;
423 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 404 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
@@ -425,6 +406,13 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
425 406
426} 407}
427 408
409struct sort_entry sort_dso_from = {
410 .se_header = "Source Shared Object",
411 .se_cmp = sort__dso_from_cmp,
412 .se_snprintf = hist_entry__dso_from_snprintf,
413 .se_width_idx = HISTC_DSO_FROM,
414};
415
428struct sort_entry sort_dso_to = { 416struct sort_entry sort_dso_to = {
429 .se_header = "Target Shared Object", 417 .se_header = "Target Shared Object",
430 .se_cmp = sort__dso_to_cmp, 418 .se_cmp = sort__dso_to_cmp,
@@ -484,30 +472,40 @@ struct sort_dimension {
484 472
485#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 473#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
486 474
487static struct sort_dimension sort_dimensions[] = { 475static struct sort_dimension common_sort_dimensions[] = {
488 DIM(SORT_PID, "pid", sort_thread), 476 DIM(SORT_PID, "pid", sort_thread),
489 DIM(SORT_COMM, "comm", sort_comm), 477 DIM(SORT_COMM, "comm", sort_comm),
490 DIM(SORT_DSO, "dso", sort_dso), 478 DIM(SORT_DSO, "dso", sort_dso),
491 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
492 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
493 DIM(SORT_SYM, "symbol", sort_sym), 479 DIM(SORT_SYM, "symbol", sort_sym),
494 DIM(SORT_SYM_TO, "symbol_from", sort_sym_from),
495 DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to),
496 DIM(SORT_PARENT, "parent", sort_parent), 480 DIM(SORT_PARENT, "parent", sort_parent),
497 DIM(SORT_CPU, "cpu", sort_cpu), 481 DIM(SORT_CPU, "cpu", sort_cpu),
498 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
499 DIM(SORT_SRCLINE, "srcline", sort_srcline), 482 DIM(SORT_SRCLINE, "srcline", sort_srcline),
500}; 483};
501 484
485#undef DIM
486
487#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
488
489static struct sort_dimension bstack_sort_dimensions[] = {
490 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
491 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
492 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
493 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
494 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
495};
496
497#undef DIM
498
502int sort_dimension__add(const char *tok) 499int sort_dimension__add(const char *tok)
503{ 500{
504 unsigned int i; 501 unsigned int i;
505 502
506 for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { 503 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
507 struct sort_dimension *sd = &sort_dimensions[i]; 504 struct sort_dimension *sd = &common_sort_dimensions[i];
508 505
509 if (strncasecmp(tok, sd->name, strlen(tok))) 506 if (strncasecmp(tok, sd->name, strlen(tok)))
510 continue; 507 continue;
508
511 if (sd->entry == &sort_parent) { 509 if (sd->entry == &sort_parent) {
512 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 510 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
513 if (ret) { 511 if (ret) {
@@ -518,9 +516,7 @@ int sort_dimension__add(const char *tok)
518 return -EINVAL; 516 return -EINVAL;
519 } 517 }
520 sort__has_parent = 1; 518 sort__has_parent = 1;
521 } else if (sd->entry == &sort_sym || 519 } else if (sd->entry == &sort_sym) {
522 sd->entry == &sort_sym_from ||
523 sd->entry == &sort_sym_to) {
524 sort__has_sym = 1; 520 sort__has_sym = 1;
525 } 521 }
526 522
@@ -530,52 +526,69 @@ int sort_dimension__add(const char *tok)
530 if (sd->entry->se_collapse) 526 if (sd->entry->se_collapse)
531 sort__need_collapse = 1; 527 sort__need_collapse = 1;
532 528
533 if (list_empty(&hist_entry__sort_list)) { 529 if (list_empty(&hist_entry__sort_list))
534 if (!strcmp(sd->name, "pid")) 530 sort__first_dimension = i;
535 sort__first_dimension = SORT_PID;
536 else if (!strcmp(sd->name, "comm"))
537 sort__first_dimension = SORT_COMM;
538 else if (!strcmp(sd->name, "dso"))
539 sort__first_dimension = SORT_DSO;
540 else if (!strcmp(sd->name, "symbol"))
541 sort__first_dimension = SORT_SYM;
542 else if (!strcmp(sd->name, "parent"))
543 sort__first_dimension = SORT_PARENT;
544 else if (!strcmp(sd->name, "cpu"))
545 sort__first_dimension = SORT_CPU;
546 else if (!strcmp(sd->name, "symbol_from"))
547 sort__first_dimension = SORT_SYM_FROM;
548 else if (!strcmp(sd->name, "symbol_to"))
549 sort__first_dimension = SORT_SYM_TO;
550 else if (!strcmp(sd->name, "dso_from"))
551 sort__first_dimension = SORT_DSO_FROM;
552 else if (!strcmp(sd->name, "dso_to"))
553 sort__first_dimension = SORT_DSO_TO;
554 else if (!strcmp(sd->name, "mispredict"))
555 sort__first_dimension = SORT_MISPREDICT;
556 }
557 531
558 list_add_tail(&sd->entry->list, &hist_entry__sort_list); 532 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
559 sd->taken = 1; 533 sd->taken = 1;
560 534
561 return 0; 535 return 0;
562 } 536 }
537
538 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
539 struct sort_dimension *sd = &bstack_sort_dimensions[i];
540
541 if (strncasecmp(tok, sd->name, strlen(tok)))
542 continue;
543
544 if (sort__branch_mode != 1)
545 return -EINVAL;
546
547 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
548 sort__has_sym = 1;
549
550 if (sd->taken)
551 return 0;
552
553 if (sd->entry->se_collapse)
554 sort__need_collapse = 1;
555
556 if (list_empty(&hist_entry__sort_list))
557 sort__first_dimension = i + __SORT_BRANCH_STACK;
558
559 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
560 sd->taken = 1;
561
562 return 0;
563 }
564
563 return -ESRCH; 565 return -ESRCH;
564} 566}
565 567
566void setup_sorting(const char * const usagestr[], const struct option *opts) 568int setup_sorting(void)
567{ 569{
568 char *tmp, *tok, *str = strdup(sort_order); 570 char *tmp, *tok, *str = strdup(sort_order);
571 int ret = 0;
572
573 if (str == NULL) {
574 error("Not enough memory to setup sort keys");
575 return -ENOMEM;
576 }
569 577
570 for (tok = strtok_r(str, ", ", &tmp); 578 for (tok = strtok_r(str, ", ", &tmp);
571 tok; tok = strtok_r(NULL, ", ", &tmp)) { 579 tok; tok = strtok_r(NULL, ", ", &tmp)) {
572 if (sort_dimension__add(tok) < 0) { 580 ret = sort_dimension__add(tok);
581 if (ret == -EINVAL) {
582 error("Invalid --sort key: `%s'", tok);
583 break;
584 } else if (ret == -ESRCH) {
573 error("Unknown --sort key: `%s'", tok); 585 error("Unknown --sort key: `%s'", tok);
574 usage_with_options(usagestr, opts); 586 break;
575 } 587 }
576 } 588 }
577 589
578 free(str); 590 free(str);
591 return ret;
579} 592}
580 593
581void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 594void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index b4e8c3ba559d..b13e56f6ccbe 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -55,9 +55,6 @@ struct he_stat {
55struct hist_entry_diff { 55struct hist_entry_diff {
56 bool computed; 56 bool computed;
57 57
58 /* PERF_HPP__DISPL */
59 int displacement;
60
61 /* PERF_HPP__DELTA */ 58 /* PERF_HPP__DELTA */
62 double period_ratio_delta; 59 double period_ratio_delta;
63 60
@@ -118,25 +115,29 @@ static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
118 return NULL; 115 return NULL;
119} 116}
120 117
121static inline void hist__entry_add_pair(struct hist_entry *he, 118static inline void hist_entry__add_pair(struct hist_entry *he,
122 struct hist_entry *pair) 119 struct hist_entry *pair)
123{ 120{
124 list_add_tail(&he->pairs.head, &pair->pairs.node); 121 list_add_tail(&he->pairs.head, &pair->pairs.node);
125} 122}
126 123
127enum sort_type { 124enum sort_type {
125 /* common sort keys */
128 SORT_PID, 126 SORT_PID,
129 SORT_COMM, 127 SORT_COMM,
130 SORT_DSO, 128 SORT_DSO,
131 SORT_SYM, 129 SORT_SYM,
132 SORT_PARENT, 130 SORT_PARENT,
133 SORT_CPU, 131 SORT_CPU,
134 SORT_DSO_FROM, 132 SORT_SRCLINE,
133
134 /* branch stack specific sort keys */
135 __SORT_BRANCH_STACK,
136 SORT_DSO_FROM = __SORT_BRANCH_STACK,
135 SORT_DSO_TO, 137 SORT_DSO_TO,
136 SORT_SYM_FROM, 138 SORT_SYM_FROM,
137 SORT_SYM_TO, 139 SORT_SYM_TO,
138 SORT_MISPREDICT, 140 SORT_MISPREDICT,
139 SORT_SRCLINE,
140}; 141};
141 142
142/* 143/*
@@ -159,7 +160,7 @@ struct sort_entry {
159extern struct sort_entry sort_thread; 160extern struct sort_entry sort_thread;
160extern struct list_head hist_entry__sort_list; 161extern struct list_head hist_entry__sort_list;
161 162
162void setup_sorting(const char * const usagestr[], const struct option *opts); 163int setup_sorting(void);
163extern int sort_dimension__add(const char *); 164extern int sort_dimension__add(const char *);
164void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, 165void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
165 const char *list_name, FILE *fp); 166 const char *list_name, FILE *fp);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 346707df04b9..29c7b2cb2521 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -332,6 +332,24 @@ char *strxfrchar(char *s, char from, char to)
332} 332}
333 333
334/** 334/**
335 * ltrim - Removes leading whitespace from @s.
336 * @s: The string to be stripped.
337 *
338 * Return pointer to the first non-whitespace character in @s.
339 */
340char *ltrim(char *s)
341{
342 int len = strlen(s);
343
344 while (len && isspace(*s)) {
345 len--;
346 s++;
347 }
348
349 return s;
350}
351
352/**
335 * rtrim - Removes trailing whitespace from @s. 353 * rtrim - Removes trailing whitespace from @s.
336 * @s: The string to be stripped. 354 * @s: The string to be stripped.
337 * 355 *
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 155d8b7078a7..55433aa42c8f 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -35,11 +35,11 @@ out_delete:
35 return NULL; 35 return NULL;
36} 36}
37 37
38static void str_node__delete(struct str_node *self, bool dupstr) 38static void str_node__delete(struct str_node *snode, bool dupstr)
39{ 39{
40 if (dupstr) 40 if (dupstr)
41 free((void *)self->s); 41 free((void *)snode->s);
42 free(self); 42 free(snode);
43} 43}
44 44
45static 45static
@@ -59,12 +59,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
59 return strcmp(snode->s, str); 59 return strcmp(snode->s, str);
60} 60}
61 61
62int strlist__add(struct strlist *self, const char *new_entry) 62int strlist__add(struct strlist *slist, const char *new_entry)
63{ 63{
64 return rblist__add_node(&self->rblist, new_entry); 64 return rblist__add_node(&slist->rblist, new_entry);
65} 65}
66 66
67int strlist__load(struct strlist *self, const char *filename) 67int strlist__load(struct strlist *slist, const char *filename)
68{ 68{
69 char entry[1024]; 69 char entry[1024];
70 int err; 70 int err;
@@ -80,7 +80,7 @@ int strlist__load(struct strlist *self, const char *filename)
80 continue; 80 continue;
81 entry[len - 1] = '\0'; 81 entry[len - 1] = '\0';
82 82
83 err = strlist__add(self, entry); 83 err = strlist__add(slist, entry);
84 if (err != 0) 84 if (err != 0)
85 goto out; 85 goto out;
86 } 86 }
@@ -107,56 +107,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry)
107 return snode; 107 return snode;
108} 108}
109 109
110static int strlist__parse_list_entry(struct strlist *self, const char *s) 110static int strlist__parse_list_entry(struct strlist *slist, const char *s)
111{ 111{
112 if (strncmp(s, "file://", 7) == 0) 112 if (strncmp(s, "file://", 7) == 0)
113 return strlist__load(self, s + 7); 113 return strlist__load(slist, s + 7);
114 114
115 return strlist__add(self, s); 115 return strlist__add(slist, s);
116} 116}
117 117
118int strlist__parse_list(struct strlist *self, const char *s) 118int strlist__parse_list(struct strlist *slist, const char *s)
119{ 119{
120 char *sep; 120 char *sep;
121 int err; 121 int err;
122 122
123 while ((sep = strchr(s, ',')) != NULL) { 123 while ((sep = strchr(s, ',')) != NULL) {
124 *sep = '\0'; 124 *sep = '\0';
125 err = strlist__parse_list_entry(self, s); 125 err = strlist__parse_list_entry(slist, s);
126 *sep = ','; 126 *sep = ',';
127 if (err != 0) 127 if (err != 0)
128 return err; 128 return err;
129 s = sep + 1; 129 s = sep + 1;
130 } 130 }
131 131
132 return *s ? strlist__parse_list_entry(self, s) : 0; 132 return *s ? strlist__parse_list_entry(slist, s) : 0;
133} 133}
134 134
135struct strlist *strlist__new(bool dupstr, const char *slist) 135struct strlist *strlist__new(bool dupstr, const char *list)
136{ 136{
137 struct strlist *self = malloc(sizeof(*self)); 137 struct strlist *slist = malloc(sizeof(*slist));
138 138
139 if (self != NULL) { 139 if (slist != NULL) {
140 rblist__init(&self->rblist); 140 rblist__init(&slist->rblist);
141 self->rblist.node_cmp = strlist__node_cmp; 141 slist->rblist.node_cmp = strlist__node_cmp;
142 self->rblist.node_new = strlist__node_new; 142 slist->rblist.node_new = strlist__node_new;
143 self->rblist.node_delete = strlist__node_delete; 143 slist->rblist.node_delete = strlist__node_delete;
144 144
145 self->dupstr = dupstr; 145 slist->dupstr = dupstr;
146 if (slist && strlist__parse_list(self, slist) != 0) 146 if (slist && strlist__parse_list(slist, list) != 0)
147 goto out_error; 147 goto out_error;
148 } 148 }
149 149
150 return self; 150 return slist;
151out_error: 151out_error:
152 free(self); 152 free(slist);
153 return NULL; 153 return NULL;
154} 154}
155 155
156void strlist__delete(struct strlist *self) 156void strlist__delete(struct strlist *slist)
157{ 157{
158 if (self != NULL) 158 if (slist != NULL)
159 rblist__delete(&self->rblist); 159 rblist__delete(&slist->rblist);
160} 160}
161 161
162struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) 162struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index dd9f922ec67c..5c7f87069d9c 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -17,34 +17,34 @@ struct strlist {
17}; 17};
18 18
19struct strlist *strlist__new(bool dupstr, const char *slist); 19struct strlist *strlist__new(bool dupstr, const char *slist);
20void strlist__delete(struct strlist *self); 20void strlist__delete(struct strlist *slist);
21 21
22void strlist__remove(struct strlist *self, struct str_node *sn); 22void strlist__remove(struct strlist *slist, struct str_node *sn);
23int strlist__load(struct strlist *self, const char *filename); 23int strlist__load(struct strlist *slist, const char *filename);
24int strlist__add(struct strlist *self, const char *str); 24int strlist__add(struct strlist *slist, const char *str);
25 25
26struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); 26struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
27struct str_node *strlist__find(struct strlist *self, const char *entry); 27struct str_node *strlist__find(struct strlist *slist, const char *entry);
28 28
29static inline bool strlist__has_entry(struct strlist *self, const char *entry) 29static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
30{ 30{
31 return strlist__find(self, entry) != NULL; 31 return strlist__find(slist, entry) != NULL;
32} 32}
33 33
34static inline bool strlist__empty(const struct strlist *self) 34static inline bool strlist__empty(const struct strlist *slist)
35{ 35{
36 return rblist__empty(&self->rblist); 36 return rblist__empty(&slist->rblist);
37} 37}
38 38
39static inline unsigned int strlist__nr_entries(const struct strlist *self) 39static inline unsigned int strlist__nr_entries(const struct strlist *slist)
40{ 40{
41 return rblist__nr_entries(&self->rblist); 41 return rblist__nr_entries(&slist->rblist);
42} 42}
43 43
44/* For strlist iteration */ 44/* For strlist iteration */
45static inline struct str_node *strlist__first(struct strlist *self) 45static inline struct str_node *strlist__first(struct strlist *slist)
46{ 46{
47 struct rb_node *rn = rb_first(&self->rblist.entries); 47 struct rb_node *rn = rb_first(&slist->rblist.entries);
48 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 48 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
49} 49}
50static inline struct str_node *strlist__next(struct str_node *sn) 50static inline struct str_node *strlist__next(struct str_node *sn)
@@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn)
59/** 59/**
60 * strlist_for_each - iterate over a strlist 60 * strlist_for_each - iterate over a strlist
61 * @pos: the &struct str_node to use as a loop cursor. 61 * @pos: the &struct str_node to use as a loop cursor.
62 * @self: the &struct strlist for loop. 62 * @slist: the &struct strlist for loop.
63 */ 63 */
64#define strlist__for_each(pos, self) \ 64#define strlist__for_each(pos, slist) \
65 for (pos = strlist__first(self); pos; pos = strlist__next(pos)) 65 for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
66 66
67/** 67/**
68 * strlist_for_each_safe - iterate over a strlist safe against removal of 68 * strlist_for_each_safe - iterate over a strlist safe against removal of
69 * str_node 69 * str_node
70 * @pos: the &struct str_node to use as a loop cursor. 70 * @pos: the &struct str_node to use as a loop cursor.
71 * @n: another &struct str_node to use as temporary storage. 71 * @n: another &struct str_node to use as temporary storage.
72 * @self: the &struct strlist for loop. 72 * @slist: the &struct strlist for loop.
73 */ 73 */
74#define strlist__for_each_safe(pos, n, self) \ 74#define strlist__for_each_safe(pos, n, slist) \
75 for (pos = strlist__first(self), n = strlist__next(pos); pos;\ 75 for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
76 pos = n, n = strlist__next(n)) 76 pos = n, n = strlist__next(n))
77 77
78int strlist__parse_list(struct strlist *self, const char *s); 78int strlist__parse_list(struct strlist *slist, const char *s);
79#endif /* __PERF_STRLIST_H */ 79#endif /* __PERF_STRLIST_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index db0cc92cf2ea..54efcb5659ac 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1,6 +1,3 @@
1#include <libelf.h>
2#include <gelf.h>
3#include <elf.h>
4#include <fcntl.h> 1#include <fcntl.h>
5#include <stdio.h> 2#include <stdio.h>
6#include <errno.h> 3#include <errno.h>
@@ -718,6 +715,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
718 sym.st_value); 715 sym.st_value);
719 used_opd = true; 716 used_opd = true;
720 } 717 }
718 /*
719 * When loading symbols in a data mapping, ABS symbols (which
720 * has a value of SHN_ABS in its st_shndx) failed at
721 * elf_getscn(). And it marks the loading as a failure so
722 * already loaded symbols cannot be fixed up.
723 *
724 * I'm not sure what should be done. Just ignore them for now.
725 * - Namhyung Kim
726 */
727 if (sym.st_shndx == SHN_ABS)
728 continue;
721 729
722 sec = elf_getscn(runtime_ss->elf, sym.st_shndx); 730 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
723 if (!sec) 731 if (!sec)
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 259f8f2ea9c9..a7390cde63bc 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -1,6 +1,5 @@
1#include "symbol.h" 1#include "symbol.h"
2 2
3#include <elf.h>
4#include <stdio.h> 3#include <stdio.h>
5#include <fcntl.h> 4#include <fcntl.h>
6#include <string.h> 5#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 295f8d4feedf..e6432d85b43d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -28,8 +28,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
28 symbol_filter_t filter); 28 symbol_filter_t filter);
29static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 29static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
30 symbol_filter_t filter); 30 symbol_filter_t filter);
31static int vmlinux_path__nr_entries; 31int vmlinux_path__nr_entries;
32static char **vmlinux_path; 32char **vmlinux_path;
33 33
34struct symbol_conf symbol_conf = { 34struct symbol_conf symbol_conf = {
35 .exclude_other = true, 35 .exclude_other = true,
@@ -202,13 +202,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
202 curr->end = ~0ULL; 202 curr->end = ~0ULL;
203} 203}
204 204
205static void map_groups__fixup_end(struct map_groups *mg)
206{
207 int i;
208 for (i = 0; i < MAP__NR_TYPES; ++i)
209 __map_groups__fixup_end(mg, i);
210}
211
212struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) 205struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
213{ 206{
214 size_t namelen = strlen(name) + 1; 207 size_t namelen = strlen(name) + 1;
@@ -652,8 +645,8 @@ discard_symbol: rb_erase(&pos->rb_node, root);
652 return count + moved; 645 return count + moved;
653} 646}
654 647
655static bool symbol__restricted_filename(const char *filename, 648bool symbol__restricted_filename(const char *filename,
656 const char *restricted_filename) 649 const char *restricted_filename)
657{ 650{
658 bool restricted = false; 651 bool restricted = false;
659 652
@@ -775,10 +768,6 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
775 else 768 else
776 machine = NULL; 769 machine = NULL;
777 770
778 name = malloc(PATH_MAX);
779 if (!name)
780 return -1;
781
782 dso->adjust_symbols = 0; 771 dso->adjust_symbols = 0;
783 772
784 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { 773 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
@@ -802,6 +791,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
802 if (machine) 791 if (machine)
803 root_dir = machine->root_dir; 792 root_dir = machine->root_dir;
804 793
794 name = malloc(PATH_MAX);
795 if (!name)
796 return -1;
797
805 /* Iterate over candidate debug images. 798 /* Iterate over candidate debug images.
806 * Keep track of "interesting" ones (those which have a symtab, dynsym, 799 * Keep track of "interesting" ones (those which have a symtab, dynsym,
807 * and/or opd section) for processing. 800 * and/or opd section) for processing.
@@ -887,200 +880,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
887 return NULL; 880 return NULL;
888} 881}
889 882
890static int map_groups__set_modules_path_dir(struct map_groups *mg,
891 const char *dir_name)
892{
893 struct dirent *dent;
894 DIR *dir = opendir(dir_name);
895 int ret = 0;
896
897 if (!dir) {
898 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
899 return -1;
900 }
901
902 while ((dent = readdir(dir)) != NULL) {
903 char path[PATH_MAX];
904 struct stat st;
905
906 /*sshfs might return bad dent->d_type, so we have to stat*/
907 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
908 if (stat(path, &st))
909 continue;
910
911 if (S_ISDIR(st.st_mode)) {
912 if (!strcmp(dent->d_name, ".") ||
913 !strcmp(dent->d_name, ".."))
914 continue;
915
916 ret = map_groups__set_modules_path_dir(mg, path);
917 if (ret < 0)
918 goto out;
919 } else {
920 char *dot = strrchr(dent->d_name, '.'),
921 dso_name[PATH_MAX];
922 struct map *map;
923 char *long_name;
924
925 if (dot == NULL || strcmp(dot, ".ko"))
926 continue;
927 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
928 (int)(dot - dent->d_name), dent->d_name);
929
930 strxfrchar(dso_name, '-', '_');
931 map = map_groups__find_by_name(mg, MAP__FUNCTION,
932 dso_name);
933 if (map == NULL)
934 continue;
935
936 long_name = strdup(path);
937 if (long_name == NULL) {
938 ret = -1;
939 goto out;
940 }
941 dso__set_long_name(map->dso, long_name);
942 map->dso->lname_alloc = 1;
943 dso__kernel_module_get_build_id(map->dso, "");
944 }
945 }
946
947out:
948 closedir(dir);
949 return ret;
950}
951
952static char *get_kernel_version(const char *root_dir)
953{
954 char version[PATH_MAX];
955 FILE *file;
956 char *name, *tmp;
957 const char *prefix = "Linux version ";
958
959 sprintf(version, "%s/proc/version", root_dir);
960 file = fopen(version, "r");
961 if (!file)
962 return NULL;
963
964 version[0] = '\0';
965 tmp = fgets(version, sizeof(version), file);
966 fclose(file);
967
968 name = strstr(version, prefix);
969 if (!name)
970 return NULL;
971 name += strlen(prefix);
972 tmp = strchr(name, ' ');
973 if (tmp)
974 *tmp = '\0';
975
976 return strdup(name);
977}
978
979static int machine__set_modules_path(struct machine *machine)
980{
981 char *version;
982 char modules_path[PATH_MAX];
983
984 version = get_kernel_version(machine->root_dir);
985 if (!version)
986 return -1;
987
988 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
989 machine->root_dir, version);
990 free(version);
991
992 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
993}
994
995struct map *machine__new_module(struct machine *machine, u64 start,
996 const char *filename)
997{
998 struct map *map;
999 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
1000
1001 if (dso == NULL)
1002 return NULL;
1003
1004 map = map__new2(start, dso, MAP__FUNCTION);
1005 if (map == NULL)
1006 return NULL;
1007
1008 if (machine__is_host(machine))
1009 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
1010 else
1011 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
1012 map_groups__insert(&machine->kmaps, map);
1013 return map;
1014}
1015
1016static int machine__create_modules(struct machine *machine)
1017{
1018 char *line = NULL;
1019 size_t n;
1020 FILE *file;
1021 struct map *map;
1022 const char *modules;
1023 char path[PATH_MAX];
1024
1025 if (machine__is_default_guest(machine))
1026 modules = symbol_conf.default_guest_modules;
1027 else {
1028 sprintf(path, "%s/proc/modules", machine->root_dir);
1029 modules = path;
1030 }
1031
1032 if (symbol__restricted_filename(path, "/proc/modules"))
1033 return -1;
1034
1035 file = fopen(modules, "r");
1036 if (file == NULL)
1037 return -1;
1038
1039 while (!feof(file)) {
1040 char name[PATH_MAX];
1041 u64 start;
1042 char *sep;
1043 int line_len;
1044
1045 line_len = getline(&line, &n, file);
1046 if (line_len < 0)
1047 break;
1048
1049 if (!line)
1050 goto out_failure;
1051
1052 line[--line_len] = '\0'; /* \n */
1053
1054 sep = strrchr(line, 'x');
1055 if (sep == NULL)
1056 continue;
1057
1058 hex2u64(sep + 1, &start);
1059
1060 sep = strchr(line, ' ');
1061 if (sep == NULL)
1062 continue;
1063
1064 *sep = '\0';
1065
1066 snprintf(name, sizeof(name), "[%s]", line);
1067 map = machine__new_module(machine, start, name);
1068 if (map == NULL)
1069 goto out_delete_line;
1070 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1071 }
1072
1073 free(line);
1074 fclose(file);
1075
1076 return machine__set_modules_path(machine);
1077
1078out_delete_line:
1079 free(line);
1080out_failure:
1081 return -1;
1082}
1083
1084int dso__load_vmlinux(struct dso *dso, struct map *map, 883int dso__load_vmlinux(struct dso *dso, struct map *map,
1085 const char *vmlinux, symbol_filter_t filter) 884 const char *vmlinux, symbol_filter_t filter)
1086{ 885{
@@ -1124,8 +923,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1124 filename = dso__build_id_filename(dso, NULL, 0); 923 filename = dso__build_id_filename(dso, NULL, 0);
1125 if (filename != NULL) { 924 if (filename != NULL) {
1126 err = dso__load_vmlinux(dso, map, filename, filter); 925 err = dso__load_vmlinux(dso, map, filename, filter);
1127 if (err > 0) 926 if (err > 0) {
927 dso->lname_alloc = 1;
1128 goto out; 928 goto out;
929 }
1129 free(filename); 930 free(filename);
1130 } 931 }
1131 932
@@ -1133,6 +934,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1133 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); 934 err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
1134 if (err > 0) { 935 if (err > 0) {
1135 dso__set_long_name(dso, strdup(vmlinux_path[i])); 936 dso__set_long_name(dso, strdup(vmlinux_path[i]));
937 dso->lname_alloc = 1;
1136 break; 938 break;
1137 } 939 }
1138 } 940 }
@@ -1172,6 +974,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1172 if (err > 0) { 974 if (err > 0) {
1173 dso__set_long_name(dso, 975 dso__set_long_name(dso,
1174 strdup(symbol_conf.vmlinux_name)); 976 strdup(symbol_conf.vmlinux_name));
977 dso->lname_alloc = 1;
1175 goto out_fixup; 978 goto out_fixup;
1176 } 979 }
1177 return err; 980 return err;
@@ -1300,195 +1103,6 @@ out_try_fixup:
1300 return err; 1103 return err;
1301} 1104}
1302 1105
1303size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
1304{
1305 struct rb_node *nd;
1306 size_t ret = 0;
1307
1308 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
1309 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1310 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
1311 ret += __dsos__fprintf(&pos->user_dsos, fp);
1312 }
1313
1314 return ret;
1315}
1316
1317size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
1318 bool with_hits)
1319{
1320 return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
1321 __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
1322}
1323
1324size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
1325 FILE *fp, bool with_hits)
1326{
1327 struct rb_node *nd;
1328 size_t ret = 0;
1329
1330 for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
1331 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1332 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
1333 }
1334 return ret;
1335}
1336
1337static struct dso *machine__get_kernel(struct machine *machine)
1338{
1339 const char *vmlinux_name = NULL;
1340 struct dso *kernel;
1341
1342 if (machine__is_host(machine)) {
1343 vmlinux_name = symbol_conf.vmlinux_name;
1344 if (!vmlinux_name)
1345 vmlinux_name = "[kernel.kallsyms]";
1346
1347 kernel = dso__kernel_findnew(machine, vmlinux_name,
1348 "[kernel]",
1349 DSO_TYPE_KERNEL);
1350 } else {
1351 char bf[PATH_MAX];
1352
1353 if (machine__is_default_guest(machine))
1354 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1355 if (!vmlinux_name)
1356 vmlinux_name = machine__mmap_name(machine, bf,
1357 sizeof(bf));
1358
1359 kernel = dso__kernel_findnew(machine, vmlinux_name,
1360 "[guest.kernel]",
1361 DSO_TYPE_GUEST_KERNEL);
1362 }
1363
1364 if (kernel != NULL && (!kernel->has_build_id))
1365 dso__read_running_kernel_build_id(kernel, machine);
1366
1367 return kernel;
1368}
1369
1370struct process_args {
1371 u64 start;
1372};
1373
1374static int symbol__in_kernel(void *arg, const char *name,
1375 char type __maybe_unused, u64 start)
1376{
1377 struct process_args *args = arg;
1378
1379 if (strchr(name, '['))
1380 return 0;
1381
1382 args->start = start;
1383 return 1;
1384}
1385
1386/* Figure out the start address of kernel map from /proc/kallsyms */
1387static u64 machine__get_kernel_start_addr(struct machine *machine)
1388{
1389 const char *filename;
1390 char path[PATH_MAX];
1391 struct process_args args;
1392
1393 if (machine__is_host(machine)) {
1394 filename = "/proc/kallsyms";
1395 } else {
1396 if (machine__is_default_guest(machine))
1397 filename = (char *)symbol_conf.default_guest_kallsyms;
1398 else {
1399 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1400 filename = path;
1401 }
1402 }
1403
1404 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1405 return 0;
1406
1407 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
1408 return 0;
1409
1410 return args.start;
1411}
1412
1413int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1414{
1415 enum map_type type;
1416 u64 start = machine__get_kernel_start_addr(machine);
1417
1418 for (type = 0; type < MAP__NR_TYPES; ++type) {
1419 struct kmap *kmap;
1420
1421 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
1422 if (machine->vmlinux_maps[type] == NULL)
1423 return -1;
1424
1425 machine->vmlinux_maps[type]->map_ip =
1426 machine->vmlinux_maps[type]->unmap_ip =
1427 identity__map_ip;
1428 kmap = map__kmap(machine->vmlinux_maps[type]);
1429 kmap->kmaps = &machine->kmaps;
1430 map_groups__insert(&machine->kmaps,
1431 machine->vmlinux_maps[type]);
1432 }
1433
1434 return 0;
1435}
1436
1437void machine__destroy_kernel_maps(struct machine *machine)
1438{
1439 enum map_type type;
1440
1441 for (type = 0; type < MAP__NR_TYPES; ++type) {
1442 struct kmap *kmap;
1443
1444 if (machine->vmlinux_maps[type] == NULL)
1445 continue;
1446
1447 kmap = map__kmap(machine->vmlinux_maps[type]);
1448 map_groups__remove(&machine->kmaps,
1449 machine->vmlinux_maps[type]);
1450 if (kmap->ref_reloc_sym) {
1451 /*
1452 * ref_reloc_sym is shared among all maps, so free just
1453 * on one of them.
1454 */
1455 if (type == MAP__FUNCTION) {
1456 free((char *)kmap->ref_reloc_sym->name);
1457 kmap->ref_reloc_sym->name = NULL;
1458 free(kmap->ref_reloc_sym);
1459 }
1460 kmap->ref_reloc_sym = NULL;
1461 }
1462
1463 map__delete(machine->vmlinux_maps[type]);
1464 machine->vmlinux_maps[type] = NULL;
1465 }
1466}
1467
1468int machine__create_kernel_maps(struct machine *machine)
1469{
1470 struct dso *kernel = machine__get_kernel(machine);
1471
1472 if (kernel == NULL ||
1473 __machine__create_kernel_maps(machine, kernel) < 0)
1474 return -1;
1475
1476 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1477 if (machine__is_host(machine))
1478 pr_debug("Problems creating module maps, "
1479 "continuing anyway...\n");
1480 else
1481 pr_debug("Problems creating module maps for guest %d, "
1482 "continuing anyway...\n", machine->pid);
1483 }
1484
1485 /*
1486 * Now that we have all the maps created, just set the ->end of them:
1487 */
1488 map_groups__fixup_end(&machine->kmaps);
1489 return 0;
1490}
1491
1492static void vmlinux_path__exit(void) 1106static void vmlinux_path__exit(void)
1493{ 1107{
1494 while (--vmlinux_path__nr_entries >= 0) { 1108 while (--vmlinux_path__nr_entries >= 0) {
@@ -1549,25 +1163,6 @@ out_fail:
1549 return -1; 1163 return -1;
1550} 1164}
1551 1165
1552size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1553{
1554 int i;
1555 size_t printed = 0;
1556 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
1557
1558 if (kdso->has_build_id) {
1559 char filename[PATH_MAX];
1560 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
1561 printed += fprintf(fp, "[0] %s\n", filename);
1562 }
1563
1564 for (i = 0; i < vmlinux_path__nr_entries; ++i)
1565 printed += fprintf(fp, "[%d] %s\n",
1566 i + kdso->has_build_id, vmlinux_path[i]);
1567
1568 return printed;
1569}
1570
1571static int setup_list(struct strlist **list, const char *list_str, 1166static int setup_list(struct strlist **list, const char *list_str,
1572 const char *list_name) 1167 const char *list_name)
1573{ 1168{
@@ -1671,108 +1266,3 @@ void symbol__exit(void)
1671 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 1266 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
1672 symbol_conf.initialized = false; 1267 symbol_conf.initialized = false;
1673} 1268}
1674
1675int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
1676{
1677 struct machine *machine = machines__findnew(machines, pid);
1678
1679 if (machine == NULL)
1680 return -1;
1681
1682 return machine__create_kernel_maps(machine);
1683}
1684
1685int machines__create_guest_kernel_maps(struct rb_root *machines)
1686{
1687 int ret = 0;
1688 struct dirent **namelist = NULL;
1689 int i, items = 0;
1690 char path[PATH_MAX];
1691 pid_t pid;
1692 char *endp;
1693
1694 if (symbol_conf.default_guest_vmlinux_name ||
1695 symbol_conf.default_guest_modules ||
1696 symbol_conf.default_guest_kallsyms) {
1697 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1698 }
1699
1700 if (symbol_conf.guestmount) {
1701 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1702 if (items <= 0)
1703 return -ENOENT;
1704 for (i = 0; i < items; i++) {
1705 if (!isdigit(namelist[i]->d_name[0])) {
1706 /* Filter out . and .. */
1707 continue;
1708 }
1709 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1710 if ((*endp != '\0') ||
1711 (endp == namelist[i]->d_name) ||
1712 (errno == ERANGE)) {
1713 pr_debug("invalid directory (%s). Skipping.\n",
1714 namelist[i]->d_name);
1715 continue;
1716 }
1717 sprintf(path, "%s/%s/proc/kallsyms",
1718 symbol_conf.guestmount,
1719 namelist[i]->d_name);
1720 ret = access(path, R_OK);
1721 if (ret) {
1722 pr_debug("Can't access file %s\n", path);
1723 goto failure;
1724 }
1725 machines__create_kernel_maps(machines, pid);
1726 }
1727failure:
1728 free(namelist);
1729 }
1730
1731 return ret;
1732}
1733
1734void machines__destroy_guest_kernel_maps(struct rb_root *machines)
1735{
1736 struct rb_node *next = rb_first(machines);
1737
1738 while (next) {
1739 struct machine *pos = rb_entry(next, struct machine, rb_node);
1740
1741 next = rb_next(&pos->rb_node);
1742 rb_erase(&pos->rb_node, machines);
1743 machine__delete(pos);
1744 }
1745}
1746
1747int machine__load_kallsyms(struct machine *machine, const char *filename,
1748 enum map_type type, symbol_filter_t filter)
1749{
1750 struct map *map = machine->vmlinux_maps[type];
1751 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
1752
1753 if (ret > 0) {
1754 dso__set_loaded(map->dso, type);
1755 /*
1756 * Since /proc/kallsyms will have multiple sessions for the
1757 * kernel, with modules between them, fixup the end of all
1758 * sections.
1759 */
1760 __map_groups__fixup_end(&machine->kmaps, type);
1761 }
1762
1763 return ret;
1764}
1765
1766int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
1767 symbol_filter_t filter)
1768{
1769 struct map *map = machine->vmlinux_maps[type];
1770 int ret = dso__load_vmlinux_path(map->dso, map, filter);
1771
1772 if (ret > 0) {
1773 dso__set_loaded(map->dso, type);
1774 map__reloc_vmlinux(map);
1775 }
1776
1777 return ret;
1778}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index de68f98b236d..b62ca37c4b77 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -16,8 +16,8 @@
16#ifdef LIBELF_SUPPORT 16#ifdef LIBELF_SUPPORT
17#include <libelf.h> 17#include <libelf.h>
18#include <gelf.h> 18#include <gelf.h>
19#include <elf.h>
20#endif 19#endif
20#include <elf.h>
21 21
22#include "dso.h" 22#include "dso.h"
23 23
@@ -96,7 +96,8 @@ struct symbol_conf {
96 initialized, 96 initialized,
97 kptr_restrict, 97 kptr_restrict,
98 annotate_asm_raw, 98 annotate_asm_raw,
99 annotate_src; 99 annotate_src,
100 event_group;
100 const char *vmlinux_name, 101 const char *vmlinux_name,
101 *kallsyms_name, 102 *kallsyms_name,
102 *source_prefix, 103 *source_prefix,
@@ -120,6 +121,8 @@ struct symbol_conf {
120}; 121};
121 122
122extern struct symbol_conf symbol_conf; 123extern struct symbol_conf symbol_conf;
124extern int vmlinux_path__nr_entries;
125extern char **vmlinux_path;
123 126
124static inline void *symbol__priv(struct symbol *sym) 127static inline void *symbol__priv(struct symbol *sym)
125{ 128{
@@ -223,6 +226,8 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
223size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); 226size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
224size_t symbol__fprintf(struct symbol *sym, FILE *fp); 227size_t symbol__fprintf(struct symbol *sym, FILE *fp);
225bool symbol_type__is_a(char symbol_type, enum map_type map_type); 228bool symbol_type__is_a(char symbol_type, enum map_type map_type);
229bool symbol__restricted_filename(const char *filename,
230 const char *restricted_filename);
226 231
227int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 232int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
228 struct symsrc *runtime_ss, symbol_filter_t filter, 233 struct symsrc *runtime_ss, symbol_filter_t filter,
diff --git a/tools/perf/util/sysfs.c b/tools/perf/util/sysfs.c
index 48c6902e749f..f71e9eafe15a 100644
--- a/tools/perf/util/sysfs.c
+++ b/tools/perf/util/sysfs.c
@@ -8,7 +8,7 @@ static const char * const sysfs_known_mountpoints[] = {
8}; 8};
9 9
10static int sysfs_found; 10static int sysfs_found;
11char sysfs_mountpoint[PATH_MAX]; 11char sysfs_mountpoint[PATH_MAX + 1];
12 12
13static int sysfs_valid_mountpoint(const char *sysfs) 13static int sysfs_valid_mountpoint(const char *sysfs)
14{ 14{
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index df59623ac763..632e40e5ceca 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -54,10 +54,10 @@ int thread__comm_len(struct thread *self)
54 return self->comm_len; 54 return self->comm_len;
55} 55}
56 56
57static size_t thread__fprintf(struct thread *self, FILE *fp) 57size_t thread__fprintf(struct thread *thread, FILE *fp)
58{ 58{
59 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + 59 return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
60 map_groups__fprintf(&self->mg, verbose, fp); 60 map_groups__fprintf(&thread->mg, verbose, fp);
61} 61}
62 62
63void thread__insert_map(struct thread *self, struct map *map) 63void thread__insert_map(struct thread *self, struct map *map)
@@ -84,17 +84,3 @@ int thread__fork(struct thread *self, struct thread *parent)
84 return -ENOMEM; 84 return -ENOMEM;
85 return 0; 85 return 0;
86} 86}
87
88size_t machine__fprintf(struct machine *machine, FILE *fp)
89{
90 size_t ret = 0;
91 struct rb_node *nd;
92
93 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
94 struct thread *pos = rb_entry(nd, struct thread, rb_node);
95
96 ret += thread__fprintf(pos, fp);
97 }
98
99 return ret;
100}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index f2fa17caa7d5..5ad266403098 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -30,6 +30,7 @@ int thread__set_comm(struct thread *self, const char *comm);
30int thread__comm_len(struct thread *self); 30int thread__comm_len(struct thread *self);
31void thread__insert_map(struct thread *self, struct map *map); 31void thread__insert_map(struct thread *self, struct map *map);
32int thread__fork(struct thread *self, struct thread *parent); 32int thread__fork(struct thread *self, struct thread *parent);
33size_t thread__fprintf(struct thread *thread, FILE *fp);
33 34
34static inline struct map *thread__find_map(struct thread *self, 35static inline struct map *thread__find_map(struct thread *self,
35 enum map_type type, u64 addr) 36 enum map_type type, u64 addr)
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 884dde9b9bc1..54d37a4753c5 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -26,6 +26,8 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
26 float samples_per_sec = top->samples / top->delay_secs; 26 float samples_per_sec = top->samples / top->delay_secs;
27 float ksamples_per_sec = top->kernel_samples / top->delay_secs; 27 float ksamples_per_sec = top->kernel_samples / top->delay_secs;
28 float esamples_percent = (100.0 * top->exact_samples) / top->samples; 28 float esamples_percent = (100.0 * top->exact_samples) / top->samples;
29 struct perf_record_opts *opts = &top->record_opts;
30 struct perf_target *target = &opts->target;
29 size_t ret = 0; 31 size_t ret = 0;
30 32
31 if (!perf_guest) { 33 if (!perf_guest) {
@@ -61,31 +63,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
61 struct perf_evsel *first = perf_evlist__first(top->evlist); 63 struct perf_evsel *first = perf_evlist__first(top->evlist);
62 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", 64 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
63 (uint64_t)first->attr.sample_period, 65 (uint64_t)first->attr.sample_period,
64 top->freq ? "Hz" : ""); 66 opts->freq ? "Hz" : "");
65 } 67 }
66 68
67 ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); 69 ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
68 70
69 ret += SNPRINTF(bf + ret, size - ret, "], "); 71 ret += SNPRINTF(bf + ret, size - ret, "], ");
70 72
71 if (top->target.pid) 73 if (target->pid)
72 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", 74 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
73 top->target.pid); 75 target->pid);
74 else if (top->target.tid) 76 else if (target->tid)
75 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", 77 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
76 top->target.tid); 78 target->tid);
77 else if (top->target.uid_str != NULL) 79 else if (target->uid_str != NULL)
78 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", 80 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
79 top->target.uid_str); 81 target->uid_str);
80 else 82 else
81 ret += SNPRINTF(bf + ret, size - ret, " (all"); 83 ret += SNPRINTF(bf + ret, size - ret, " (all");
82 84
83 if (top->target.cpu_list) 85 if (target->cpu_list)
84 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", 86 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
85 top->evlist->cpus->nr > 1 ? "s" : "", 87 top->evlist->cpus->nr > 1 ? "s" : "",
86 top->target.cpu_list); 88 target->cpu_list);
87 else { 89 else {
88 if (top->target.tid) 90 if (target->tid)
89 ret += SNPRINTF(bf + ret, size - ret, ")"); 91 ret += SNPRINTF(bf + ret, size - ret, ")");
90 else 92 else
91 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", 93 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 86ff1b15059b..7ebf357dc9e1 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -14,7 +14,7 @@ struct perf_session;
14struct perf_top { 14struct perf_top {
15 struct perf_tool tool; 15 struct perf_tool tool;
16 struct perf_evlist *evlist; 16 struct perf_evlist *evlist;
17 struct perf_target target; 17 struct perf_record_opts record_opts;
18 /* 18 /*
19 * Symbols will be added here in perf_event__process_sample and will 19 * Symbols will be added here in perf_event__process_sample and will
20 * get out after decayed. 20 * get out after decayed.
@@ -24,24 +24,16 @@ struct perf_top {
24 u64 exact_samples; 24 u64 exact_samples;
25 u64 guest_us_samples, guest_kernel_samples; 25 u64 guest_us_samples, guest_kernel_samples;
26 int print_entries, count_filter, delay_secs; 26 int print_entries, count_filter, delay_secs;
27 int freq;
28 bool hide_kernel_symbols, hide_user_symbols, zero; 27 bool hide_kernel_symbols, hide_user_symbols, zero;
29 bool use_tui, use_stdio; 28 bool use_tui, use_stdio;
30 bool sort_has_symbols; 29 bool sort_has_symbols;
31 bool dont_use_callchains;
32 bool kptr_restrict_warned; 30 bool kptr_restrict_warned;
33 bool vmlinux_warned; 31 bool vmlinux_warned;
34 bool inherit;
35 bool group;
36 bool sample_id_all_missing;
37 bool exclude_guest_missing;
38 bool dump_symtab; 32 bool dump_symtab;
39 struct hist_entry *sym_filter_entry; 33 struct hist_entry *sym_filter_entry;
40 struct perf_evsel *sym_evsel; 34 struct perf_evsel *sym_evsel;
41 struct perf_session *session; 35 struct perf_session *session;
42 struct winsize winsize; 36 struct winsize winsize;
43 unsigned int mmap_pages;
44 int default_interval;
45 int realtime_prio; 37 int realtime_prio;
46 int sym_pcnt_filter; 38 int sym_pcnt_filter;
47 const char *sym_filter; 39 const char *sym_filter;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 5906e8426cc7..805d1f52c5b4 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -12,6 +12,8 @@
12 */ 12 */
13unsigned int page_size; 13unsigned int page_size;
14 14
15bool test_attr__enabled;
16
15bool perf_host = true; 17bool perf_host = true;
16bool perf_guest = false; 18bool perf_guest = false;
17 19
@@ -218,3 +220,25 @@ void dump_stack(void)
218#else 220#else
219void dump_stack(void) {} 221void dump_stack(void) {}
220#endif 222#endif
223
224void get_term_dimensions(struct winsize *ws)
225{
226 char *s = getenv("LINES");
227
228 if (s != NULL) {
229 ws->ws_row = atoi(s);
230 s = getenv("COLUMNS");
231 if (s != NULL) {
232 ws->ws_col = atoi(s);
233 if (ws->ws_row && ws->ws_col)
234 return;
235 }
236 }
237#ifdef TIOCGWINSZ
238 if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
239 ws->ws_row && ws->ws_col)
240 return;
241#endif
242 ws->ws_row = 25;
243 ws->ws_col = 80;
244}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c2330918110c..09b4c26b71aa 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -265,10 +265,14 @@ bool is_power_of_2(unsigned long n)
265size_t hex_width(u64 v); 265size_t hex_width(u64 v);
266int hex2u64(const char *ptr, u64 *val); 266int hex2u64(const char *ptr, u64 *val);
267 267
268char *ltrim(char *s);
268char *rtrim(char *s); 269char *rtrim(char *s);
269 270
270void dump_stack(void); 271void dump_stack(void);
271 272
272extern unsigned int page_size; 273extern unsigned int page_size;
273 274
275struct winsize;
276void get_term_dimensions(struct winsize *ws);
277
274#endif 278#endif
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index 6b9cf7a987c7..bafeb8d662a3 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -13,6 +13,6 @@ clean :
13 rm -f $(CLEANFILES) $(patsubst %.c,%.o, $(SRCS)) *~ 13 rm -f $(CLEANFILES) $(patsubst %.c,%.o, $(SRCS)) *~
14 14
15install : 15install :
16 install acpidump /usr/bin/acpidump 16 install acpidump /usr/sbin/acpidump
17 install acpidump.8 /usr/share/man/man8 17 install acpidump.8 /usr/share/man/man8
18 18
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 0d7dc2cfefb5..b4ddb748356c 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -31,8 +31,6 @@ The \fB-S\fP option limits output to a 1-line System Summary for each interval.
31.PP 31.PP
32The \fB-v\fP option increases verbosity. 32The \fB-v\fP option increases verbosity.
33.PP 33.PP
34The \fB-s\fP option prints the SMI counter, equivalent to "-c 0x34"
35.PP
36The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter. 34The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter.
37.PP 35.PP
38The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter. 36The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter.
@@ -186,26 +184,24 @@ This is a weighted average, where the weight is %c0. ie. it is the total number
186un-halted cycles elapsed per time divided by the number of CPUs. 184un-halted cycles elapsed per time divided by the number of CPUs.
187.SH SMI COUNTING EXAMPLE 185.SH SMI COUNTING EXAMPLE
188On Intel Nehalem and newer processors, MSR 0x34 is a System Management Mode Interrupt (SMI) counter. 186On Intel Nehalem and newer processors, MSR 0x34 is a System Management Mode Interrupt (SMI) counter.
189Using the -m option, you can display how many SMIs have fired since reset, or if there 187This counter is shown by default under the "SMI" column.
190are SMIs during the measurement interval, you can display the delta using the -d option.
191.nf 188.nf
192[root@x980 ~]# turbostat -m 0x34 189[root@x980 ~]# turbostat
193cor CPU %c0 GHz TSC MSR 0x034 %c1 %c3 %c6 %pc3 %pc6 190cor CPU %c0 GHz TSC SMI %c1 %c3 %c6 CTMP %pc3 %pc6
194 1.41 1.82 3.38 0x00000000 8.92 37.82 51.85 17.37 0.55 191 0.11 1.91 3.38 0 1.84 0.26 97.79 29 0.82 83.87
195 0 0 3.73 2.03 3.38 0x00000055 1.72 48.25 46.31 17.38 0.55 192 0 0 0.40 1.63 3.38 0 10.27 0.12 89.20 20 0.82 83.88
196 0 6 0.14 1.63 3.38 0x00000056 5.30 193 0 6 0.06 1.63 3.38 0 10.61
197 1 2 2.51 1.80 3.38 0x00000056 15.65 29.33 52.52 194 1 2 0.37 2.63 3.38 0 0.02 0.10 99.51 22
198 1 8 0.10 1.65 3.38 0x00000056 18.05 195 1 8 0.01 1.62 3.38 0 0.39
199 2 4 1.16 1.68 3.38 0x00000056 5.87 24.47 68.50 196 2 4 0.07 1.62 3.38 0 0.04 0.07 99.82 23
200 2 10 0.10 1.63 3.38 0x00000056 6.93 197 2 10 0.02 1.62 3.38 0 0.09
201 8 1 3.84 1.91 3.38 0x00000056 1.36 50.65 44.16 198 8 1 0.23 1.64 3.38 0 0.10 1.07 98.60 24
202 8 7 0.08 1.64 3.38 0x00000056 5.12 199 8 7 0.02 1.64 3.38 0 0.31
203 9 3 1.82 1.73 3.38 0x00000056 7.59 24.21 66.38 200 9 3 0.03 1.62 3.38 0 0.03 0.05 99.89 29
204 9 9 0.09 1.68 3.38 0x00000056 9.32 201 9 9 0.02 1.62 3.38 0 0.05
205 10 5 1.66 1.65 3.38 0x00000056 15.10 50.00 33.23 202 10 5 0.07 1.62 3.38 0 0.08 0.12 99.73 27
206 10 11 1.72 1.65 3.38 0x00000056 15.05 203 10 11 0.03 1.62 3.38 0 0.13
207^C 204^C
208[root@x980 ~]#
209.fi 205.fi
210.SH NOTES 206.SH NOTES
211 207
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ce6d46038f74..6f3214ed4444 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -58,6 +58,7 @@ unsigned int extra_msr_offset32;
58unsigned int extra_msr_offset64; 58unsigned int extra_msr_offset64;
59unsigned int extra_delta_offset32; 59unsigned int extra_delta_offset32;
60unsigned int extra_delta_offset64; 60unsigned int extra_delta_offset64;
61int do_smi;
61double bclk; 62double bclk;
62unsigned int show_pkg; 63unsigned int show_pkg;
63unsigned int show_core; 64unsigned int show_core;
@@ -99,6 +100,7 @@ struct thread_data {
99 unsigned long long extra_delta64; 100 unsigned long long extra_delta64;
100 unsigned long long extra_msr32; 101 unsigned long long extra_msr32;
101 unsigned long long extra_delta32; 102 unsigned long long extra_delta32;
103 unsigned int smi_count;
102 unsigned int cpu_id; 104 unsigned int cpu_id;
103 unsigned int flags; 105 unsigned int flags;
104#define CPU_IS_FIRST_THREAD_IN_CORE 0x2 106#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
@@ -248,6 +250,8 @@ void print_header(void)
248 if (has_aperf) 250 if (has_aperf)
249 outp += sprintf(outp, " GHz"); 251 outp += sprintf(outp, " GHz");
250 outp += sprintf(outp, " TSC"); 252 outp += sprintf(outp, " TSC");
253 if (do_smi)
254 outp += sprintf(outp, " SMI");
251 if (extra_delta_offset32) 255 if (extra_delta_offset32)
252 outp += sprintf(outp, " count 0x%03X", extra_delta_offset32); 256 outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
253 if (extra_delta_offset64) 257 if (extra_delta_offset64)
@@ -314,6 +318,8 @@ int dump_counters(struct thread_data *t, struct core_data *c,
314 extra_msr_offset32, t->extra_msr32); 318 extra_msr_offset32, t->extra_msr32);
315 fprintf(stderr, "msr0x%x: %016llX\n", 319 fprintf(stderr, "msr0x%x: %016llX\n",
316 extra_msr_offset64, t->extra_msr64); 320 extra_msr_offset64, t->extra_msr64);
321 if (do_smi)
322 fprintf(stderr, "SMI: %08X\n", t->smi_count);
317 } 323 }
318 324
319 if (c) { 325 if (c) {
@@ -352,6 +358,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
352 * RAM_W: %5.2 358 * RAM_W: %5.2
353 * GHz: "GHz" 3 columns %3.2 359 * GHz: "GHz" 3 columns %3.2
354 * TSC: "TSC" 3 columns %3.2 360 * TSC: "TSC" 3 columns %3.2
361 * SMI: "SMI" 4 columns %4d
355 * percentage " %pc3" %6.2 362 * percentage " %pc3" %6.2
356 * Perf Status percentage: %5.2 363 * Perf Status percentage: %5.2
357 * "CTMP" 4 columns %4d 364 * "CTMP" 4 columns %4d
@@ -431,6 +438,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
431 /* TSC */ 438 /* TSC */
432 outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float); 439 outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
433 440
441 /* SMI */
442 if (do_smi)
443 outp += sprintf(outp, "%4d", t->smi_count);
444
434 /* delta */ 445 /* delta */
435 if (extra_delta_offset32) 446 if (extra_delta_offset32)
436 outp += sprintf(outp, " %11llu", t->extra_delta32); 447 outp += sprintf(outp, " %11llu", t->extra_delta32);
@@ -645,6 +656,9 @@ delta_thread(struct thread_data *new, struct thread_data *old,
645 */ 656 */
646 old->extra_msr32 = new->extra_msr32; 657 old->extra_msr32 = new->extra_msr32;
647 old->extra_msr64 = new->extra_msr64; 658 old->extra_msr64 = new->extra_msr64;
659
660 if (do_smi)
661 old->smi_count = new->smi_count - old->smi_count;
648} 662}
649 663
650int delta_cpu(struct thread_data *t, struct core_data *c, 664int delta_cpu(struct thread_data *t, struct core_data *c,
@@ -672,6 +686,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
672 t->mperf = 0; 686 t->mperf = 0;
673 t->c1 = 0; 687 t->c1 = 0;
674 688
689 t->smi_count = 0;
675 t->extra_delta32 = 0; 690 t->extra_delta32 = 0;
676 t->extra_delta64 = 0; 691 t->extra_delta64 = 0;
677 692
@@ -802,6 +817,11 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
802 return -4; 817 return -4;
803 } 818 }
804 819
820 if (do_smi) {
821 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
822 return -5;
823 t->smi_count = msr & 0xFFFFFFFF;
824 }
805 if (extra_delta_offset32) { 825 if (extra_delta_offset32) {
806 if (get_msr(cpu, extra_delta_offset32, &msr)) 826 if (get_msr(cpu, extra_delta_offset32, &msr))
807 return -5; 827 return -5;
@@ -908,8 +928,7 @@ void print_verbose_header(void)
908 928
909 get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); 929 get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
910 930
911 if (verbose) 931 fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
912 fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
913 932
914 ratio = (msr >> 40) & 0xFF; 933 ratio = (msr >> 40) & 0xFF;
915 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n", 934 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
@@ -919,13 +938,16 @@ void print_verbose_header(void)
919 fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n", 938 fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
920 ratio, bclk, ratio * bclk); 939 ratio, bclk, ratio * bclk);
921 940
941 get_msr(0, MSR_IA32_POWER_CTL, &msr);
942 fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n",
943 msr, msr & 0x2 ? "EN" : "DIS");
944
922 if (!do_ivt_turbo_ratio_limit) 945 if (!do_ivt_turbo_ratio_limit)
923 goto print_nhm_turbo_ratio_limits; 946 goto print_nhm_turbo_ratio_limits;
924 947
925 get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr); 948 get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr);
926 949
927 if (verbose) 950 fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
928 fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
929 951
930 ratio = (msr >> 56) & 0xFF; 952 ratio = (msr >> 56) & 0xFF;
931 if (ratio) 953 if (ratio)
@@ -1016,8 +1038,7 @@ print_nhm_turbo_ratio_limits:
1016 1038
1017 get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr); 1039 get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1018 1040
1019 if (verbose) 1041 fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
1020 fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
1021 1042
1022 ratio = (msr >> 56) & 0xFF; 1043 ratio = (msr >> 56) & 0xFF;
1023 if (ratio) 1044 if (ratio)
@@ -1397,6 +1418,9 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
1397 case 0x2D: /* SNB Xeon */ 1418 case 0x2D: /* SNB Xeon */
1398 case 0x3A: /* IVB */ 1419 case 0x3A: /* IVB */
1399 case 0x3E: /* IVB Xeon */ 1420 case 0x3E: /* IVB Xeon */
1421 case 0x3C: /* HSW */
1422 case 0x3F: /* HSW */
1423 case 0x45: /* HSW */
1400 return 1; 1424 return 1;
1401 case 0x2E: /* Nehalem-EX Xeon - Beckton */ 1425 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1402 case 0x2F: /* Westmere-EX Xeon - Eagleton */ 1426 case 0x2F: /* Westmere-EX Xeon - Eagleton */
@@ -1488,6 +1512,9 @@ void rapl_probe(unsigned int family, unsigned int model)
1488 switch (model) { 1512 switch (model) {
1489 case 0x2A: 1513 case 0x2A:
1490 case 0x3A: 1514 case 0x3A:
1515 case 0x3C: /* HSW */
1516 case 0x3F: /* HSW */
1517 case 0x45: /* HSW */
1491 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; 1518 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
1492 break; 1519 break;
1493 case 0x2D: 1520 case 0x2D:
@@ -1724,6 +1751,9 @@ int is_snb(unsigned int family, unsigned int model)
1724 case 0x2D: 1751 case 0x2D:
1725 case 0x3A: /* IVB */ 1752 case 0x3A: /* IVB */
1726 case 0x3E: /* IVB Xeon */ 1753 case 0x3E: /* IVB Xeon */
1754 case 0x3C: /* HSW */
1755 case 0x3F: /* HSW */
1756 case 0x45: /* HSW */
1727 return 1; 1757 return 1;
1728 } 1758 }
1729 return 0; 1759 return 0;
@@ -1883,6 +1913,7 @@ void check_cpuid()
1883 1913
1884 do_nehalem_platform_info = genuine_intel && has_invariant_tsc; 1914 do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
1885 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ 1915 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
1916 do_smi = do_nhm_cstates;
1886 do_snb_cstates = is_snb(family, model); 1917 do_snb_cstates = is_snb(family, model);
1887 bclk = discover_bclk(family, model); 1918 bclk = discover_bclk(family, model);
1888 1919
@@ -2219,9 +2250,6 @@ void cmdline(int argc, char **argv)
2219 case 'c': 2250 case 'c':
2220 sscanf(optarg, "%x", &extra_delta_offset32); 2251 sscanf(optarg, "%x", &extra_delta_offset32);
2221 break; 2252 break;
2222 case 's':
2223 extra_delta_offset32 = 0x34; /* SMI counter */
2224 break;
2225 case 'C': 2253 case 'C':
2226 sscanf(optarg, "%x", &extra_delta_offset64); 2254 sscanf(optarg, "%x", &extra_delta_offset64);
2227 break; 2255 break;
@@ -2248,7 +2276,7 @@ int main(int argc, char **argv)
2248 cmdline(argc, argv); 2276 cmdline(argc, argv);
2249 2277
2250 if (verbose) 2278 if (verbose)
2251 fprintf(stderr, "turbostat v3.0 November 23, 2012" 2279 fprintf(stderr, "turbostat v3.2 February 11, 2013"
2252 " - Len Brown <lenb@kernel.org>\n"); 2280 " - Len Brown <lenb@kernel.org>\n");
2253 2281
2254 turbostat_init(); 2282 turbostat_init();
diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore
new file mode 100644
index 000000000000..44f095fa2604
--- /dev/null
+++ b/tools/vm/.gitignore
@@ -0,0 +1,2 @@
1slabinfo
2page-types